refactor: add suggestions from PR
Add suggestions made by CodeRabbit on pull request
This commit is contained in:
parent
a8644e0bd7
commit
860218632f
3 changed files with 6 additions and 9 deletions
|
|
@ -22,13 +22,12 @@ def get_llm_client():
|
|||
|
||||
# Check if max_token value is defined in liteLLM for given model
|
||||
# if not use value from cognee configuration
|
||||
from cognee.infrastructure.llm.utils import get_model_max_tokens
|
||||
from cognee.infrastructure.llm.utils import (
|
||||
get_model_max_tokens,
|
||||
) # imported here to avoid circular imports
|
||||
|
||||
max_tokens = (
|
||||
get_model_max_tokens(llm_config.llm_model)
|
||||
if get_model_max_tokens(llm_config.llm_model)
|
||||
else llm_config.llm_max_tokens
|
||||
)
|
||||
model_max_tokens = get_model_max_tokens(llm_config.llm_model)
|
||||
max_tokens = model_max_tokens if model_max_tokens else llm_config.llm_max_tokens
|
||||
|
||||
if provider == LLMProvider.OPENAI:
|
||||
if llm_config.llm_api_key is None:
|
||||
|
|
|
|||
|
|
@ -11,5 +11,5 @@ class Document(DataPoint):
|
|||
mime_type: str
|
||||
_metadata: dict = {"index_fields": ["name"], "type": "Document"}
|
||||
|
||||
def read(self, chunk_size: int, chunker=str) -> str:
|
||||
def read(self, chunk_size: int, chunker=str, max_chunk_tokens: Optional[int] = None) -> str:
|
||||
pass
|
||||
|
|
|
|||
|
|
@ -97,8 +97,6 @@ def _get_chunk_source_code(
|
|||
current_source_code = ""
|
||||
|
||||
# Get embedding engine used in vector database
|
||||
from cognee.infrastructure.databases.vector.get_vector_engine import get_vector_engine
|
||||
|
||||
embedding_engine = get_vector_engine().embedding_engine
|
||||
|
||||
for i, (child_code, token_count) in enumerate(code_token_counts):
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue