diff --git a/cognee/api/v1/cognify/cognify.py b/cognee/api/v1/cognify/cognify.py
index 9862edd49..9371f7ffd 100644
--- a/cognee/api/v1/cognify/cognify.py
+++ b/cognee/api/v1/cognify/cognify.py
@@ -53,6 +53,7 @@ async def cognify(
     custom_prompt: Optional[str] = None,
     temporal_cognify: bool = False,
     data_per_batch: int = 20,
+    **kwargs
 ):
     """
     Transform ingested data into a structured knowledge graph.
@@ -223,6 +224,7 @@ async def cognify(
         config=config,
         custom_prompt=custom_prompt,
         chunks_per_batch=chunks_per_batch,
+        **kwargs,
     )

     # By calling get pipeline executor we get a function that will have the run_pipeline run in the background or a function that we will need to wait for
@@ -251,6 +253,7 @@ async def get_default_tasks(  # TODO: Find out a better way to do this (Boris's
     config: Config = None,
     custom_prompt: Optional[str] = None,
     chunks_per_batch: int = 100,
+    **kwargs,
 ) -> list[Task]:
     if config is None:
         ontology_config = get_ontology_env_config()
@@ -288,6 +291,7 @@ async def get_default_tasks(  # TODO: Find out a better way to do this (Boris's
             config=config,
             custom_prompt=custom_prompt,
             task_config={"batch_size": chunks_per_batch},
+            **kwargs,
         ),  # Generate knowledge graphs from the document chunks.
         Task(
             summarize_text,
diff --git a/cognee/infrastructure/llm/LLMGateway.py b/cognee/infrastructure/llm/LLMGateway.py
index ab5bb35d7..fd42eb55e 100644
--- a/cognee/infrastructure/llm/LLMGateway.py
+++ b/cognee/infrastructure/llm/LLMGateway.py
@@ -11,7 +11,7 @@ class LLMGateway:

     @staticmethod
     def acreate_structured_output(
-        text_input: str, system_prompt: str, response_model: Type[BaseModel]
+        text_input: str, system_prompt: str, response_model: Type[BaseModel], **kwargs
     ) -> Coroutine:
         llm_config = get_llm_config()
         if llm_config.structured_output_framework.upper() == "BAML":
@@ -31,7 +31,7 @@ class LLMGateway:
         llm_client = get_llm_client()

         return llm_client.acreate_structured_output(
-            text_input=text_input, system_prompt=system_prompt, response_model=response_model
+            text_input=text_input, system_prompt=system_prompt, response_model=response_model, **kwargs
         )

     @staticmethod
diff --git a/cognee/infrastructure/llm/extraction/knowledge_graph/extract_content_graph.py b/cognee/infrastructure/llm/extraction/knowledge_graph/extract_content_graph.py
index 59e6f563a..4a40979f4 100644
--- a/cognee/infrastructure/llm/extraction/knowledge_graph/extract_content_graph.py
+++ b/cognee/infrastructure/llm/extraction/knowledge_graph/extract_content_graph.py
@@ -10,7 +10,7 @@ from cognee.infrastructure.llm.config import (
 )

 async def extract_content_graph(
-    content: str, response_model: Type[BaseModel], custom_prompt: Optional[str] = None
+    content: str, response_model: Type[BaseModel], custom_prompt: Optional[str] = None, **kwargs
 ):
     if custom_prompt:
         system_prompt = custom_prompt
@@ -30,7 +30,7 @@ async def extract_content_graph(
         system_prompt = render_prompt(prompt_path, {}, base_directory=base_directory)

     content_graph = await LLMGateway.acreate_structured_output(
-        content, system_prompt, response_model
+        content, system_prompt, response_model, **kwargs
     )

     return content_graph
diff --git a/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/anthropic/adapter.py b/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/anthropic/adapter.py
index b6f218022..58b68436c 100644
--- a/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/anthropic/adapter.py
+++ b/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/anthropic/adapter.py
@@ -52,7 +52,7 @@ class AnthropicAdapter(LLMInterface):
         reraise=True,
     )
     async def acreate_structured_output(
-        self, text_input: str, system_prompt: str, response_model: Type[BaseModel]
+        self, text_input: str, system_prompt: str, response_model: Type[BaseModel], **kwargs
     ) -> BaseModel:
         """
         Generate a response from a user query.
diff --git a/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/gemini/adapter.py b/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/gemini/adapter.py
index a8fcebbee..208c3729d 100644
--- a/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/gemini/adapter.py
+++ b/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/gemini/adapter.py
@@ -80,7 +80,7 @@ class GeminiAdapter(LLMInterface):
         reraise=True,
     )
     async def acreate_structured_output(
-        self, text_input: str, system_prompt: str, response_model: Type[BaseModel]
+        self, text_input: str, system_prompt: str, response_model: Type[BaseModel], **kwargs
     ) -> BaseModel:
         """
         Generate a response from a user query.
diff --git a/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/generic_llm_api/adapter.py b/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/generic_llm_api/adapter.py
index 9beb702e5..d6e00d40a 100644
--- a/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/generic_llm_api/adapter.py
+++ b/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/generic_llm_api/adapter.py
@@ -80,7 +80,7 @@ class GenericAPIAdapter(LLMInterface):
         reraise=True,
     )
     async def acreate_structured_output(
-        self, text_input: str, system_prompt: str, response_model: Type[BaseModel]
+        self, text_input: str, system_prompt: str, response_model: Type[BaseModel], **kwargs
     ) -> BaseModel:
         """
         Generate a response from a user query.
diff --git a/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/mistral/adapter.py b/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/mistral/adapter.py
index e9580faeb..e1131524d 100644
--- a/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/mistral/adapter.py
+++ b/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/mistral/adapter.py
@@ -69,7 +69,7 @@ class MistralAdapter(LLMInterface):
         reraise=True,
     )
     async def acreate_structured_output(
-        self, text_input: str, system_prompt: str, response_model: Type[BaseModel]
+        self, text_input: str, system_prompt: str, response_model: Type[BaseModel], **kwargs
     ) -> BaseModel:
         """
         Generate a response from the user query.
diff --git a/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/ollama/adapter.py b/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/ollama/adapter.py
index 877da23ef..211e49694 100644
--- a/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/ollama/adapter.py
+++ b/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/ollama/adapter.py
@@ -76,7 +76,7 @@ class OllamaAPIAdapter(LLMInterface):
         reraise=True,
     )
     async def acreate_structured_output(
-        self, text_input: str, system_prompt: str, response_model: Type[BaseModel]
+        self, text_input: str, system_prompt: str, response_model: Type[BaseModel], **kwargs
     ) -> BaseModel:
         """
         Generate a structured output from the LLM using the provided text and system prompt.
@@ -123,7 +123,7 @@ class OllamaAPIAdapter(LLMInterface):
         before_sleep=before_sleep_log(logger, logging.DEBUG),
         reraise=True,
     )
-    async def create_transcript(self, input_file: str) -> str:
+    async def create_transcript(self, input_file: str, **kwargs) -> str:
         """
         Generate an audio transcript from a user query.

@@ -162,7 +162,7 @@ class OllamaAPIAdapter(LLMInterface):
         before_sleep=before_sleep_log(logger, logging.DEBUG),
         reraise=True,
     )
-    async def transcribe_image(self, input_file: str) -> str:
+    async def transcribe_image(self, input_file: str, **kwargs) -> str:
         """
         Transcribe content from an image using base64 encoding.
diff --git a/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/openai/adapter.py b/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/openai/adapter.py
index 407b720a8..ca9b583b7 100644
--- a/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/openai/adapter.py
+++ b/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/openai/adapter.py
@@ -112,7 +112,7 @@ class OpenAIAdapter(LLMInterface):
         reraise=True,
     )
     async def acreate_structured_output(
-        self, text_input: str, system_prompt: str, response_model: Type[BaseModel]
+        self, text_input: str, system_prompt: str, response_model: Type[BaseModel], **kwargs
     ) -> BaseModel:
         """
         Generate a response from a user query.
@@ -154,6 +154,7 @@ class OpenAIAdapter(LLMInterface):
                 api_version=self.api_version,
                 response_model=response_model,
                 max_retries=self.MAX_RETRIES,
+                **kwargs,
             )
         except (
             ContentFilterFinishReasonError,
@@ -180,6 +181,7 @@ class OpenAIAdapter(LLMInterface):
                     # api_base=self.fallback_endpoint,
                     response_model=response_model,
                     max_retries=self.MAX_RETRIES,
+                    **kwargs,
                 )
             except (
                 ContentFilterFinishReasonError,
@@ -205,7 +207,7 @@ class OpenAIAdapter(LLMInterface):
         reraise=True,
     )
     def create_structured_output(
-        self, text_input: str, system_prompt: str, response_model: Type[BaseModel]
+        self, text_input: str, system_prompt: str, response_model: Type[BaseModel], **kwargs
     ) -> BaseModel:
         """
         Generate a response from a user query.
@@ -245,6 +247,7 @@ class OpenAIAdapter(LLMInterface):
             api_version=self.api_version,
             response_model=response_model,
             max_retries=self.MAX_RETRIES,
+            **kwargs,
         )

     @retry(
@@ -254,7 +257,7 @@ class OpenAIAdapter(LLMInterface):
         before_sleep=before_sleep_log(logger, logging.DEBUG),
         reraise=True,
     )
-    async def create_transcript(self, input):
+    async def create_transcript(self, input, **kwargs):
         """
         Generate an audio transcript from a user query.

@@ -281,6 +284,7 @@ class OpenAIAdapter(LLMInterface):
             api_base=self.endpoint,
             api_version=self.api_version,
             max_retries=self.MAX_RETRIES,
+            **kwargs,
         )

         return transcription
@@ -292,7 +296,7 @@ class OpenAIAdapter(LLMInterface):
         before_sleep=before_sleep_log(logger, logging.DEBUG),
         reraise=True,
     )
-    async def transcribe_image(self, input) -> BaseModel:
+    async def transcribe_image(self, input, **kwargs) -> BaseModel:
         """
         Generate a transcription of an image from a user query.
@@ -337,4 +341,5 @@ class OpenAIAdapter(LLMInterface):
             api_version=self.api_version,
             max_completion_tokens=300,
             max_retries=self.MAX_RETRIES,
+            **kwargs,
         )
diff --git a/cognee/tasks/graph/extract_graph_from_data.py b/cognee/tasks/graph/extract_graph_from_data.py
index 2d1eca17e..5b762d40c 100644
--- a/cognee/tasks/graph/extract_graph_from_data.py
+++ b/cognee/tasks/graph/extract_graph_from_data.py
@@ -97,6 +97,7 @@ async def extract_graph_from_data(
     graph_model: Type[BaseModel],
     config: Config = None,
     custom_prompt: Optional[str] = None,
+    **kwargs,
 ) -> List[DocumentChunk]:
     """
     Extracts and integrates a knowledge graph from the text content of document chunks using a specified graph model.
@@ -111,7 +112,7 @@ async def extract_graph_from_data(

     chunk_graphs = await asyncio.gather(
         *[
-            extract_content_graph(chunk.text, graph_model, custom_prompt=custom_prompt)
+            extract_content_graph(chunk.text, graph_model, custom_prompt=custom_prompt, **kwargs)
             for chunk in data_chunks
         ]
     )
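A minimal usage sketch of the new pass-through, not part of the patch: it assumes the default OpenAI adapter and that the configured instructor/litellm client accepts standard completion parameters such as temperature. With this change, any extra keyword argument given to cognee.cognify() flows through get_default_tasks -> extract_graph_from_data -> extract_content_graph -> LLMGateway.acreate_structured_output and lands on the adapter's completion call.

    import asyncio
    import cognee

    async def main():
        # Ingest a document, then build the graph with an extra LLM kwarg.
        await cognee.add("Alan Turing worked at Bletchley Park during WWII.")
        # "temperature" is an illustrative kwarg (an assumption, not part of
        # the patch); it is forwarded verbatim to the structured-output call
        # of whichever LLM adapter is configured.
        await cognee.cognify(temperature=0.0)

    asyncio.run(main())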