From 4bae611721b47ddf8cdba3d74e45b4fa30abaf22 Mon Sep 17 00:00:00 2001
From: Igor Ilic
Date: Tue, 9 Sep 2025 18:11:34 +0200
Subject: [PATCH] fix: Resolve issue with BAML not working without LLM_API_KEY
 set for LiteLLM

---
 .../litellm_instructor/llm/get_llm_client.py | 10 +++++-----
 cognee/infrastructure/llm/utils.py           |  2 +-
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/get_llm_client.py b/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/get_llm_client.py
index 44679dcf7..049b27e04 100644
--- a/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/get_llm_client.py
+++ b/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/get_llm_client.py
@@ -32,7 +32,7 @@ class LLMProvider(Enum):
     GEMINI = "gemini"
 
 
-def get_llm_client():
+def get_llm_client(raise_api_key_error: bool = True):
     """
     Get the LLM client based on the configuration using Enums.
 
@@ -65,7 +65,7 @@ def get_llm_client():
         )
 
     if provider == LLMProvider.OPENAI:
-        if llm_config.llm_api_key is None:
+        if llm_config.llm_api_key is None and raise_api_key_error:
             raise LLMAPIKeyNotSetError()
 
         from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.llm.openai.adapter import (
@@ -86,7 +86,7 @@ def get_llm_client():
         )
 
     elif provider == LLMProvider.OLLAMA:
-        if llm_config.llm_api_key is None:
+        if llm_config.llm_api_key is None and raise_api_key_error:
             raise LLMAPIKeyNotSetError()
 
         from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.llm.generic_llm_api.adapter import (
@@ -111,7 +111,7 @@ def get_llm_client():
         )
 
     elif provider == LLMProvider.CUSTOM:
-        if llm_config.llm_api_key is None:
+        if llm_config.llm_api_key is None and raise_api_key_error:
             raise LLMAPIKeyNotSetError()
 
         from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.llm.generic_llm_api.adapter import (
@@ -130,7 +130,7 @@ def get_llm_client():
        )
 
     elif provider == LLMProvider.GEMINI:
-        if llm_config.llm_api_key is None:
+        if llm_config.llm_api_key is None and raise_api_key_error:
             raise LLMAPIKeyNotSetError()
 
         from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.llm.gemini.adapter import (
diff --git a/cognee/infrastructure/llm/utils.py b/cognee/infrastructure/llm/utils.py
index 7dd16d7ba..6c2f4537f 100644
--- a/cognee/infrastructure/llm/utils.py
+++ b/cognee/infrastructure/llm/utils.py
@@ -29,7 +29,7 @@ def get_max_chunk_tokens():
 
     # Calculate max chunk size based on the following formula
     embedding_engine = get_vector_engine().embedding_engine
-    llm_client = get_llm_client()
+    llm_client = get_llm_client(raise_api_key_error=False)
 
     # We need to make sure chunk size won't take more than half of LLM max context token size
     # but it also can't be bigger than the embedding engine max token size
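
Note (review context, not part of the patch): the change threads a `raise_api_key_error` flag through `get_llm_client()` so that `get_max_chunk_tokens()`, which only reads token limits and never sends a completion request, no longer fails when `LLM_API_KEY` is unset, e.g. when BAML handles structured output instead of LiteLLM. The Python sketch below is a minimal, self-contained illustration of that pattern. `LLMClient`, its `max_tokens` attribute, and the default limits are hypothetical stand-ins; only the names `get_llm_client`, `raise_api_key_error`, `LLMAPIKeyNotSetError`, and `get_max_chunk_tokens` mirror the patch.

    import os


    class LLMAPIKeyNotSetError(Exception):
        """Raised when the configured provider needs an API key but none is set."""


    class LLMClient:
        """Hypothetical stand-in for the provider adapters built in get_llm_client()."""

        def __init__(self, api_key: str | None, max_tokens: int = 8192):
            self.api_key = api_key
            self.max_tokens = max_tokens


    def get_llm_client(raise_api_key_error: bool = True) -> LLMClient:
        api_key = os.environ.get("LLM_API_KEY")
        # The default (True) keeps the fail-fast behavior for callers that will
        # send requests; metadata-only callers opt out by passing False.
        if api_key is None and raise_api_key_error:
            raise LLMAPIKeyNotSetError()
        return LLMClient(api_key)


    def get_max_chunk_tokens(embedding_max_tokens: int = 2048) -> int:
        # Pure token-limit arithmetic: no request is ever sent, so a missing
        # key must not abort the call (mirrors the utils.py hunk above).
        llm_client = get_llm_client(raise_api_key_error=False)
        # A chunk must fit in half the LLM context and within the embedder limit.
        return min(llm_client.max_tokens // 2, embedding_max_tokens)

Because the flag defaults to True, every existing call site keeps its current fail-fast behavior; only the one metadata-only path in utils.py opts out.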