From fd6a77deecc853e2a970b4d8569527aa24bae5b9 Mon Sep 17 00:00:00 2001 From: Igor Ilic Date: Thu, 8 Jan 2026 13:31:25 +0100 Subject: [PATCH] refactor: Add TODO for missing llm config parameters --- .../litellm_instructor/llm/get_llm_client.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/get_llm_client.py b/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/get_llm_client.py index dc0fd995a..1ddb9c480 100644 --- a/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/get_llm_client.py +++ b/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/get_llm_client.py @@ -194,6 +194,7 @@ def get_llm_client(raise_api_key_error: bool = True): ) # Get optional local mode parameters (will be None if not set) + # TODO: refactor llm_config to include these parameters; currently they cannot be defined and defaults are used model_path = getattr(llm_config, "llama_cpp_model_path", None) n_ctx = getattr(llm_config, "llama_cpp_n_ctx", 2048) n_gpu_layers = getattr(llm_config, "llama_cpp_n_gpu_layers", 0)