From 62cb4175dad03d6bc528cbc7abbe073cc21ec012 Mon Sep 17 00:00:00 2001
From: Daniel Chalef <131175+danielchalef@users.noreply.github.com>
Date: Wed, 29 Oct 2025 23:17:00 -0700
Subject: [PATCH] conductor-checkpoint-msg_018d52yUXdPF48UBWPQdiB4W

---
 graphiti_core/llm_client/openai_client.py | 12 ++----------
 mcp_server/src/services/factories.py      | 22 +++++++++++++++++++++-
 2 files changed, 23 insertions(+), 11 deletions(-)

diff --git a/graphiti_core/llm_client/openai_client.py b/graphiti_core/llm_client/openai_client.py
index c94dddc5..4cb1c810 100644
--- a/graphiti_core/llm_client/openai_client.py
+++ b/graphiti_core/llm_client/openai_client.py
@@ -73,22 +73,14 @@ class OpenAIClient(BaseOpenAIClient):
         verbosity: str | None = None,
     ):
         """Create a structured completion using OpenAI's beta parse API."""
-        # Only pass reasoning parameter for reasoning models (gpt-5 family: o1, o3-mini, o3, gpt-5-*)
-        is_reasoning_model = model.startswith('gpt-5') or model.startswith('o1') or model.startswith('o3')
-
-        # Adjust verbosity for gpt-4.1 which only supports 'medium'
-        adjusted_verbosity = verbosity
-        if verbosity == 'low' and model.startswith('gpt-4'):
-            adjusted_verbosity = 'medium'
-
         response = await self.client.responses.parse(
             model=model,
             input=messages,  # type: ignore
             temperature=temperature,
             max_output_tokens=max_tokens,
             text_format=response_model,  # type: ignore
-            reasoning={'effort': reasoning} if reasoning is not None and is_reasoning_model else None,  # type: ignore
-            text={'verbosity': adjusted_verbosity} if adjusted_verbosity is not None else None,  # type: ignore
+            reasoning={'effort': reasoning} if reasoning is not None else None,  # type: ignore
+            text={'verbosity': verbosity} if verbosity is not None else None,  # type: ignore
         )
 
         return response
diff --git a/mcp_server/src/services/factories.py b/mcp_server/src/services/factories.py
index 784a8ccb..862da6bc 100644
--- a/mcp_server/src/services/factories.py
+++ b/mcp_server/src/services/factories.py
@@ -100,7 +100,27 @@ class LLMClientFactory:
                     temperature=config.temperature,
                     max_tokens=config.max_tokens,
                 )
-                return OpenAIClient(config=llm_config)
+
+                # Only pass reasoning/verbosity parameters for reasoning models (gpt-5 family)
+                is_reasoning_model = (
+                    config.model.startswith('gpt-5')
+                    or config.model.startswith('o1')
+                    or config.model.startswith('o3')
+                )
+
+                if is_reasoning_model:
+                    return OpenAIClient(
+                        config=llm_config,
+                        reasoning='minimal',
+                        verbosity='low'
+                    )
+                else:
+                    # For non-reasoning models, explicitly pass None to disable these parameters
+                    return OpenAIClient(
+                        config=llm_config,
+                        reasoning=None,
+                        verbosity=None
+                    )
             case 'azure_openai':
                 if not HAS_AZURE_LLM: