conductor-checkpoint-msg_01Gn6qZrD3DZd8c6a6fmMap7

Daniel Chalef 2025-10-30 08:00:23 -07:00
parent 4573beaa3c
commit 00b579ff17
5 changed files with 25 additions and 42 deletions

View file

@@ -165,8 +165,16 @@ class BaseOpenAIClient(LLMClient):
             raise Exception(f'Output length exceeded max tokens {self.max_tokens}: {e}') from e
         except openai.RateLimitError as e:
             raise RateLimitError from e
+        except openai.AuthenticationError as e:
+            logger.error(f'OpenAI Authentication Error: {e}. Please verify your API key is correct.')
+            raise
         except Exception as e:
-            logger.error(f'Error in generating LLM response: {e}')
+            # Provide more context for connection errors
+            error_msg = str(e)
+            if 'Connection error' in error_msg or 'connection' in error_msg.lower():
+                logger.error(f'Connection error communicating with OpenAI API. Please check your network connection and API key. Error: {e}')
+            else:
+                logger.error(f'Error in generating LLM response: {e}')
             raise

     async def generate_response(
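
The new except chain surfaces authentication failures explicitly and then classifies remaining errors by substring-matching the exception message. A minimal standalone sketch of that classification heuristic (the function name and return values are illustrative, not the repo's API):

import logging

logger = logging.getLogger(__name__)

def classify_llm_error(e: Exception) -> str:
    # Mirrors the commit's heuristic: a case-insensitive 'connection' match
    # (which also subsumes the literal 'Connection error' check) flags network issues.
    error_msg = str(e)
    if 'Connection error' in error_msg or 'connection' in error_msg.lower():
        logger.error('Connection error communicating with OpenAI API: %s', e)
        return 'connection'
    logger.error('Error in generating LLM response: %s', e)
    return 'other'

assert classify_llm_error(RuntimeError('Connection error.')) == 'connection'
assert classify_llm_error(RuntimeError('peer reset the connection')) == 'connection'
assert classify_llm_error(ValueError('bad response schema')) == 'other'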

View file

@@ -22,7 +22,7 @@ services:
       dockerfile: docker/Dockerfile
     env_file:
       - path: ../.env
-        required: false # Makes the file optional. Default value is 'true'
+        required: false
     depends_on:
       falkordb:
         condition: service_healthy
@@ -31,18 +31,6 @@ services:
       - FALKORDB_URI=${FALKORDB_URI:-redis://falkordb:6379}
       - FALKORDB_PASSWORD=${FALKORDB_PASSWORD:-}
       - FALKORDB_DATABASE=${FALKORDB_DATABASE:-default_db}
-      # LLM provider configurations
-      - OPENAI_API_KEY=${OPENAI_API_KEY:-}
-      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
-      - GOOGLE_API_KEY=${GOOGLE_API_KEY:-}
-      - GROQ_API_KEY=${GROQ_API_KEY:-}
-      - AZURE_OPENAI_API_KEY=${AZURE_OPENAI_API_KEY:-}
-      - AZURE_OPENAI_ENDPOINT=${AZURE_OPENAI_ENDPOINT:-}
-      - AZURE_OPENAI_DEPLOYMENT=${AZURE_OPENAI_DEPLOYMENT:-}
-      # Embedder provider configurations
-      - VOYAGE_API_KEY=${VOYAGE_API_KEY:-}
-      - AZURE_OPENAI_EMBEDDINGS_ENDPOINT=${AZURE_OPENAI_EMBEDDINGS_ENDPOINT:-}
-      - AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT=${AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT:-}
       # Application configuration
       - GRAPHITI_GROUP_ID=${GRAPHITI_GROUP_ID:-main}
       - SEMAPHORE_LIMIT=${SEMAPHORE_LIMIT:-10}
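
Removing these entries from environment: should not strip them from the container: the service-level env_file: entry above (the optional ../.env) already injects whatever that file defines into the container's environment. A quick in-container sanity check, as a hypothetical illustration (not part of the repo):

import os

# Provider keys formerly listed under `environment:`, now expected via env_file.
PROVIDER_KEYS = [
    'OPENAI_API_KEY', 'ANTHROPIC_API_KEY', 'GOOGLE_API_KEY', 'GROQ_API_KEY',
    'AZURE_OPENAI_API_KEY', 'AZURE_OPENAI_ENDPOINT', 'VOYAGE_API_KEY',
]

for key in PROVIDER_KEYS:
    print(f"{key}: {'set' if os.environ.get(key) else 'missing'}")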

View file

@@ -26,7 +26,7 @@ services:
       dockerfile: docker/Dockerfile
     env_file:
       - path: ../.env
-        required: false # Makes the file optional. Default value is 'true'
+        required: false
     depends_on:
       neo4j:
         condition: service_healthy
@@ -36,18 +36,6 @@ services:
       - NEO4J_USER=${NEO4J_USER:-neo4j}
       - NEO4J_PASSWORD=${NEO4J_PASSWORD:-demodemo}
       - NEO4J_DATABASE=${NEO4J_DATABASE:-neo4j}
-      # LLM provider configurations
-      - OPENAI_API_KEY=${OPENAI_API_KEY:-}
-      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
-      - GOOGLE_API_KEY=${GOOGLE_API_KEY:-}
-      - GROQ_API_KEY=${GROQ_API_KEY:-}
-      - AZURE_OPENAI_API_KEY=${AZURE_OPENAI_API_KEY:-}
-      - AZURE_OPENAI_ENDPOINT=${AZURE_OPENAI_ENDPOINT:-}
-      - AZURE_OPENAI_DEPLOYMENT=${AZURE_OPENAI_DEPLOYMENT:-}
-      # Embedder provider configurations
-      - VOYAGE_API_KEY=${VOYAGE_API_KEY:-}
-      - AZURE_OPENAI_EMBEDDINGS_ENDPOINT=${AZURE_OPENAI_EMBEDDINGS_ENDPOINT:-}
-      - AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT=${AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT:-}
       # Application configuration
       - GRAPHITI_GROUP_ID=${GRAPHITI_GROUP_ID:-main}
       - SEMAPHORE_LIMIT=${SEMAPHORE_LIMIT:-10}

View file

@@ -9,20 +9,8 @@ services:
         required: false
     environment:
       # Database configuration for KuzuDB - using persistent storage
-      - KUZU_DB=${KUZU_DB:-/data/graphiti.kuzu}
-      - KUZU_MAX_CONCURRENT_QUERIES=${KUZU_MAX_CONCURRENT_QUERIES:-10}
-      # LLM provider configurations
-      - OPENAI_API_KEY=${OPENAI_API_KEY:-}
-      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
-      - GOOGLE_API_KEY=${GOOGLE_API_KEY:-}
-      - GROQ_API_KEY=${GROQ_API_KEY:-}
-      - AZURE_OPENAI_API_KEY=${AZURE_OPENAI_API_KEY:-}
-      - AZURE_OPENAI_ENDPOINT=${AZURE_OPENAI_ENDPOINT:-}
-      - AZURE_OPENAI_DEPLOYMENT=${AZURE_OPENAI_DEPLOYMENT:-}
-      # Embedder provider configurations
-      - VOYAGE_API_KEY=${VOYAGE_API_KEY:-}
-      - AZURE_OPENAI_EMBEDDINGS_ENDPOINT=${AZURE_OPENAI_EMBEDDINGS_ENDPOINT:-}
-      - AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT=${AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT:-}
+      - KUZU_DB=/data/graphiti.kuzu
+      - KUZU_MAX_CONCURRENT_QUERIES=10
       # Application configuration
      - GRAPHITI_GROUP_ID=${GRAPHITI_GROUP_ID:-main}
       - SEMAPHORE_LIMIT=${SEMAPHORE_LIMIT:-10}
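
Unlike the FalkorDB and Neo4j files, this hunk also replaces the ${VAR:-default} interpolation for the Kuzu settings with hard-coded literals, so host-side overrides of KUZU_DB and KUZU_MAX_CONCURRENT_QUERIES no longer take effect. For reference, compose's :- operator falls back when the variable is unset or empty; a small Python emulation (not compose code):

import os

def compose_default(name: str, default: str) -> str:
    # Emulates ${NAME:-default}: the default wins when NAME is unset OR empty.
    value = os.environ.get(name)
    return value if value else default

os.environ['KUZU_DB'] = ''  # empty string still falls back with :-
print(compose_default('KUZU_DB', '/data/graphiti.kuzu'))     # /data/graphiti.kuzu

os.environ['KUZU_MAX_CONCURRENT_QUERIES'] = '32'
print(compose_default('KUZU_MAX_CONCURRENT_QUERIES', '10'))  # 32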

View file

@@ -85,6 +85,9 @@ class LLMClientFactory:
     @staticmethod
     def create(config: LLMConfig) -> LLMClient:
         """Create an LLM client based on the configured provider."""
+        import logging
+
+        logger = logging.getLogger(__name__)

         provider = config.provider.lower()
         match provider:
@@ -92,6 +95,14 @@ class LLMClientFactory:
                 if not config.providers.openai:
                     raise ValueError('OpenAI provider configuration not found')

                 api_key = config.providers.openai.api_key
+                if not api_key:
+                    raise ValueError('OpenAI API key is not configured. Please set OPENAI_API_KEY environment variable.')
+
+                # Log masked API key for debugging
+                masked_key = f'{api_key[:7]}...{api_key[-4:]}' if len(api_key) > 11 else 'sk-***'
+                logger.info(f'Creating OpenAI client with API key: {masked_key}')
+
                 from graphiti_core.llm_client.config import LLMConfig as CoreLLMConfig
+
                 # Determine appropriate small model based on main model type
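
The masked-key expression only reveals a prefix and suffix when the key is long enough that at least one character stays hidden between them (7 + 4 = 11). Extracted as a standalone helper for illustration (the name mask_api_key is ours, not the repo's):

def mask_api_key(api_key: str) -> str:
    # len > 11 guarantees at least one character remains hidden between
    # the 7-character prefix and the 4-character suffix.
    return f'{api_key[:7]}...{api_key[-4:]}' if len(api_key) > 11 else 'sk-***'

print(mask_api_key('sk-proj-abcdefghijklmnop'))  # sk-proj...mnop
print(mask_api_key('sk-short'))                  # sk-*** (too short to reveal safely)

Logging even a masked key at INFO level is a debugging convenience; the masking keeps full keys out of log files.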
@@ -107,7 +118,7 @@ class LLMClientFactory:
                     small_model = 'gpt-4.1-mini'  # Use non-reasoning model for small tasks

                 llm_config = CoreLLMConfig(
-                    api_key=config.providers.openai.api_key,
+                    api_key=api_key,
                     model=config.model,
                     small_model=small_model,
                     temperature=config.temperature,