conductor-checkpoint-msg_01Gn6qZrD3DZd8c6a6fmMap7
parent 4573beaa3c
commit 00b579ff17
5 changed files with 25 additions and 42 deletions
@@ -165,8 +165,16 @@ class BaseOpenAIClient(LLMClient):
             raise Exception(f'Output length exceeded max tokens {self.max_tokens}: {e}') from e
         except openai.RateLimitError as e:
             raise RateLimitError from e
+        except openai.AuthenticationError as e:
+            logger.error(f'OpenAI Authentication Error: {e}. Please verify your API key is correct.')
+            raise
         except Exception as e:
-            logger.error(f'Error in generating LLM response: {e}')
+            # Provide more context for connection errors
+            error_msg = str(e)
+            if 'Connection error' in error_msg or 'connection' in error_msg.lower():
+                logger.error(f'Connection error communicating with OpenAI API. Please check your network connection and API key. Error: {e}')
+            else:
+                logger.error(f'Error in generating LLM response: {e}')
             raise

     async def generate_response(

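The new except-branch distinguishes connection problems purely by inspecting the exception message. As a minimal, self-contained sketch of that classification (the helper name describe_llm_error and the sample messages are hypothetical illustrations, not part of this commit):

def describe_llm_error(exc: Exception) -> str:
    """Return a more actionable log message for connection-related failures."""
    error_msg = str(exc)
    if 'Connection error' in error_msg or 'connection' in error_msg.lower():
        # Same substring check as the except-block above: anything mentioning
        # "connection" gets a hint to verify network settings and the API key.
        return (
            'Connection error communicating with OpenAI API. '
            f'Please check your network connection and API key. Error: {exc}'
        )
    return f'Error in generating LLM response: {exc}'

print(describe_llm_error(RuntimeError('Connection error: host unreachable')))
print(describe_llm_error(RuntimeError('invalid request')))
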
@@ -22,7 +22,7 @@ services:
       dockerfile: docker/Dockerfile
     env_file:
       - path: ../.env
-        required: false # Makes the file optional. Default value is 'true'
+        required: false
     depends_on:
       falkordb:
         condition: service_healthy
@@ -31,18 +31,6 @@ services:
       - FALKORDB_URI=${FALKORDB_URI:-redis://falkordb:6379}
       - FALKORDB_PASSWORD=${FALKORDB_PASSWORD:-}
       - FALKORDB_DATABASE=${FALKORDB_DATABASE:-default_db}
-      # LLM provider configurations
-      - OPENAI_API_KEY=${OPENAI_API_KEY:-}
-      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
-      - GOOGLE_API_KEY=${GOOGLE_API_KEY:-}
-      - GROQ_API_KEY=${GROQ_API_KEY:-}
-      - AZURE_OPENAI_API_KEY=${AZURE_OPENAI_API_KEY:-}
-      - AZURE_OPENAI_ENDPOINT=${AZURE_OPENAI_ENDPOINT:-}
-      - AZURE_OPENAI_DEPLOYMENT=${AZURE_OPENAI_DEPLOYMENT:-}
-      # Embedder provider configurations
-      - VOYAGE_API_KEY=${VOYAGE_API_KEY:-}
-      - AZURE_OPENAI_EMBEDDINGS_ENDPOINT=${AZURE_OPENAI_EMBEDDINGS_ENDPOINT:-}
-      - AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT=${AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT:-}
       # Application configuration
       - GRAPHITI_GROUP_ID=${GRAPHITI_GROUP_ID:-main}
       - SEMAPHORE_LIMIT=${SEMAPHORE_LIMIT:-10}

@@ -26,7 +26,7 @@ services:
       dockerfile: docker/Dockerfile
     env_file:
       - path: ../.env
-        required: false # Makes the file optional. Default value is 'true'
+        required: false
     depends_on:
       neo4j:
         condition: service_healthy
@@ -36,18 +36,6 @@ services:
       - NEO4J_USER=${NEO4J_USER:-neo4j}
      - NEO4J_PASSWORD=${NEO4J_PASSWORD:-demodemo}
      - NEO4J_DATABASE=${NEO4J_DATABASE:-neo4j}
-      # LLM provider configurations
-      - OPENAI_API_KEY=${OPENAI_API_KEY:-}
-      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
-      - GOOGLE_API_KEY=${GOOGLE_API_KEY:-}
-      - GROQ_API_KEY=${GROQ_API_KEY:-}
-      - AZURE_OPENAI_API_KEY=${AZURE_OPENAI_API_KEY:-}
-      - AZURE_OPENAI_ENDPOINT=${AZURE_OPENAI_ENDPOINT:-}
-      - AZURE_OPENAI_DEPLOYMENT=${AZURE_OPENAI_DEPLOYMENT:-}
-      # Embedder provider configurations
-      - VOYAGE_API_KEY=${VOYAGE_API_KEY:-}
-      - AZURE_OPENAI_EMBEDDINGS_ENDPOINT=${AZURE_OPENAI_EMBEDDINGS_ENDPOINT:-}
-      - AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT=${AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT:-}
       # Application configuration
       - GRAPHITI_GROUP_ID=${GRAPHITI_GROUP_ID:-main}
       - SEMAPHORE_LIMIT=${SEMAPHORE_LIMIT:-10}

@@ -9,20 +9,8 @@ services:
         required: false
     environment:
       # Database configuration for KuzuDB - using persistent storage
-      - KUZU_DB=${KUZU_DB:-/data/graphiti.kuzu}
-      - KUZU_MAX_CONCURRENT_QUERIES=${KUZU_MAX_CONCURRENT_QUERIES:-10}
-      # LLM provider configurations
-      - OPENAI_API_KEY=${OPENAI_API_KEY:-}
-      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
-      - GOOGLE_API_KEY=${GOOGLE_API_KEY:-}
-      - GROQ_API_KEY=${GROQ_API_KEY:-}
-      - AZURE_OPENAI_API_KEY=${AZURE_OPENAI_API_KEY:-}
-      - AZURE_OPENAI_ENDPOINT=${AZURE_OPENAI_ENDPOINT:-}
-      - AZURE_OPENAI_DEPLOYMENT=${AZURE_OPENAI_DEPLOYMENT:-}
-      # Embedder provider configurations
-      - VOYAGE_API_KEY=${VOYAGE_API_KEY:-}
-      - AZURE_OPENAI_EMBEDDINGS_ENDPOINT=${AZURE_OPENAI_EMBEDDINGS_ENDPOINT:-}
-      - AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT=${AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT:-}
+      - KUZU_DB=/data/graphiti.kuzu
+      - KUZU_MAX_CONCURRENT_QUERIES=10
       # Application configuration
       - GRAPHITI_GROUP_ID=${GRAPHITI_GROUP_ID:-main}
       - SEMAPHORE_LIMIT=${SEMAPHORE_LIMIT:-10}

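For reference, Compose's ${VAR:-default} interpolation (dropped above in favor of literal values) falls back to the default whenever the variable is unset or empty. A rough Python analogue of the two behaviors, purely for illustration:

import os

# Rough analogue of ${KUZU_DB:-/data/graphiti.kuzu}: use the environment value
# when it is set and non-empty, otherwise fall back to the default path.
kuzu_db = os.environ.get('KUZU_DB') or '/data/graphiti.kuzu'

# The edited compose file pins the literal value instead, so the KUZU_DB
# environment variable no longer affects the container configuration.
kuzu_db_pinned = '/data/graphiti.kuzu'

print(kuzu_db, kuzu_db_pinned)
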
@@ -85,6 +85,9 @@ class LLMClientFactory:
     @staticmethod
     def create(config: LLMConfig) -> LLMClient:
         """Create an LLM client based on the configured provider."""
+        import logging
+        logger = logging.getLogger(__name__)
+
         provider = config.provider.lower()

         match provider:
@@ -92,6 +95,14 @@ class LLMClientFactory:
                 if not config.providers.openai:
                     raise ValueError('OpenAI provider configuration not found')

+                api_key = config.providers.openai.api_key
+                if not api_key:
+                    raise ValueError('OpenAI API key is not configured. Please set OPENAI_API_KEY environment variable.')
+
+                # Log masked API key for debugging
+                masked_key = f'{api_key[:7]}...{api_key[-4:]}' if len(api_key) > 11 else 'sk-***'
+                logger.info(f'Creating OpenAI client with API key: {masked_key}')
+
                 from graphiti_core.llm_client.config import LLMConfig as CoreLLMConfig

                 # Determine appropriate small model based on main model type
@@ -107,7 +118,7 @@ class LLMClientFactory:
                     small_model = 'gpt-4.1-mini'  # Use non-reasoning model for small tasks

                 llm_config = CoreLLMConfig(
-                    api_key=config.providers.openai.api_key,
+                    api_key=api_key,
                     model=config.model,
                     small_model=small_model,
                     temperature=config.temperature,

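The masking expression in the factory keeps only a short prefix and suffix of the key before logging it. A standalone sketch of that expression (the function name and the sample keys below are made-up placeholders, not real credentials):

def mask_api_key(api_key: str) -> str:
    # Keys longer than 11 characters keep the first seven and last four
    # characters; anything shorter is hidden behind a generic placeholder.
    return f'{api_key[:7]}...{api_key[-4:]}' if len(api_key) > 11 else 'sk-***'

print(mask_api_key('sk-proj-FAKEFAKEFAKE1234'))  # sk-proj...1234
print(mask_api_key('sk-short'))                  # sk-***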