fix: Replace non-existent model names and handle Neo4j index race condition
- Replace hardcoded non-existent models (gpt-5-mini, gpt-5-nano, gpt-4.1) with valid gpt-4o-mini across config files and default client
- Add exception handling for EquivalentSchemaRuleAlreadyExists in Neo4j index creation to prevent race condition failures on startup
This commit is contained in:
parent
2d262cf22d
commit
deb093ca0c
7 changed files with 24 additions and 15 deletions
|
|
@ -19,6 +19,7 @@ from collections.abc import Coroutine
|
|||
from typing import Any
|
||||
|
||||
from neo4j import AsyncGraphDatabase, EagerResult
|
||||
from neo4j.exceptions import ClientError
|
||||
from typing_extensions import LiteralString
|
||||
|
||||
from graphiti_core.driver.driver import GraphDriver, GraphDriverSession, GraphProvider
|
||||
|
|
@ -88,6 +89,21 @@ class Neo4jDriver(GraphDriver):
|
|||
'CALL db.indexes() YIELD name DROP INDEX name',
|
||||
)
|
||||
|
||||
async def _execute_index_query(self, query: LiteralString) -> EagerResult | None:
|
||||
"""Execute an index creation query, ignoring 'index already exists' errors.
|
||||
|
||||
Neo4j can raise EquivalentSchemaRuleAlreadyExists when concurrent CREATE INDEX
|
||||
IF NOT EXISTS queries race, even though the index exists. This is safe to ignore.
|
||||
"""
|
||||
try:
|
||||
return await self.execute_query(query)
|
||||
except ClientError as e:
|
||||
# Ignore "equivalent index already exists" error (race condition with IF NOT EXISTS)
|
||||
if 'EquivalentSchemaRuleAlreadyExists' in str(e):
|
||||
logger.debug(f'Index already exists (concurrent creation): {query[:50]}...')
|
||||
return None
|
||||
raise
|
||||
|
||||
async def build_indices_and_constraints(self, delete_existing: bool = False):
|
||||
if delete_existing:
|
||||
await self.delete_all_indexes()
|
||||
|
|
@ -98,14 +114,7 @@ class Neo4jDriver(GraphDriver):
|
|||
|
||||
index_queries: list[LiteralString] = range_indices + fulltext_indices
|
||||
|
||||
await semaphore_gather(
|
||||
*[
|
||||
self.execute_query(
|
||||
query,
|
||||
)
|
||||
for query in index_queries
|
||||
]
|
||||
)
|
||||
await semaphore_gather(*[self._execute_index_query(query) for query in index_queries])
|
||||
|
||||
async def health_check(self) -> None:
|
||||
"""Check Neo4j connectivity by running the driver's verify_connectivity method."""
|
||||
|
|
|
|||
|
|
@ -31,8 +31,8 @@ from .errors import RateLimitError, RefusalError
|
|||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
DEFAULT_MODEL = 'gpt-5-mini'
|
||||
DEFAULT_SMALL_MODEL = 'gpt-5-nano'
|
||||
DEFAULT_MODEL = 'gpt-4o-mini'
|
||||
DEFAULT_SMALL_MODEL = 'gpt-4o-mini'
|
||||
DEFAULT_REASONING = 'minimal'
|
||||
DEFAULT_VERBOSITY = 'low'
|
||||
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@ server:
|
|||
|
||||
llm:
|
||||
provider: "openai" # Options: openai, azure_openai, anthropic, gemini, groq
|
||||
model: "gpt-5-mini"
|
||||
model: "gpt-4o-mini"
|
||||
max_tokens: 4096
|
||||
|
||||
providers:
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@ server:
|
|||
|
||||
llm:
|
||||
provider: "openai" # Options: openai, azure_openai, anthropic, gemini, groq
|
||||
model: "gpt-5-mini"
|
||||
model: "gpt-4o-mini"
|
||||
max_tokens: 4096
|
||||
|
||||
providers:
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@ server:
|
|||
|
||||
llm:
|
||||
provider: "openai" # Options: openai, azure_openai, anthropic, gemini, groq
|
||||
model: "gpt-5-mini"
|
||||
model: "gpt-4o-mini"
|
||||
max_tokens: 4096
|
||||
|
||||
providers:
|
||||
|
|
|
|||
|
|
@ -12,7 +12,7 @@ server:
|
|||
|
||||
llm:
|
||||
provider: "openai" # Options: openai, azure_openai, anthropic, gemini, groq
|
||||
model: "gpt-5-mini"
|
||||
model: "gpt-4o-mini"
|
||||
max_tokens: 4096
|
||||
|
||||
providers:
|
||||
|
|
|
|||
|
|
@ -147,7 +147,7 @@ class LLMConfig(BaseModel):
|
|||
"""LLM configuration."""
|
||||
|
||||
provider: str = Field(default='openai', description='LLM provider')
|
||||
model: str = Field(default='gpt-4.1', description='Model name')
|
||||
model: str = Field(default='gpt-4o-mini', description='Model name')
|
||||
temperature: float | None = Field(
|
||||
default=None, description='Temperature (optional, defaults to None for reasoning models)'
|
||||
)
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue