Compare commits

..

No commits in common. "main" and "v0.24.2" have entirely different histories.

13 changed files with 28 additions and 136 deletions

View file

@@ -18,9 +18,7 @@ import logging
from collections.abc import Coroutine
from typing import Any
import neo4j.exceptions
from neo4j import AsyncGraphDatabase, EagerResult
from neo4j.exceptions import ClientError
from typing_extensions import LiteralString
from graphiti_core.driver.driver import GraphDriver, GraphDriverSession, GraphProvider
@@ -72,15 +70,6 @@ class Neo4jDriver(GraphDriver):
try:
result = await self.client.execute_query(cypher_query_, parameters_=params, **kwargs)
except neo4j.exceptions.ClientError as e:
# Handle race condition when creating indices/constraints in parallel
# Neo4j 5.26+ may throw EquivalentSchemaRuleAlreadyExists even with IF NOT EXISTS
if 'EquivalentSchemaRuleAlreadyExists' in str(e):
logger.info(f'Index or constraint already exists, continuing: {cypher_query_}')
# Return empty result to indicate success (index exists)
return EagerResult([], None, None) # type: ignore
logger.error(f'Error executing Neo4j query: {e}\n{cypher_query_}\n{params}')
raise
except Exception as e:
logger.error(f'Error executing Neo4j query: {e}\n{cypher_query_}\n{params}')
raise
@@ -99,21 +88,6 @@ class Neo4jDriver(GraphDriver):
'CALL db.indexes() YIELD name DROP INDEX name',
)
async def _execute_index_query(self, query: LiteralString) -> EagerResult | None:
"""Execute an index creation query, ignoring 'index already exists' errors.
Neo4j can raise EquivalentSchemaRuleAlreadyExists when concurrent CREATE INDEX
IF NOT EXISTS queries race, even though the index exists. This is safe to ignore.
"""
try:
return await self.execute_query(query)
except ClientError as e:
# Ignore "equivalent index already exists" error (race condition with IF NOT EXISTS)
if 'EquivalentSchemaRuleAlreadyExists' in str(e):
logger.debug(f'Index already exists (concurrent creation): {query[:50]}...')
return None
raise
async def build_indices_and_constraints(self, delete_existing: bool = False):
if delete_existing:
await self.delete_all_indexes()
@@ -124,7 +98,14 @@ class Neo4jDriver(GraphDriver):
index_queries: list[LiteralString] = range_indices + fulltext_indices
await semaphore_gather(*[self._execute_index_query(query) for query in index_queries])
await semaphore_gather(
*[
self.execute_query(
query,
)
for query in index_queries
]
)
async def health_check(self) -> None:
"""Check Neo4j connectivity by running the driver's verify_connectivity method."""

View file

@@ -31,8 +31,8 @@ from .errors import RateLimitError, RefusalError
logger = logging.getLogger(__name__)
DEFAULT_MODEL = 'gpt-4o-mini'
DEFAULT_SMALL_MODEL = 'gpt-4o-mini'
DEFAULT_MODEL = 'gpt-5-mini'
DEFAULT_SMALL_MODEL = 'gpt-5-nano'
DEFAULT_REASONING = 'minimal'
DEFAULT_VERBOSITY = 'low'

View file

@@ -41,16 +41,6 @@ class DateFilter(BaseModel):
)
class PropertyFilter(BaseModel):
property_name: str = Field(description='Property name')
property_value: str | int | float | None = Field(
description='Value you want to match on for the property'
)
comparison_operator: ComparisonOperator = Field(
description='Comparison operator for the property'
)
class SearchFilters(BaseModel):
node_labels: list[str] | None = Field(
default=None, description='List of node labels to filter on'
@@ -63,7 +53,6 @@ class SearchFilters(BaseModel):
created_at: list[list[DateFilter]] | None = Field(default=None)
expired_at: list[list[DateFilter]] | None = Field(default=None)
edge_uuids: list[str] | None = Field(default=None)
property_filters: list[PropertyFilter] | None = Field(default=None)
def cypher_to_opensearch_operator(op: ComparisonOperator) -> str:

View file

@@ -1,49 +0,0 @@
# Graphiti MCP Server Environment Configuration
MCP_SERVER_HOST=gmakai.online
# Neo4j Database Configuration
# These settings are used to connect to your Neo4j database
NEO4J_URI=bolt://neo4j:7687
NEO4J_USER=neo4j
NEO4J_PASSWORD=your_neo4j_password_here
# OpenAI API Configuration
# Required for LLM operations
OPENAI_API_KEY=your_openai_api_key_here
MODEL_NAME=gpt-5.1-nano
# Optional: Only needed for non-standard OpenAI endpoints
OPENAI_BASE_URL=https://openrouter.ai/api/v1
# Optional: Group ID for namespacing graph data
# GROUP_ID=my_project
# Concurrency Control
# Controls how many episodes can be processed simultaneously
# Default: 10 (suitable for OpenAI Tier 3, mid-tier Anthropic)
# Adjust based on your LLM provider's rate limits:
# - OpenAI Tier 1 (free): 1-2
# - OpenAI Tier 2: 5-8
# - OpenAI Tier 3: 10-15
# - OpenAI Tier 4: 20-50
# - Anthropic default: 5-8
# - Anthropic high tier: 15-30
# - Ollama (local): 1-5
# See README.md "Concurrency and LLM Provider 429 Rate Limit Errors" for details
SEMAPHORE_LIMIT=10
# Optional: Path configuration for Docker
# PATH=/root/.local/bin:${PATH}
# Optional: Memory settings for Neo4j (used in Docker Compose)
# NEO4J_server_memory_heap_initial__size=512m
# NEO4J_server_memory_heap_max__size=1G
# NEO4J_server_memory_pagecache_size=512m
# Azure OpenAI configuration
# Optional: Only needed for Azure OpenAI endpoints
# AZURE_OPENAI_ENDPOINT=your_azure_openai_endpoint_here
# AZURE_OPENAI_API_VERSION=2025-01-01-preview
# AZURE_OPENAI_DEPLOYMENT_NAME=gpt-4o-gpt-4o-mini-deployment
# AZURE_OPENAI_EMBEDDING_API_VERSION=2023-05-15
# AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME=text-embedding-3-large-deployment
# AZURE_OPENAI_USE_MANAGED_IDENTITY=false

View file

@@ -8,7 +8,7 @@ server:
llm:
provider: "openai" # Options: openai, azure_openai, anthropic, gemini, groq
model: "gpt-4o-mini"
model: "gpt-5-mini"
max_tokens: 4096
providers:

View file

@@ -8,7 +8,7 @@ server:
llm:
provider: "openai" # Options: openai, azure_openai, anthropic, gemini, groq
model: "gpt-4o-mini"
model: "gpt-5-mini"
max_tokens: 4096
providers:

View file

@@ -8,7 +8,7 @@ server:
llm:
provider: "openai" # Options: openai, azure_openai, anthropic, gemini, groq
model: "gpt-4o-mini"
model: "gpt-5-mini"
max_tokens: 4096
providers:

View file

@@ -12,7 +12,7 @@ server:
llm:
provider: "openai" # Options: openai, azure_openai, anthropic, gemini, groq
model: "gpt-4o-mini"
model: "gpt-5-mini"
max_tokens: 4096
providers:

View file

@@ -1,23 +1,23 @@
services:
neo4j:
image: neo4j:latest
image: neo4j:5.26.0
ports:
- "7474:7474" # HTTP
- "7687:7687" # Bolt
environment:
- NEO4J_AUTH=${NEO4J_USER:-neo4j}/${NEO4J_PASSWORD:-your_neo4j_password_here}
- NEO4J_AUTH=${NEO4J_USER:-neo4j}/${NEO4J_PASSWORD:-demodemo}
- NEO4J_server_memory_heap_initial__size=512m
- NEO4J_server_memory_heap_max__size=1G
- NEO4J_server_memory_pagecache_size=512m
volumes:
- /data/neo4j/data:/data
- /data/neo4j/logs:/logs
- /data/neo4j/plugins:/plugins
- /data/neo4j/config:/config
- neo4j_data:/data
- neo4j_logs:/logs
healthcheck:
test: ["CMD", "wget", "-O", "/dev/null", "http://localhost:7474"]
interval: 10s
timeout: 5s
retries: 5
start_period: 30s
restart: always
graphiti-mcp:
image: zepai/knowledge-graph-mcp:standalone
@@ -27,9 +27,9 @@ services:
build:
context: ..
dockerfile: docker/Dockerfile.standalone
#env_file:
# - path: ../.env
# required: true
env_file:
- path: ../.env
required: false
depends_on:
neo4j:
condition: service_healthy
@@ -37,18 +37,13 @@ services:
# Database configuration
- NEO4J_URI=${NEO4J_URI:-bolt://neo4j:7687}
- NEO4J_USER=${NEO4J_USER:-neo4j}
- NEO4J_PASSWORD=${NEO4J_PASSWORD:-your_neo4j_password_here}
- NEO4J_PASSWORD=${NEO4J_PASSWORD:-demodemo}
- NEO4J_DATABASE=${NEO4J_DATABASE:-neo4j}
# Application configuration
- GRAPHITI_GROUP_ID=${GRAPHITI_GROUP_ID:-main}
- SEMAPHORE_LIMIT=${SEMAPHORE_LIMIT:-10}
- CONFIG_PATH=/app/mcp/config/config.yaml
- PATH=/root/.local/bin:${PATH}
- MCP_SERVER_HOST=gmakai.online
- OPENAI_API_KEY=your_openai_api_key_here
- MODEL_NAME=gpt-5.1-nano
- OPENAI_BASE_URL=https://openrouter.ai/api/v1
volumes:
- ../config/config-docker-neo4j.yaml:/app/mcp/config/config.yaml:ro
ports:

View file

@@ -147,7 +147,7 @@ class LLMConfig(BaseModel):
"""LLM configuration."""
provider: str = Field(default='openai', description='LLM provider')
model: str = Field(default='gpt-4o-mini', description='Model name')
model: str = Field(default='gpt-4.1', description='Model name')
temperature: float | None = Field(
default=None, description='Temperature (optional, defaults to None for reasoning models)'
)

View file

@@ -1,7 +1,7 @@
[project]
name = "graphiti-core"
description = "A temporal graph building library"
version = "0.24.3"
version = "0.24.1"
authors = [
{ name = "Paul Paliychuk", email = "paul@getzep.com" },
{ name = "Preston Rasmussen", email = "preston@getzep.com" },

View file

@@ -487,30 +487,6 @@
"created_at": "2025-11-27T02:45:53Z",
"repoId": 840056306,
"pullRequestNo": 1085
},
{
"name": "ronaldmego",
"id": 17481958,
"comment_id": 3617267429,
"created_at": "2025-12-05T14:59:42Z",
"repoId": 840056306,
"pullRequestNo": 1094
},
{
"name": "NShumway",
"id": 29358113,
"comment_id": 3634967978,
"created_at": "2025-12-10T01:26:49Z",
"repoId": 840056306,
"pullRequestNo": 1102
},
{
"name": "husniadil",
"id": 10581130,
"comment_id": 3650156180,
"created_at": "2025-12-14T03:37:59Z",
"repoId": 840056306,
"pullRequestNo": 1105
}
]
}

2
uv.lock generated
View file

@@ -808,7 +808,7 @@ wheels = [
[[package]]
name = "graphiti-core"
version = "0.24.3"
version = "0.24.1"
source = { editable = "." }
dependencies = [
{ name = "diskcache" },