Compare commits

...

24 commits

Author SHA1 Message Date
032c2a20c6 Update mcp_server/.env
2025-12-15 13:55:37 +00:00
323bd57c9b Update mcp_server/docker/docker-compose-neo4j.yml
2025-12-15 13:54:43 +00:00
8bbf068eae Update mcp_server/docker/docker-compose-neo4j.yml
2025-12-14 19:59:39 +00:00
f3132dc763 Update mcp_server/docker/docker-compose-neo4j.yml
2025-12-14 19:53:20 +00:00
efd21d6c7e Update mcp_server/docker/docker-compose-neo4j.yml
2025-12-14 19:48:10 +00:00
8f4c1ea8b9 Update mcp_server/docker/docker-compose-neo4j.yml
2025-12-14 19:41:34 +00:00
59802cfae5 Update mcp_server/.env
2025-12-14 19:41:01 +00:00
b5a8227035 Update graphiti_core/driver/neo4j_driver.py
2025-12-14 19:23:08 +00:00
becd9107f9 Update mcp_server/.env
2025-12-14 19:21:10 +00:00
6a5e4a356e Update mcp_server/.env
2025-12-14 19:20:37 +00:00
c5d7597319 Add mcp_server/.env
2025-12-14 19:10:44 +00:00
Daniel Chalef
d6ff7bb78c @husniadil has signed the CLA in getzep/graphiti#1105 2025-12-13 19:38:11 -08:00
Daniel Chalef
5e593dd096 @NShumway has signed the CLA in getzep/graphiti#1102 2025-12-09 17:26:59 -08:00
Ronald Mego Solano
1de752646a
Fix/model name config (#1094)
fix: Replace non-existent model names and handle Neo4j index race condition

- Replace hardcoded non-existent models (gpt-5-mini, gpt-5-nano, gpt-4.1)
  with valid gpt-4o-mini across config files and default client
- Add exception handling for EquivalentSchemaRuleAlreadyExists in
  Neo4j index creation to prevent race condition failures on startup
2025-12-08 11:31:04 -05:00
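The race-condition fix described in this commit message lands in graphiti_core/driver/neo4j_driver.py (its diff appears further down this page). A condensed sketch of the pattern, assuming only that the Neo4j driver surfaces the EquivalentSchemaRuleAlreadyExists code in the ClientError message, as the diff below shows; the helper name is illustrative:

```python
import logging

from neo4j.exceptions import ClientError

logger = logging.getLogger(__name__)


async def create_index_safely(driver, query: str):
    """Run a CREATE INDEX ... IF NOT EXISTS query, tolerating the concurrent-creation race."""
    try:
        return await driver.execute_query(query)
    except ClientError as e:
        # Two concurrent IF NOT EXISTS creations can still raise
        # EquivalentSchemaRuleAlreadyExists; the index exists either way.
        if 'EquivalentSchemaRuleAlreadyExists' in str(e):
            logger.debug('Index already exists (concurrent creation): %s', query[:50])
            return None
        raise
```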
Preston Rasmussen
24105b9556
add property filters (#1099)
* add property filters

* update
2025-12-08 10:50:19 -05:00
Daniel Chalef
2d50fa565d @ronaldmego has signed the CLA in getzep/graphiti#1094 2025-12-05 06:59:53 -08:00
Preston Rasmussen
6dc7b8800b
bump (#1093)
update
2025-12-04 13:11:33 -05:00
Preston Rasmussen
2d262cf22d
foreign language fix (#1090) 2025-12-03 11:47:12 -05:00
Daniel Chalef
bddd469167
Disable issue triage and daily maintenance workflows (#1089)
Remove automated issue triage and duplicate detection workflows to reduce
automated issue management.

- Remove .github/workflows/issue-triage.yml
- Remove .github/workflows/daily_issue_maintenance.yml

🤖 Generated with Claude Code

Co-authored-by: Claude <noreply@anthropic.com>
2025-12-02 15:50:07 -08:00
Daniel Chalef
422558d06c @ZLBillShaw has signed the CLA in getzep/graphiti#1085 2025-11-26 18:46:03 -08:00
Daniel Chalef
3ce3962e4b @apetti1920 has signed the CLA in getzep/graphiti#1084 2025-11-24 13:07:46 -08:00
Daniel Chalef
352977a002 @donbr has signed the CLA in getzep/graphiti#1081 2025-11-23 21:19:55 -08:00
Daniel Chalef
582e093582 @supmo668 has signed the CLA in getzep/graphiti#1072 2025-11-23 17:13:50 -08:00
Daniel Chalef
ae78828f9c
fix: replace deprecated gemini-2.5-flash-lite-preview with gemini-2.5-flash-lite (#1076)
fix: replace deprecated gemini-2.5-flash-lite-preview-06-17 with gemini-2.5-flash-lite

Updated all references to the deprecated Gemini model in:
- graphiti_core/llm_client/gemini_client.py
- graphiti_core/cross_encoder/gemini_reranker_client.py
- tests/llm_client/test_gemini_client.py
- README.md

This resolves 404 errors when using Gemini clients.

Fixes #1075

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-authored-by: claude[bot] <41898282+claude[bot]@users.noreply.github.com>
Co-authored-by: Daniel Chalef <danielchalef@users.noreply.github.com>
2025-11-20 16:03:51 -08:00
20 changed files with 177 additions and 299 deletions

.github/workflows/daily_issue_maintenance.yml (deleted)

@ -1,123 +0,0 @@
name: Daily Issue Maintenance
on:
schedule:
- cron: "0 0 * * *" # Every day at midnight
workflow_dispatch: # Manual trigger option
jobs:
find-legacy-duplicates:
runs-on: ubuntu-latest
if: github.event_name == 'workflow_dispatch'
permissions:
contents: read
issues: write
id-token: write
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 1
- uses: anthropics/claude-code-action@v1
with:
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
prompt: |
REPO: ${{ github.repository }}
Find potential duplicate issues in the repository:
1. Use `gh issue list --state open --limit 1000 --json number,title,body,createdAt` to get all open issues
2. For each issue, search for potential duplicates using `gh search issues` with keywords from the title and body
3. Compare issues to identify true duplicates using these criteria:
- Same bug or error being reported
- Same feature request (even if worded differently)
- Same question being asked
- Issues describing the same root problem
For each duplicate found:
- Add a comment linking to the original issue
- Apply the "duplicate" label using `gh issue edit`
- Be polite and explain why it's a duplicate
Focus on finding true duplicates, not just similar issues.
claude_args: |
--allowedTools "Bash(gh issue:*),Bash(gh search:*)"
--model claude-sonnet-4-5-20250929
check-stale-issues:
runs-on: ubuntu-latest
if: github.event_name == 'schedule'
permissions:
contents: read
issues: write
id-token: write
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 1
- uses: anthropics/claude-code-action@v1
with:
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
prompt: |
REPO: ${{ github.repository }}
Review stale issues and request confirmation:
1. Use `gh issue list --state open --limit 1000 --json number,title,updatedAt,comments` to get all open issues
2. Identify issues that are:
- Older than 60 days (based on updatedAt)
- Have no comments with "stale-check" label
- Are not labeled as "enhancement" or "documentation"
3. For each stale issue:
- Add a polite comment asking the issue originator if this is still relevant
- Apply a "stale-check" label to track that we've asked
- Use format: "@{author} Is this still an issue? Please confirm within 14 days or this issue will be closed."
Use:
- `gh issue view` to check issue details and labels
- `gh issue comment` to add comments
- `gh issue edit` to add the "stale-check" label
claude_args: |
--allowedTools "Bash(gh issue:*)"
--model claude-sonnet-4-5-20250929
close-unconfirmed-issues:
runs-on: ubuntu-latest
if: github.event_name == 'schedule'
needs: check-stale-issues
permissions:
contents: read
issues: write
id-token: write
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 1
- uses: anthropics/claude-code-action@v1
with:
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
prompt: |
REPO: ${{ github.repository }}
Close unconfirmed stale issues:
1. Use `gh issue list --state open --label "stale-check" --limit 1000 --json number,title,comments,updatedAt` to get issues with stale-check label
2. For each issue, check if:
- The "stale-check" comment was added 14+ days ago
- There has been no response from the issue author or activity since the comment
3. For issues meeting the criteria:
- Add a polite closing comment
- Close the issue using `gh issue close`
- Use format: "Closing due to inactivity. Feel free to reopen if this is still relevant."
Use:
- `gh issue view` to check issue comments and activity
- `gh issue comment` to add closing comment
- `gh issue close` to close the issue
claude_args: |
--allowedTools "Bash(gh issue:*)"
--model claude-sonnet-4-5-20250929

.github/workflows/issue-triage.yml (deleted)

@ -1,141 +0,0 @@
name: Issue Triage and Deduplication
on:
issues:
types: [opened]
jobs:
triage:
runs-on: ubuntu-latest
timeout-minutes: 10
permissions:
contents: read
issues: write
id-token: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Run Claude Code for Issue Triage
uses: anthropics/claude-code-action@v1
with:
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
allowed_non_write_users: "*"
github_token: ${{ secrets.GITHUB_TOKEN }}
prompt: |
You're an issue triage assistant for GitHub issues. Your task is to analyze the issue and select appropriate labels from the provided list.
IMPORTANT: Don't post any comments or messages to the issue. Your only action should be to apply labels. DO NOT check for duplicates - that's handled by a separate job.
Issue Information:
- REPO: ${{ github.repository }}
- ISSUE_NUMBER: ${{ github.event.issue.number }}
TASK OVERVIEW:
1. First, fetch the list of labels available in this repository by running: `gh label list`. Run exactly this command with nothing else.
2. Next, use gh commands to get context about the issue:
- Use `gh issue view ${{ github.event.issue.number }}` to retrieve the current issue's details
- Use `gh search issues` to find similar issues that might provide context for proper categorization
- You have access to these Bash commands:
- Bash(gh label list:*) - to get available labels
- Bash(gh issue view:*) - to view issue details
- Bash(gh issue edit:*) - to apply labels to the issue
- Bash(gh search:*) - to search for similar issues
3. Analyze the issue content, considering:
- The issue title and description
- The type of issue (bug report, feature request, question, etc.)
- Technical areas mentioned
- Database mentions (neo4j, falkordb, neptune, etc.)
- LLM providers mentioned (openai, anthropic, gemini, groq, etc.)
- Components affected (embeddings, search, prompts, server, mcp, etc.)
4. Select appropriate labels from the available labels list:
- Choose labels that accurately reflect the issue's nature
- Be specific but comprehensive
- Add database-specific labels if mentioned: neo4j, falkordb, neptune
- Add component labels if applicable
- DO NOT add priority labels (P1, P2, P3)
- DO NOT add duplicate label - that's handled by the deduplication job
5. Apply the selected labels:
- Use `gh issue edit ${{ github.event.issue.number }} --add-label "label1,label2,label3"` to apply your selected labels
- DO NOT post any comments explaining your decision
- DO NOT communicate directly with users
- If no labels are clearly applicable, do not apply any labels
IMPORTANT GUIDELINES:
- Be thorough in your analysis
- Only select labels from the provided list
- DO NOT post any comments to the issue
- Your ONLY action should be to apply labels using gh issue edit
- It's okay to not add any labels if none are clearly applicable
- DO NOT check for duplicates
claude_args: |
--allowedTools "Bash(gh label list:*),Bash(gh issue view:*),Bash(gh issue edit:*),Bash(gh search:*)"
--model claude-sonnet-4-5-20250929
deduplicate:
runs-on: ubuntu-latest
timeout-minutes: 10
needs: triage
permissions:
contents: read
issues: write
id-token: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 1
- name: Check for duplicate issues
uses: anthropics/claude-code-action@v1
with:
allowed_non_write_users: "*"
prompt: |
Analyze this new issue and check if it's a duplicate of existing issues in the repository.
Issue: #${{ github.event.issue.number }}
Repository: ${{ github.repository }}
Your task:
1. Use mcp__github__get_issue to get details of the current issue (#${{ github.event.issue.number }})
2. Search for similar existing OPEN issues using mcp__github__search_issues with relevant keywords from the issue title and body
3. Compare the new issue with existing ones to identify potential duplicates
Criteria for duplicates:
- Same bug or error being reported
- Same feature request (even if worded differently)
- Same question being asked
- Issues describing the same root problem
If you find duplicates:
- Add a comment on the new issue linking to the original issue(s)
- Apply the "duplicate" label to the new issue
- Be polite and explain why it's a duplicate
- Suggest the user follow the original issue for updates
If it's NOT a duplicate:
- Don't add any comments
- Don't modify labels
Use these tools:
- mcp__github__get_issue: Get issue details
- mcp__github__search_issues: Search for similar issues (use state:open)
- mcp__github__list_issues: List recent issues if needed
- mcp__github__create_issue_comment: Add a comment if duplicate found
- mcp__github__update_issue: Add "duplicate" label
Be thorough but efficient. Focus on finding true duplicates, not just similar issues.
anthropic_api_key: ${{ secrets.ANTHROPIC_API_KEY }}
claude_args: |
--allowedTools "mcp__github__get_issue,mcp__github__search_issues,mcp__github__list_issues,mcp__github__create_issue_comment,mcp__github__update_issue,mcp__github__get_issue_comments"
--model claude-sonnet-4-5-20250929

README.md

@ -479,7 +479,7 @@ graphiti = Graphiti(
    cross_encoder=GeminiRerankerClient(
        config=LLMConfig(
            api_key=api_key,
            model="gemini-2.5-flash-lite-preview-06-17"
            model="gemini-2.5-flash-lite"
        )
    )
)
@ -487,7 +487,7 @@ graphiti = Graphiti(
# Now you can use Graphiti with Google Gemini for all components
```
The Gemini reranker uses the `gemini-2.5-flash-lite-preview-06-17` model by default, which is optimized for
The Gemini reranker uses the `gemini-2.5-flash-lite` model by default, which is optimized for
cost-effective and low-latency classification tasks. It uses the same boolean classification approach as the OpenAI
reranker, leveraging Gemini's log probabilities feature to rank passage relevance.
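A minimal sketch of the boolean-classification idea described above, not the library's actual code: each passage is scored by the probability the model assigns to answering 'True' when asked whether the passage is relevant to the query. The logprob values below are hypothetical stand-ins for what a Gemini response would carry.

```python
import math


def rank_passages(passages_with_true_logprob: list[tuple[str, float]]) -> list[tuple[str, float]]:
    """Rank passages by exp(logprob of the 'True' token), i.e. the model's judged relevance."""
    scored = [(passage, math.exp(logprob)) for passage, logprob in passages_with_true_logprob]
    return sorted(scored, key=lambda pair: pair[1], reverse=True)


# Hypothetical 'True'-token logprobs for two (query, passage) pairs.
print(rank_passages([('passage about Graphiti search', -0.11), ('unrelated passage', -2.30)]))
```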

graphiti_core/cross_encoder/gemini_reranker_client.py

@ -37,7 +37,7 @@ else:
logger = logging.getLogger(__name__)
DEFAULT_MODEL = 'gemini-2.5-flash-lite-preview-06-17'
DEFAULT_MODEL = 'gemini-2.5-flash-lite'
class GeminiRerankerClient(CrossEncoderClient):

graphiti_core/driver/neo4j_driver.py

@ -18,7 +18,9 @@ import logging
from collections.abc import Coroutine
from typing import Any
import neo4j.exceptions
from neo4j import AsyncGraphDatabase, EagerResult
from neo4j.exceptions import ClientError
from typing_extensions import LiteralString
from graphiti_core.driver.driver import GraphDriver, GraphDriverSession, GraphProvider
@ -70,6 +72,15 @@ class Neo4jDriver(GraphDriver):
        try:
            result = await self.client.execute_query(cypher_query_, parameters_=params, **kwargs)
        except neo4j.exceptions.ClientError as e:
            # Handle race condition when creating indices/constraints in parallel
            # Neo4j 5.26+ may throw EquivalentSchemaRuleAlreadyExists even with IF NOT EXISTS
            if 'EquivalentSchemaRuleAlreadyExists' in str(e):
                logger.info(f'Index or constraint already exists, continuing: {cypher_query_}')
                # Return empty result to indicate success (index exists)
                return EagerResult([], None, None)  # type: ignore
            logger.error(f'Error executing Neo4j query: {e}\n{cypher_query_}\n{params}')
            raise
        except Exception as e:
            logger.error(f'Error executing Neo4j query: {e}\n{cypher_query_}\n{params}')
            raise
@ -88,6 +99,21 @@ class Neo4jDriver(GraphDriver):
            'CALL db.indexes() YIELD name DROP INDEX name',
        )

    async def _execute_index_query(self, query: LiteralString) -> EagerResult | None:
        """Execute an index creation query, ignoring 'index already exists' errors.

        Neo4j can raise EquivalentSchemaRuleAlreadyExists when concurrent CREATE INDEX
        IF NOT EXISTS queries race, even though the index exists. This is safe to ignore.
        """
        try:
            return await self.execute_query(query)
        except ClientError as e:
            # Ignore "equivalent index already exists" error (race condition with IF NOT EXISTS)
            if 'EquivalentSchemaRuleAlreadyExists' in str(e):
                logger.debug(f'Index already exists (concurrent creation): {query[:50]}...')
                return None
            raise

    async def build_indices_and_constraints(self, delete_existing: bool = False):
        if delete_existing:
            await self.delete_all_indexes()
@ -98,14 +124,7 @@ class Neo4jDriver(GraphDriver):
        index_queries: list[LiteralString] = range_indices + fulltext_indices

        await semaphore_gather(
            *[
                self.execute_query(
                    query,
                )
                for query in index_queries
            ]
        )
        await semaphore_gather(*[self._execute_index_query(query) for query in index_queries])

    async def health_check(self) -> None:
        """Check Neo4j connectivity by running the driver's verify_connectivity method."""


@ -48,7 +48,11 @@ def get_extraction_language_instruction(group_id: str | None = None) -> str:
    Returns:
        str: Language instruction to append to system messages
    """
    return '\n\nAny extracted information should be returned in the same language as it was written in.'
    return (
        '\n\nAny extracted information should be returned in the same language as it was written in. '
        'Only output non-English text when the user has written full sentences or phrases in that non-English language. '
        'Otherwise, output English.'
    )


logger = logging.getLogger(__name__)

graphiti_core/llm_client/gemini_client.py

@ -45,7 +45,7 @@ else:
logger = logging.getLogger(__name__)
DEFAULT_MODEL = 'gemini-2.5-flash'
DEFAULT_SMALL_MODEL = 'gemini-2.5-flash-lite-preview-06-17'
DEFAULT_SMALL_MODEL = 'gemini-2.5-flash-lite'
# Maximum output tokens for different Gemini models
GEMINI_MODEL_MAX_TOKENS = {
@ -53,7 +53,6 @@ GEMINI_MODEL_MAX_TOKENS = {
    'gemini-2.5-pro': 65536,
    'gemini-2.5-flash': 65536,
    'gemini-2.5-flash-lite': 64000,
    'models/gemini-2.5-flash-lite-preview-06-17': 64000,
    # Gemini 2.0 models
    'gemini-2.0-flash': 8192,
    'gemini-2.0-flash-lite': 8192,


@ -31,8 +31,8 @@ from .errors import RateLimitError, RefusalError
logger = logging.getLogger(__name__)
DEFAULT_MODEL = 'gpt-5-mini'
DEFAULT_SMALL_MODEL = 'gpt-5-nano'
DEFAULT_MODEL = 'gpt-4o-mini'
DEFAULT_SMALL_MODEL = 'gpt-4o-mini'
DEFAULT_REASONING = 'minimal'
DEFAULT_VERBOSITY = 'low'


@ -41,6 +41,16 @@ class DateFilter(BaseModel):
    )


class PropertyFilter(BaseModel):
    property_name: str = Field(description='Property name')
    property_value: str | int | float | None = Field(
        description='Value you want to match on for the property'
    )
    comparison_operator: ComparisonOperator = Field(
        description='Comparison operator for the property'
    )


class SearchFilters(BaseModel):
    node_labels: list[str] | None = Field(
        default=None, description='List of node labels to filter on'
@ -53,6 +63,7 @@ class SearchFilters(BaseModel):
    created_at: list[list[DateFilter]] | None = Field(default=None)
    expired_at: list[list[DateFilter]] | None = Field(default=None)
    edge_uuids: list[str] | None = Field(default=None)
    property_filters: list[PropertyFilter] | None = Field(default=None)
def cypher_to_opensearch_operator(op: ComparisonOperator) -> str:
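A minimal usage sketch for the new property_filters field, based only on the models shown in this hunk; the import path and the ComparisonOperator member name are assumptions, and the property values are illustrative:

```python
from graphiti_core.search.search_filters import (  # import path assumed
    ComparisonOperator,
    PropertyFilter,
    SearchFilters,
)

# Only return results whose 'country' property equals 'Peru'.
filters = SearchFilters(
    property_filters=[
        PropertyFilter(
            property_name='country',
            property_value='Peru',
            comparison_operator=ComparisonOperator.equals,  # member name assumed
        )
    ],
)
```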

mcp_server/.env (new file, 49 lines)

@ -0,0 +1,49 @@
# Graphiti MCP Server Environment Configuration
MCP_SERVER_HOST=gmakai.online
# Neo4j Database Configuration
# These settings are used to connect to your Neo4j database
NEO4J_URI=bolt://neo4j:7687
NEO4J_USER=neo4j
NEO4J_PASSWORD=kg3Jsdb2
# OpenAI API Configuration
# Required for LLM operations
OPENAI_API_KEY=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
MODEL_NAME=gpt5.1-nano
# Optional: Only needed for non-standard OpenAI endpoints
OPENAI_BASE_URL=https://openrouter.ai/api/v1
# Optional: Group ID for namespacing graph data
# GROUP_ID=my_project
# Concurrency Control
# Controls how many episodes can be processed simultaneously
# Default: 10 (suitable for OpenAI Tier 3, mid-tier Anthropic)
# Adjust based on your LLM provider's rate limits:
# - OpenAI Tier 1 (free): 1-2
# - OpenAI Tier 2: 5-8
# - OpenAI Tier 3: 10-15
# - OpenAI Tier 4: 20-50
# - Anthropic default: 5-8
# - Anthropic high tier: 15-30
# - Ollama (local): 1-5
# See README.md "Concurrency and LLM Provider 429 Rate Limit Errors" for details
SEMAPHORE_LIMIT=10
# Optional: Path configuration for Docker
# PATH=/root/.local/bin:${PATH}
# Optional: Memory settings for Neo4j (used in Docker Compose)
# NEO4J_server_memory_heap_initial__size=512m
# NEO4J_server_memory_heap_max__size=1G
# NEO4J_server_memory_pagecache_size=512m
# Azure OpenAI configuration
# Optional: Only needed for Azure OpenAI endpoints
# AZURE_OPENAI_ENDPOINT=your_azure_openai_endpoint_here
# AZURE_OPENAI_API_VERSION=2025-01-01-preview
# AZURE_OPENAI_DEPLOYMENT_NAME=gpt-4o-gpt-4o-mini-deployment
# AZURE_OPENAI_EMBEDDING_API_VERSION=2023-05-15
# AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME=text-embedding-3-large-deployment
# AZURE_OPENAI_USE_MANAGED_IDENTITY=false
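The SEMAPHORE_LIMIT comments above describe how concurrency is capped to stay under LLM provider rate limits. A minimal sketch of how such an env-driven cap is typically enforced with asyncio; this is illustrative only, not the MCP server's actual implementation:

```python
import asyncio
import os

SEMAPHORE_LIMIT = int(os.environ.get('SEMAPHORE_LIMIT', '10'))
_episode_semaphore = asyncio.Semaphore(SEMAPHORE_LIMIT)


async def process_episode(episode_id: int) -> None:
    async with _episode_semaphore:  # at most SEMAPHORE_LIMIT episodes in flight at once
        await asyncio.sleep(0.1)  # stand-in for the LLM calls that would run here


async def main() -> None:
    await asyncio.gather(*(process_episode(i) for i in range(25)))


asyncio.run(main())
```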


@ -8,7 +8,7 @@ server:
llm:
  provider: "openai" # Options: openai, azure_openai, anthropic, gemini, groq
  model: "gpt-5-mini"
  model: "gpt-4o-mini"
  max_tokens: 4096
  providers:


@ -8,7 +8,7 @@ server:
llm:
  provider: "openai" # Options: openai, azure_openai, anthropic, gemini, groq
  model: "gpt-5-mini"
  model: "gpt-4o-mini"
  max_tokens: 4096
  providers:


@ -8,7 +8,7 @@ server:
llm:
  provider: "openai" # Options: openai, azure_openai, anthropic, gemini, groq
  model: "gpt-5-mini"
  model: "gpt-4o-mini"
  max_tokens: 4096
  providers:


@ -12,7 +12,7 @@ server:
llm:
  provider: "openai" # Options: openai, azure_openai, anthropic, gemini, groq
  model: "gpt-5-mini"
  model: "gpt-4o-mini"
  max_tokens: 4096
  providers:

mcp_server/docker/docker-compose-neo4j.yml

@ -1,23 +1,23 @@
services:
  neo4j:
    image: neo4j:5.26.0
    image: neo4j:latest
    ports:
      - "7474:7474" # HTTP
      - "7687:7687" # Bolt
    environment:
      - NEO4J_AUTH=${NEO4J_USER:-neo4j}/${NEO4J_PASSWORD:-demodemo}
      - NEO4J_server_memory_heap_initial__size=512m
      - NEO4J_server_memory_heap_max__size=1G
      - NEO4J_server_memory_pagecache_size=512m
      - NEO4J_AUTH=${NEO4J_USER:-neo4j}/${NEO4J_PASSWORD:-kg3Jsdb2}
    volumes:
      - neo4j_data:/data
      - neo4j_logs:/logs
      - /data/neo4j/data:/data
      - /data/neo4j/logs:/logs
      - /data/neo4j/plugins:/plugins
      - /data/neo4j/config:/config
    healthcheck:
      test: ["CMD", "wget", "-O", "/dev/null", "http://localhost:7474"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 30s
    restart: always
  graphiti-mcp:
    image: zepai/knowledge-graph-mcp:standalone
@ -27,9 +27,9 @@ services:
    build:
      context: ..
      dockerfile: docker/Dockerfile.standalone
    env_file:
      - path: ../.env
        required: false
    #env_file:
    #  - path: ../.env
    #    required: true
    depends_on:
      neo4j:
        condition: service_healthy
@ -37,13 +37,18 @@ services:
      # Database configuration
      - NEO4J_URI=${NEO4J_URI:-bolt://neo4j:7687}
      - NEO4J_USER=${NEO4J_USER:-neo4j}
      - NEO4J_PASSWORD=${NEO4J_PASSWORD:-demodemo}
      - NEO4J_PASSWORD=${NEO4J_PASSWORD:-kg3Jsdb2}
      - NEO4J_DATABASE=${NEO4J_DATABASE:-neo4j}
      # Application configuration
      - GRAPHITI_GROUP_ID=${GRAPHITI_GROUP_ID:-main}
      - SEMAPHORE_LIMIT=${SEMAPHORE_LIMIT:-10}
      - CONFIG_PATH=/app/mcp/config/config.yaml
      - PATH=/root/.local/bin:${PATH}
      - MCP_SERVER_HOST=gmakai.online
      - OPENAI_API_KEY=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
      - MODEL_NAME=gpt5.1-nano
      - OPENAI_BASE_URL=https://openrouter.ai/api/v1
    volumes:
      - ../config/config-docker-neo4j.yaml:/app/mcp/config/config.yaml:ro
    ports:


@ -147,7 +147,7 @@ class LLMConfig(BaseModel):
"""LLM configuration."""
provider: str = Field(default='openai', description='LLM provider')
model: str = Field(default='gpt-4.1', description='Model name')
model: str = Field(default='gpt-4o-mini', description='Model name')
temperature: float | None = Field(
default=None, description='Temperature (optional, defaults to None for reasoning models)'
)

pyproject.toml

@ -1,7 +1,7 @@
[project]
name = "graphiti-core"
description = "A temporal graph building library"
version = "0.24.1"
version = "0.24.3"
authors = [
{ name = "Paul Paliychuk", email = "paul@getzep.com" },
{ name = "Preston Rasmussen", email = "preston@getzep.com" },


@ -455,6 +455,62 @@
"created_at": "2025-11-06T08:39:46Z",
"repoId": 840056306,
"pullRequestNo": 1053
},
{
"name": "supmo668",
"id": 28805779,
"comment_id": 3550309664,
"created_at": "2025-11-19T01:56:25Z",
"repoId": 840056306,
"pullRequestNo": 1072
},
{
"name": "donbr",
"id": 7340008,
"comment_id": 3568970102,
"created_at": "2025-11-24T05:19:42Z",
"repoId": 840056306,
"pullRequestNo": 1081
},
{
"name": "apetti1920",
"id": 4706645,
"comment_id": 3572726648,
"created_at": "2025-11-24T21:07:34Z",
"repoId": 840056306,
"pullRequestNo": 1084
},
{
"name": "ZLBillShaw",
"id": 55940186,
"comment_id": 3583997833,
"created_at": "2025-11-27T02:45:53Z",
"repoId": 840056306,
"pullRequestNo": 1085
},
{
"name": "ronaldmego",
"id": 17481958,
"comment_id": 3617267429,
"created_at": "2025-12-05T14:59:42Z",
"repoId": 840056306,
"pullRequestNo": 1094
},
{
"name": "NShumway",
"id": 29358113,
"comment_id": 3634967978,
"created_at": "2025-12-10T01:26:49Z",
"repoId": 840056306,
"pullRequestNo": 1102
},
{
"name": "husniadil",
"id": 10581130,
"comment_id": 3650156180,
"created_at": "2025-12-14T03:37:59Z",
"repoId": 840056306,
"pullRequestNo": 1105
}
]
}

tests/llm_client/test_gemini_client.py

@ -455,7 +455,6 @@ class TestGeminiClientGenerateResponse:
('gemini-2.5-flash', 65536),
('gemini-2.5-pro', 65536),
('gemini-2.5-flash-lite', 64000),
('models/gemini-2.5-flash-lite-preview-06-17', 64000),
('gemini-2.0-flash', 8192),
('gemini-1.5-pro', 8192),
('gemini-1.5-flash', 8192),

uv.lock (generated, 2 lines changed)

@ -808,7 +808,7 @@ wheels = [
[[package]]
name = "graphiti-core"
version = "0.24.1"
version = "0.24.3"
source = { editable = "." }
dependencies = [
{ name = "diskcache" },