# Graphiti MCP Server Environment Configuration
MCP_SERVER_HOST=gmakai.online
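# Illustrative reachability check once the MCP server is running. The HTTP port
# and the /sse transport path are assumptions here; adjust them to match your
# actual deployment:
#   curl -i --max-time 3 http://gmakai.online:8000/sse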
# Neo4j Database Configuration
# These settings are used to connect to your Neo4j database
NEO4J_URI=bolt://gmakai.online:7687
NEO4J_USER=neo4j
NEO4J_PASSWORD=kg3Jsdb2
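# Illustrative connectivity check for the credentials above (assumes cypher-shell
# is installed; run it after exporting these variables, or substitute the values):
#   cypher-shell -a bolt://gmakai.online:7687 -u neo4j -p "$NEO4J_PASSWORD" "RETURN 1;"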

# OpenAI API Configuration
# Required for LLM operations
OPENAI_API_KEY=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
MODEL_NAME=gpt5.1-nano

# Optional: Only needed for non-standard OpenAI endpoints
OPENAI_BASE_URL=https://openrouter.ai/api/v1
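# Illustrative smoke test for the key/base-URL pair above; any OpenAI-compatible
# endpoint (OpenRouter included) should answer a GET on its /models route:
#   curl -s "$OPENAI_BASE_URL/models" -H "Authorization: Bearer $OPENAI_API_KEY"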

# Optional: Group ID for namespacing graph data
# GROUP_ID=my_project

# Concurrency Control
# Controls how many episodes can be processed simultaneously
# Default: 10 (suitable for OpenAI Tier 3, mid-tier Anthropic)
# Adjust based on your LLM provider's rate limits:
# - OpenAI Tier 1 (free): 1-2
# - OpenAI Tier 2: 5-8
# - OpenAI Tier 3: 10-15
# - OpenAI Tier 4: 20-50
# - Anthropic default: 5-8
# - Anthropic high tier: 15-30
# - Ollama (local): 1-5
# See README.md "Concurrency and LLM Provider 429 Rate Limit Errors" for details
SEMAPHORE_LIMIT=10
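# Illustrative one-off override without editing this file (assumes your shell or
# docker-compose.yml forwards the variable to the server process), e.g. for an
# OpenAI Tier 1 key:
#   SEMAPHORE_LIMIT=2 docker compose up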

# Optional: Path configuration for Docker
# PATH=/root/.local/bin:${PATH}

# Optional: Memory settings for Neo4j (used in Docker Compose)
# NEO4J_server_memory_heap_initial__size=512m
# NEO4J_server_memory_heap_max__size=1G
# NEO4J_server_memory_pagecache_size=512m
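# For reference: the official Neo4j Docker image translates these variable names
# into neo4j.conf keys, turning "_" into "." and "__" into a literal underscore,
# so NEO4J_server_memory_heap_max__size maps to server.memory.heap.max_size.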

# Azure OpenAI configuration
# Optional: Only needed for Azure OpenAI endpoints
# AZURE_OPENAI_ENDPOINT=your_azure_openai_endpoint_here
# AZURE_OPENAI_API_VERSION=2025-01-01-preview
# AZURE_OPENAI_DEPLOYMENT_NAME=gpt-4o-gpt-4o-mini-deployment
# AZURE_OPENAI_EMBEDDING_API_VERSION=2023-05-15
# AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME=text-embedding-3-large-deployment
# AZURE_OPENAI_USE_MANAGED_IDENTITY=false