feat: Add support for Chutes API integration

- Introduced CHUTES_API_TOKEN in .env.example files for configuration.
- Updated README.md in quickstart example to include Chutes API setup.
- Created quickstart_chutes.py example for demonstrating Chutes integration.
- Added ChutesEmbedder and ChutesClient classes for embedding and LLM functionality.
- Updated LLM and embedder provider configurations to support Chutes.
- Enhanced factories to include Chutes client and embedder.
- Modified schema.py to define Chutes provider configuration.
- Updated mcp_server configuration files to integrate Chutes API.
- Added necessary dependencies in pyproject.toml for Chutes support.
facronactz 2025-11-15 16:12:09 +07:00
parent 90d7757c17
commit a7f2c92bb4
23 changed files with 547 additions and 23 deletions
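
Taken together, the new pieces wire up as in the minimal sketch below (distilled from the quickstart example added in this commit; the Neo4j credentials are the quickstart's placeholder defaults):

```python
from graphiti_core import Graphiti
from graphiti_core.embedder.chutes import ChutesEmbedder
from graphiti_core.llm_client.chutes_client import ChutesClient

# Both clients read CHUTES_API_TOKEN from the environment by default.
graphiti = Graphiti(
    'bolt://localhost:7687',  # placeholder Neo4j URI
    'neo4j',                  # placeholder user
    'password',               # placeholder password
    llm_client=ChutesClient(),
    embedder=ChutesEmbedder(),
)
```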

View file

@@ -1,4 +1,5 @@
OPENAI_API_KEY=
CHUTES_API_TOKEN=
# Neo4j database connection
NEO4J_URI=

View file

@@ -1,2 +1,3 @@
# OpenAI API key (required for LLM inference and embeddings)
OPENAI_API_KEY=your_api_key_here
CHUTES_API_TOKEN=your_api_key_here

View file

@@ -35,6 +35,7 @@ pip install graphiti-core
```bash
# Required for LLM and embedding
export OPENAI_API_KEY=your_openai_api_key
export CHUTES_API_TOKEN=your_chutes_api_key

# Optional Neo4j connection parameters (defaults shown)
export NEO4J_URI=bolt://localhost:7687
@@ -67,6 +68,9 @@ python quickstart_falkordb.py

# For Amazon Neptune
python quickstart_neptune.py

# For Chutes
python quickstart_chutes.py
```

## What This Example Demonstrates

View file

@@ -0,0 +1,157 @@
"""
Copyright 2025, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import asyncio
import json
import logging
import os
from datetime import datetime, timezone
from logging import INFO

from dotenv import load_dotenv

from graphiti_core import Graphiti
from graphiti_core.embedder.chutes import ChutesEmbedder
from graphiti_core.llm_client.chutes_client import ChutesClient
from graphiti_core.nodes import EpisodeType

#################################################
# CONFIGURATION
#################################################
# Set up logging and environment variables for
# connecting to Neo4j database
#################################################

# Configure logging
logging.basicConfig(
    level=INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
)
logger = logging.getLogger(__name__)

load_dotenv()

# Neo4j connection parameters
# Make sure Neo4j Desktop is running with a local DBMS started
neo4j_uri = os.environ.get('NEO4J_URI', 'bolt://localhost:7687')
neo4j_user = os.environ.get('NEO4J_USER', 'neo4j')
neo4j_password = os.environ.get('NEO4J_PASSWORD', 'password')
chutes_api_key = os.environ.get('CHUTES_API_TOKEN')

if not neo4j_uri or not neo4j_user or not neo4j_password:
    raise ValueError('NEO4J_URI, NEO4J_USER, and NEO4J_PASSWORD must be set')

if not chutes_api_key:
    raise ValueError('CHUTES_API_TOKEN must be set')
async def main():
    #################################################
    # INITIALIZATION
    #################################################
    # Connect to Neo4j and set up Graphiti indices
    # This is required before using other Graphiti
    # functionality
    #################################################

    # Initialize Graphiti with Neo4j connection and Chutes provider
    graphiti = Graphiti(
        neo4j_uri,
        neo4j_user,
        neo4j_password,
        llm_client=ChutesClient(),
        embedder=ChutesEmbedder(),
    )

    try:
        #################################################
        # ADDING EPISODES
        #################################################
        # Episodes are the primary units of information
        # in Graphiti. They can be text or structured JSON
        # and are automatically processed to extract entities
        # and relationships.
        #################################################

        # Example: Add Episodes
        # Episodes list containing both text and JSON episodes
        episodes = [
            {
                'content': 'Kamala Harris is the Attorney General of California. She was previously '
                'the district attorney for San Francisco.',
                'type': EpisodeType.text,
                'description': 'podcast transcript',
            },
            {
                'content': 'As AG, Harris was in office from January 3, 2011 to January 3, 2017',
                'type': EpisodeType.text,
                'description': 'podcast transcript',
            },
        ]
        # Add episodes to the graph
        for i, episode in enumerate(episodes):
            await graphiti.add_episode(
                name=f'Freakonomics Radio {i}',
                episode_body=episode['content']
                if isinstance(episode['content'], str)
                else json.dumps(episode['content']),
                source=episode['type'],
                source_description=episode['description'],
                reference_time=datetime.now(timezone.utc),
            )
            print(f'Added episode: Freakonomics Radio {i} ({episode["type"].value})')

        #################################################
        # BASIC SEARCH
        #################################################
        # The simplest way to retrieve relationships (edges)
        # from Graphiti is using the search method, which
        # performs a hybrid search combining semantic
        # similarity and BM25 text retrieval.
        #################################################

        # Perform a hybrid search combining semantic similarity and BM25 retrieval
        print("\nSearching for: 'Who was the California Attorney General?'")
        results = await graphiti.search('Who was the California Attorney General?')

        # Print search results
        print('\nSearch Results:')
        for result in results:
            print(f'UUID: {result.uuid}')
            print(f'Fact: {result.fact}')
            if hasattr(result, 'valid_at') and result.valid_at:
                print(f'Valid from: {result.valid_at}')
            if hasattr(result, 'invalid_at') and result.invalid_at:
                print(f'Valid until: {result.invalid_at}')
            print('---')

    finally:
        #################################################
        # CLEANUP
        #################################################
        # Always close the connection to Neo4j when
        # finished to properly release resources
        #################################################

        # Close the connection
        await graphiti.close()
        print('\nConnection closed')


if __name__ == '__main__':
    asyncio.run(main())

View file

@@ -1,8 +1,11 @@
from .client import EmbedderClient
from .openai import OpenAIEmbedder, OpenAIEmbedderConfig
from .chutes import ChutesEmbedder, ChutesEmbedderConfig

__all__ = [
    'EmbedderClient',
    'OpenAIEmbedder',
    'OpenAIEmbedderConfig',
    'ChutesEmbedder',
    'ChutesEmbedderConfig',
]

View file

@@ -0,0 +1,65 @@
"""
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from collections.abc import Iterable

from openai import AsyncOpenAI
from openai.types import EmbeddingModel

from .client import EmbedderClient, EmbedderConfig

DEFAULT_EMBEDDING_MODEL = "qwen-3-8b"


class ChutesEmbedderConfig(EmbedderConfig):
    embedding_model: EmbeddingModel | str = DEFAULT_EMBEDDING_MODEL
    api_key: str | None = os.environ.get("CHUTES_API_TOKEN")
    base_url: str | None = "https://chutes-qwen-qwen3-embedding-8b.chutes.ai/v1"


class ChutesEmbedder(EmbedderClient):
    """
    Chutes Embedder Client
    """

    def __init__(
        self,
        config: ChutesEmbedderConfig | None = None,
        client: AsyncOpenAI | None = None,
    ):
        if config is None:
            config = ChutesEmbedderConfig()
        self.config = config

        if client is not None:
            self.client = client
        else:
            self.client = AsyncOpenAI(api_key=config.api_key, base_url=config.base_url)

    async def create(
        self, input_data: str | list[str] | Iterable[int] | Iterable[Iterable[int]]
    ) -> list[float]:
        # Pass the configured model; the embeddings API requires a model
        # string and would reject model=None.
        result = await self.client.embeddings.create(
            input=input_data, model=self.config.embedding_model
        )
        return result.data[0].embedding[: self.config.embedding_dim]

    async def create_batch(self, input_data_list: list[str]) -> list[list[float]]:
        result = await self.client.embeddings.create(
            input=input_data_list, model=self.config.embedding_model
        )
        return [embedding.embedding[: self.config.embedding_dim] for embedding in result.data]
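
A usage sketch for the embedder above. The api_key value is a placeholder, and embedding_dim=1024 is an assumption about the endpoint's output dimension, not a value taken from this commit:

```python
from graphiti_core.embedder.chutes import ChutesEmbedder, ChutesEmbedderConfig

# Override defaults instead of relying on the CHUTES_API_TOKEN env var.
embedder = ChutesEmbedder(
    config=ChutesEmbedderConfig(
        api_key='your_chutes_api_token',  # placeholder
        embedding_model='qwen-3-8b',      # default model from this commit
        embedding_dim=1024,               # assumption: dimension returned by the endpoint
    )
)
```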

View file

@@ -18,5 +18,6 @@ from .client import LLMClient
from .config import LLMConfig
from .errors import RateLimitError
from .openai_client import OpenAIClient
from .chutes_client import ChutesClient

__all__ = ['LLMClient', 'OpenAIClient', 'LLMConfig', 'RateLimitError']
__all__ = ['LLMClient', 'OpenAIClient', 'LLMConfig', 'RateLimitError', 'ChutesClient']

View file

@@ -0,0 +1,90 @@
"""
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import typing

from openai import NOT_GIVEN, AsyncOpenAI
from openai.types.chat import ChatCompletionMessageParam
from pydantic import BaseModel

from .config import DEFAULT_MAX_TOKENS, LLMConfig
from .openai_base_client import DEFAULT_REASONING, DEFAULT_VERBOSITY, BaseOpenAIClient


class ChutesClient(BaseOpenAIClient):
    """
    ChutesClient is a client class for interacting with Chutes's language models.
    """

    def __init__(
        self,
        config: LLMConfig | None = None,
        cache: bool = False,
        client: typing.Any = None,
        max_tokens: int = DEFAULT_MAX_TOKENS,
        reasoning: str = DEFAULT_REASONING,
        verbosity: str = DEFAULT_VERBOSITY,
    ):
        """
        Initialize the ChutesClient with the provided configuration, cache setting, and client.
        """
        # Default the config before handing it to the base class so the base
        # client and the AsyncOpenAI client below see the same values.
        if config is None:
            config = LLMConfig()

        super().__init__(config, cache, max_tokens, reasoning, verbosity)

        if client is None:
            self.client = AsyncOpenAI(
                api_key=config.api_key,
                base_url="https://llm.chutes.ai/v1",
            )
        else:
            self.client = client

    async def _create_structured_completion(
        self,
        model: str,
        messages: list[ChatCompletionMessageParam],
        temperature: float | None,
        max_tokens: int,
        response_model: type[BaseModel],
        reasoning: str | None = None,
        verbosity: str | None = None,
    ):
        """Create a structured completion using Chutes's API."""
        return await self._create_completion(
            model, messages, temperature, max_tokens, response_model
        )

    async def _create_completion(
        self,
        model: str,
        messages: list[ChatCompletionMessageParam],
        temperature: float | None,
        max_tokens: int,
        response_model: type[BaseModel] | None = None,
        reasoning: str | None = None,
        verbosity: str | None = None,
    ):
        """Create a regular completion with JSON format."""
        return await self.client.chat.completions.create(
            model=model,
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens,
            # Omit response_format (NOT_GIVEN) when no response model is
            # requested; passing None would send an invalid null value.
            response_format={"type": "json_object"} if response_model else NOT_GIVEN,
        )
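
A usage sketch for the client above. The API key is a placeholder; the model name is the one referenced in the mcp_server config.yaml later in this commit:

```python
from graphiti_core.llm_client.chutes_client import ChutesClient
from graphiti_core.llm_client.config import LLMConfig

llm_client = ChutesClient(
    config=LLMConfig(
        api_key='your_chutes_api_token',           # placeholder
        model='moonshotai/Kimi-K2-Instruct-0905',  # model used in config.yaml below
    )
)
```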

View file

@@ -223,6 +223,8 @@ class LLMClient(ABC):
            return 'gemini'
        elif 'groq' in class_name:
            return 'groq'
        elif 'chutes' in class_name:
            return 'chutes'
        else:
            return 'unknown'

View file

@@ -25,6 +25,11 @@ class ModelSize(Enum):
    medium = 'medium'


class LLMProvider(str, Enum):
    OPENAI = "openai"
    CHUTES = "chutes"


class LLMConfig:
    """
    Configuration class for the Language Learning Model (LLM).
@@ -42,6 +47,7 @@ class LLMConfig:
        temperature: float = DEFAULT_TEMPERATURE,
        max_tokens: int = DEFAULT_MAX_TOKENS,
        small_model: str | None = None,
        provider: LLMProvider = LLMProvider.OPENAI,
    ):
        """
        Initialize the LLMConfig with the provided parameters.
@@ -59,6 +65,8 @@
            small_model (str, optional): The specific LLM model to use for generating responses of simpler prompts.
                Defaults to "gpt-4.1-nano".
            provider (LLMProvider, optional): The LLM provider to use.
                Defaults to LLMProvider.OPENAI.
        """
        self.base_url = base_url
        self.api_key = api_key
@@ -66,3 +74,4 @@
        self.small_model = small_model
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.provider = provider
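
For illustration, selecting the new provider explicitly might look like this (a sketch using only names introduced above; the API key is a placeholder):

```python
from graphiti_core.llm_client.config import LLMConfig, LLMProvider

config = LLMConfig(
    api_key='your_chutes_api_token',  # placeholder
    provider=LLMProvider.CHUTES,      # new enum value added in this commit
)
```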

View file

@@ -115,14 +115,21 @@ class BaseOpenAIClient(LLMClient):
    def _handle_structured_response(self, response: Any) -> dict[str, Any]:
        """Handle structured response parsing and validation."""
        response_object = response.output_text
        if response_object:
            return json.loads(response_object)
        elif response_object.refusal:
            raise RefusalError(response_object.refusal)
        # Check if the response has the beta output_text attribute
        if hasattr(response, 'output_text') and response.output_text is not None:
            response_content = response.output_text
        elif hasattr(response, 'choices') and response.choices:
            # Fallback to standard OpenAI chat completion content
            response_content = response.choices[0].message.content
        else:
            raise Exception(f'Invalid response from LLM: {response_object.model_dump()}')
            raise Exception(f'Invalid response from LLM: {response}')

        if response_content:
            return json.loads(response_content)
        elif hasattr(response, 'refusal') and response.refusal:
            raise RefusalError(response.refusal)
        else:
            raise Exception(f'Invalid response from LLM: {response}')

    def _handle_json_response(self, response: Any) -> dict[str, Any]:
        """Handle JSON response parsing."""

View file

@@ -180,6 +180,14 @@ def nodes(context: dict[str, Any]) -> list[Message]:
- Only use idx values that appear in EXISTING ENTITIES.
- Set duplicate_idx to the smallest idx you collected for that entity, or -1 if duplicates is empty.
- Never fabricate entities or indices.
Response Format:
Your response must be a JSON object with a single key: "entity_resolutions".
The value of "entity_resolutions" must be a list of JSON objects, where each object has the following keys:
- "id": The integer id from the ENTITIES list.
- "name": The best full name for the entity.
- "duplicate_idx": The index of the best duplicate match from EXISTING ENTITIES, or -1 if none.
- "duplicates": A sorted list of all duplicate indices from EXISTING ENTITIES.
""",
),
]
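
For illustration, a response matching the entity_resolutions format described above could look like this (entity names and indices are hypothetical):

```json
{
  "entity_resolutions": [
    {"id": 0, "name": "Kamala Harris", "duplicate_idx": 1, "duplicates": [1, 3]},
    {"id": 1, "name": "San Francisco", "duplicate_idx": -1, "duplicates": []}
  ]
}
```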

View file

@@ -112,6 +112,16 @@ You may use information from the PREVIOUS MESSAGES only to disambiguate referenc
{context['custom_prompt']}
# RESPONSE FORMAT
Your response must be a JSON object with a single key: "edges".
The value of "edges" must be a list of JSON objects, where each object has the following keys:
- "relation_type": A SCREAMING_SNAKE_CASE string representing the fact type (e.g., FOUNDED, WORKS_AT).
- "source_entity_id": The ID of the source entity from the ENTITIES list.
- "target_entity_id": The ID of the target entity from the ENTITIES list.
- "fact": A natural language description of the relationship.
- "valid_at": The start time of the relationship in ISO 8601 format (or null).
- "invalid_at": The end time of the relationship in ISO 8601 format (or null).
# EXTRACTION RULES
1. **Entity ID Validation**: `source_entity_id` and `target_entity_id` must use only the `id` values from the ENTITIES list provided above.
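
For illustration, a response matching the edges format described above could look like this (IDs, fact, and timestamps are hypothetical):

```json
{
  "edges": [
    {
      "relation_type": "WORKS_AT",
      "source_entity_id": 0,
      "target_entity_id": 1,
      "fact": "Kamala Harris served as district attorney for San Francisco.",
      "valid_at": "2004-01-08T00:00:00Z",
      "invalid_at": "2011-01-03T00:00:00Z"
    }
  ]
}
```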

View file

@@ -125,6 +125,12 @@ reference entities. Only extract distinct entities from the CURRENT MESSAGE. Don
- Be **explicit and unambiguous** in naming entities (e.g., use full names when available).
{context['custom_prompt']}
Response Format:
Your response must be a JSON object with a single key: "extracted_entities".
The value of "extracted_entities" must be a list of JSON objects, where each object has the following keys:
- "name": The name of the extracted entity.
- "entity_type_id": The ID of the classified entity type.
"""
return [
Message(role='system', content=sys_prompt),
@@ -154,6 +160,12 @@ Given the above source description and JSON, extract relevant entities from the
For each entity extracted, also determine its entity type based on the provided ENTITY TYPES and their descriptions.
Indicate the classified entity type by providing its entity_type_id.
Response Format:
Your response must be a JSON object with a single key: "extracted_entities".
The value of "extracted_entities" must be a list of JSON objects, where each object has the following keys:
- "name": The name of the extracted entity.
- "entity_type_id": The ID of the classified entity type.
Guidelines:
1. Extract all entities that the JSON represents. This will often be something like a "name" or "user" field
2. Extract all entities mentioned in all other properties throughout the JSON structure
@@ -184,6 +196,12 @@ Indicate the classified entity type by providing its entity_type_id.
{context['custom_prompt']}
Response Format:
Your response must be a JSON object with a single key: "extracted_entities".
The value of "extracted_entities" must be a list of JSON objects, where each object has the following keys:
- "name": The name of the extracted entity.
- "entity_type_id": The ID of the classified entity type.
Guidelines:
1. Extract significant entities, concepts, or actors mentioned in the conversation.
2. Avoid creating nodes for relationships or actions.
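
For illustration, a response matching the extracted_entities format described in these prompts could look like this (names and type IDs are hypothetical):

```json
{
  "extracted_entities": [
    {"name": "Kamala Harris", "entity_type_id": 0},
    {"name": "San Francisco", "entity_type_id": 1}
  ]
}
```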

View file

@@ -2,13 +2,13 @@
# Neo4j Database Configuration
# These settings are used to connect to your Neo4j database
NEO4J_URI=bolt://localhost:7687
NEO4J_URI=neo4j+s://6a25b5ba.databases.neo4j.io
NEO4J_USER=neo4j
NEO4J_PASSWORD=demodemo
NEO4J_PASSWORD=YuU8ypE1yzjVbn5LPaNkLcotsLL4aAOILGIQtSG2KAY
# OpenAI API Configuration
# Required for LLM operations
OPENAI_API_KEY=your_openai_api_key_here
OPENAI_API_KEY=dummy-key
MODEL_NAME=gpt-4.1-mini
# Optional: Only needed for non-standard OpenAI endpoints
@@ -47,3 +47,6 @@ SEMAPHORE_LIMIT=10
# AZURE_OPENAI_EMBEDDING_API_VERSION=2023-05-15
# AZURE_OPENAI_EMBEDDING_DEPLOYMENT_NAME=text-embedding-3-large-deployment
# AZURE_OPENAI_USE_MANAGED_IDENTITY=false
# Chutes AI Configuration
CHUTES_API_KEY=

View file

@@ -11,8 +11,8 @@ server:
  port: 8000

llm:
  provider: "openai" # Options: openai, azure_openai, anthropic, gemini, groq
  model: "gpt-5-mini"
  provider: "chutes" # Options: openai, azure_openai, anthropic, gemini, groq, chutes
  model: "moonshotai/Kimi-K2-Instruct-0905"
  max_tokens: 4096

  providers:
@@ -42,9 +42,14 @@ llm:
      api_key: ${GROQ_API_KEY}
      api_url: ${GROQ_API_URL:https://api.groq.com/openai/v1}

    chutes:
      api_key: ${CHUTES_API_KEY}
      api_url: ${CHUTES_API_URL:https://llm.chutes.ai/v1}
      embedding_url: ${CHUTES_EMBEDDING_URL:https://chutes-qwen-qwen3-embedding-8b.chutes.ai/v1}

embedder:
  provider: "openai" # Options: openai, azure_openai, gemini, voyage
  model: "text-embedding-3-small"
  provider: "chutes" # Options: openai, azure_openai, gemini, voyage, chutes
  model: "Qwen/Qwen2-57B-A14B-Instruct"
  dimensions: 1536

  providers:
@@ -70,8 +75,13 @@ embedder:
      api_url: ${VOYAGE_API_URL:https://api.voyageai.com/v1}
      model: "voyage-3"

    chutes:
      api_key: ${CHUTES_API_KEY}
      api_url: ${CHUTES_API_URL:https://llm.chutes.ai/v1}
      embedding_url: ${CHUTES_EMBEDDING_URL:https://chutes-qwen-qwen3-embedding-8b.chutes.ai/v1}

database:
  provider: "falkordb" # Default: falkordb. Options: neo4j, falkordb
  provider: "neo4j" # Default: falkordb. Options: neo4j, falkordb

  providers:
    falkordb:
@@ -88,7 +98,7 @@ database:
graphiti:
  group_id: ${GRAPHITI_GROUP_ID:main}
  episode_id_prefix: ${EPISODE_ID_PREFIX:}
  episode_id_prefix: ${EPISODE_ID_PREFIX:ep}
  user_id: ${USER_ID:mcp_user}

  entity_types:
    - name: "Preference"

View file

@@ -5,7 +5,7 @@ description = "Graphiti MCP Server"
readme = "README.md"
requires-python = ">=3.10,<4"
dependencies = [
    "mcp>=1.9.4",
    "mcp[cli]>=1.9.4",
    "openai>=1.91.0",
    "graphiti-core[falkordb]>=0.23.1",
    "pydantic-settings>=2.0.0",

View file

@@ -125,6 +125,14 @@ class GroqProviderConfig(BaseModel):
    api_url: str = 'https://api.groq.com/openai/v1'


class ChutesProviderConfig(BaseModel):
    """Chutes provider configuration."""

    api_key: str | None = None
    api_url: str = 'https://llm.chutes.ai/v1'
    embedding_url: str = 'https://chutes-qwen-qwen3-embedding-8b.chutes.ai/v1'


class VoyageProviderConfig(BaseModel):
    """Voyage AI provider configuration."""
@@ -141,6 +149,7 @@ class LLMProvidersConfig(BaseModel):
    anthropic: AnthropicProviderConfig | None = None
    gemini: GeminiProviderConfig | None = None
    groq: GroqProviderConfig | None = None
    chutes: ChutesProviderConfig | None = None


class LLMConfig(BaseModel):
@@ -162,6 +171,7 @@ class EmbedderProvidersConfig(BaseModel):
    azure_openai: AzureOpenAIProviderConfig | None = None
    gemini: GeminiProviderConfig | None = None
    voyage: VoyageProviderConfig | None = None
    chutes: ChutesProviderConfig | None = None


class EmbedderConfig(BaseModel):

View file

@@ -752,7 +752,6 @@ async def get_status() -> StatusResponse:
            message=f'Graphiti MCP server is running but database connection failed: {error_msg}',
        )


@mcp.custom_route('/health', methods=['GET'])
async def health_check(request) -> JSONResponse:
    """Health check endpoint for Docker and load balancers."""
@@ -796,12 +795,12 @@ async def initialize_server() -> ServerConfig:
    # Provider selection arguments
    parser.add_argument(
        '--llm-provider',
        choices=['openai', 'azure_openai', 'anthropic', 'gemini', 'groq', 'chutes'],
        help='LLM provider to use',
    )
    parser.add_argument(
        '--embedder-provider',
        choices=['openai', 'azure_openai', 'gemini', 'voyage', 'chutes'],
        help='Embedder provider to use',
    )
    parser.add_argument(

View file

@@ -0,0 +1,6 @@
"""Chutes clients for LLM and Embedder."""

from graphiti_core.llm_client.chutes_client import ChutesClient as ChutesLLMClient
from graphiti_core.embedder.chutes import ChutesEmbedder as ChutesEmbedderClient

__all__ = ["ChutesLLMClient", "ChutesEmbedderClient"]

View file

@@ -43,6 +43,13 @@ try:
except ImportError:
    HAS_VOYAGE_EMBEDDER = False

try:
    from services.chutes_client import ChutesEmbedderClient

    HAS_CHUTES_EMBEDDER = True
except ImportError:
    HAS_CHUTES_EMBEDDER = False

try:
    from graphiti_core.llm_client.azure_openai_client import AzureOpenAILLMClient
@@ -70,6 +77,12 @@ try:
    HAS_GROQ = True
except ImportError:
    HAS_GROQ = False

try:
    from services.chutes_client import ChutesLLMClient

    HAS_CHUTES_LLM = True
except ImportError:
    HAS_CHUTES_LLM = False

from utils.utils import create_azure_credential_token_provider
@@ -246,6 +259,27 @@ class LLMClientFactory:
                )
                return GroqClient(config=llm_config)

            case 'chutes':
                if not HAS_CHUTES_LLM:
                    raise ValueError(
                        'Chutes LLM client not available in current graphiti-core version'
                    )
                if not config.providers.chutes:
                    raise ValueError('Chutes provider configuration not found')

                api_key = config.providers.chutes.api_key
                _validate_api_key('Chutes', api_key, logger)

                llm_config = GraphitiLLMConfig(
                    api_key=api_key,
                    base_url=config.providers.chutes.api_url,
                    model=config.model,
                    small_model=config.model,
                    temperature=config.temperature,
                    max_tokens=config.max_tokens,
                )
                return ChutesLLMClient(config=llm_config)

            case _:
                raise ValueError(f'Unsupported LLM provider: {provider}')
@@ -356,6 +390,27 @@ class EmbedderFactory:
                )
                return VoyageAIEmbedder(config=voyage_config)

            case 'chutes':
                if not HAS_CHUTES_EMBEDDER:
                    raise ValueError(
                        'Chutes embedder not available in current graphiti-core version'
                    )
                if not config.providers.chutes:
                    raise ValueError('Chutes provider configuration not found')

                api_key = config.providers.chutes.api_key
                _validate_api_key('Chutes Embedder', api_key, logger)

                from graphiti_core.embedder.chutes import ChutesEmbedderConfig

                embedder_config = ChutesEmbedderConfig(
                    api_key=api_key,
                    base_url=config.providers.chutes.embedding_url,
                    embedding_model=config.model,
                    embedding_dim=config.dimensions,
                )
                return ChutesEmbedderClient(config=embedder_config)

            case _:
                raise ValueError(f'Unsupported Embedder provider: {provider}')

mcp_server/uv.lock (generated, 68 lines changed)
View file

@@ -994,6 +994,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/79/ed/7a48189bdad850cfd47df671204c31779dd190de6bc681f169d4535f852e/langsmith-0.4.16-py3-none-any.whl", hash = "sha256:9ba95ed09b057dfe227e882f5446e1824bfc9f2c89de542ee6f0f8d90ab953a7", size = 375761, upload-time = "2025-08-22T15:45:14.82Z" },
]
[[package]]
name = "markdown-it-py"
version = "4.0.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "mdurl" },
]
sdist = { url = "https://files.pythonhosted.org/packages/5b/f5/4ec618ed16cc4f8fb3b701563655a69816155e79e24a17b651541804721d/markdown_it_py-4.0.0.tar.gz", hash = "sha256:cb0a2b4aa34f932c007117b194e945bd74e0ec24133ceb5bac59009cda1cb9f3", size = 73070, upload-time = "2025-08-11T12:57:52.854Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/94/54/e7d793b573f298e1c9013b8c4dade17d481164aa517d1d7148619c2cedbf/markdown_it_py-4.0.0-py3-none-any.whl", hash = "sha256:87327c59b172c5011896038353a81343b6754500a08cd7a4973bb48c6d578147", size = 87321, upload-time = "2025-08-11T12:57:51.923Z" },
]
[[package]]
name = "markupsafe"
version = "3.0.2"
@@ -1072,13 +1084,19 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/97/fc/80e655c955137393c443842ffcc4feccab5b12fa7cb8de9ced90f90e6998/mcp-1.9.4-py3-none-any.whl", hash = "sha256:7fcf36b62936adb8e63f89346bccca1268eeca9bf6dfb562ee10b1dfbda9dac0", size = 130232, upload-time = "2025-06-12T08:20:28.551Z" },
]
[package.optional-dependencies]
cli = [
{ name = "python-dotenv" },
{ name = "typer" },
]
[[package]]
name = "mcp-server"
version = "1.0.1"
source = { virtual = "." }
dependencies = [
{ name = "graphiti-core", extra = ["falkordb"] },
{ name = "mcp" },
{ name = "mcp", extra = ["cli"] },
{ name = "openai" },
{ name = "pydantic-settings" },
{ name = "pyyaml" },
@@ -1117,7 +1135,7 @@ requires-dist = [
{ name = "google-genai", marker = "extra == 'providers'", specifier = ">=1.8.0" },
{ name = "graphiti-core", extras = ["falkordb"], editable = "../" },
{ name = "groq", marker = "extra == 'providers'", specifier = ">=0.2.0" },
{ name = "mcp", specifier = ">=1.9.4" },
{ name = "mcp", extras = ["cli"], specifier = ">=1.9.4" },
{ name = "openai", specifier = ">=1.91.0" },
{ name = "pydantic-settings", specifier = ">=2.0.0" },
{ name = "pyyaml", specifier = ">=6.0" },
@@ -1140,6 +1158,15 @@ dev = [
{ name = "ruff", specifier = ">=0.7.1" },
]
[[package]]
name = "mdurl"
version = "0.1.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729, upload-time = "2022-08-14T12:40:10.846Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979, upload-time = "2022-08-14T12:40:09.779Z" },
]
[[package]]
name = "mpmath"
version = "1.3.0"
@@ -2283,6 +2310,19 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/3f/51/d4db610ef29373b879047326cbf6fa98b6c1969d6f6dc423279de2b1be2c/requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06", size = 54481, upload-time = "2023-05-01T04:11:28.427Z" },
]
[[package]]
name = "rich"
version = "14.2.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "markdown-it-py" },
{ name = "pygments" },
]
sdist = { url = "https://files.pythonhosted.org/packages/fb/d2/8920e102050a0de7bfabeb4c4614a49248cf8d5d7a8d01885fbb24dc767a/rich-14.2.0.tar.gz", hash = "sha256:73ff50c7c0c1c77c8243079283f4edb376f0f6442433aecb8ce7e6d0b92d1fe4", size = 219990, upload-time = "2025-10-09T14:16:53.064Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/25/7a/b0178788f8dc6cafce37a212c99565fa1fe7872c70c6c9c1e1a372d9d88f/rich-14.2.0-py3-none-any.whl", hash = "sha256:76bc51fe2e57d2b1be1f96c524b890b816e334ab4c1e45888799bfaab0021edd", size = 243393, upload-time = "2025-10-09T14:16:51.245Z" },
]
[[package]]
name = "rsa"
version = "4.9.1"
@@ -2466,6 +2506,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/a3/dc/17031897dae0efacfea57dfd3a82fdd2a2aeb58e0ff71b77b87e44edc772/setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922", size = 1201486, upload-time = "2025-05-27T00:56:49.664Z" },
]
[[package]]
name = "shellingham"
version = "1.5.4"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/58/15/8b3609fd3830ef7b27b655beb4b4e9c62313a4e8da8c676e142cc210d58e/shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de", size = 10310, upload-time = "2023-10-24T04:13:40.426Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/e0/f9/0595336914c5619e5f28a1fb793285925a8cd4b432c9da0a987836c7f822/shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686", size = 9755, upload-time = "2023-10-24T04:13:38.866Z" },
]
[[package]]
name = "six"
version = "1.17.0"
@@ -2702,6 +2751,21 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/20/63/8cb444ad5cdb25d999b7d647abac25af0ee37d292afc009940c05b82dda0/triton-3.4.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7936b18a3499ed62059414d7df563e6c163c5e16c3773678a3ee3d417865035d", size = 155659780, upload-time = "2025-07-30T19:58:51.171Z" },
]
[[package]]
name = "typer"
version = "0.20.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "click" },
{ name = "rich" },
{ name = "shellingham" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/8f/28/7c85c8032b91dbe79725b6f17d2fffc595dff06a35c7a30a37bef73a1ab4/typer-0.20.0.tar.gz", hash = "sha256:1aaf6494031793e4876fb0bacfa6a912b551cf43c1e63c800df8b1a866720c37", size = 106492, upload-time = "2025-10-20T17:03:49.445Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/78/64/7713ffe4b5983314e9d436a90d5bd4f63b6054e2aca783a3cfc44cb95bbf/typer-0.20.0-py3-none-any.whl", hash = "sha256:5b463df6793ec1dca6213a3cf4c0f03bc6e322ac5e16e13ddd622a889489784a", size = 47028, upload-time = "2025-10-20T17:03:47.617Z" },
]
[[package]]
name = "typing-extensions"
version = "4.14.0"

View file

@@ -1,4 +1,5 @@
OPENAI_API_KEY=
CHUTES_API_TOKEN=
NEO4J_PORT=7687
# Only used if not running a neo4j container in docker
NEO4J_URI=bolt://localhost:7687