graphiti/graphiti_core/llm_client/chutes_client.py
"""
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUTHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

import typing

from openai import NOT_GIVEN, AsyncOpenAI
from openai.types.chat import ChatCompletionMessageParam
from pydantic import BaseModel

from .config import DEFAULT_MAX_TOKENS, LLMConfig
from .openai_base_client import DEFAULT_REASONING, DEFAULT_VERBOSITY, BaseOpenAIClient


class ChutesClient(BaseOpenAIClient):
    """
    ChutesClient is a client class for interacting with Chutes's language models
    through their OpenAI-compatible API.
    """
    def __init__(
        self,
        config: LLMConfig | None = None,
        cache: bool = False,
        client: typing.Any = None,
        max_tokens: int = DEFAULT_MAX_TOKENS,
        reasoning: str = DEFAULT_REASONING,
        verbosity: str = DEFAULT_VERBOSITY,
    ):
        """
        Initialize the ChutesClient with the provided configuration, cache setting, and client.

        Args:
            config (LLMConfig | None): Configuration for the LLM client, including the API key and model name.
            cache (bool): Whether to cache responses. Defaults to False.
            client (Any | None): An optional async client instance to use; if omitted,
                an AsyncOpenAI client pointed at the Chutes endpoint is created.
            max_tokens (int): Maximum number of tokens to generate in a response.
            reasoning (str): Reasoning-effort setting forwarded to the base client.
            verbosity (str): Verbosity setting forwarded to the base client.
        """
        super().__init__(config, cache, max_tokens, reasoning, verbosity)

        if config is None:
            config = LLMConfig()

        if client is None:
            # Chutes exposes an OpenAI-compatible API, so the standard AsyncOpenAI
            # client works once its base URL points at the Chutes endpoint.
            self.client = AsyncOpenAI(
                api_key=config.api_key,
                base_url="https://llm.chutes.ai/v1",
            )
        else:
            self.client = client
    async def _create_structured_completion(
        self,
        model: str,
        messages: list[ChatCompletionMessageParam],
        temperature: float | None,
        max_tokens: int,
        response_model: type[BaseModel],
        reasoning: str | None = None,
        verbosity: str | None = None,
    ):
        """Create a structured completion using Chutes's API.

        Delegates to `_create_completion`, which requests JSON output via
        `response_format` when a response model is supplied.
        """
        return await self._create_completion(
            model, messages, temperature, max_tokens, response_model
        )
    async def _create_completion(
        self,
        model: str,
        messages: list[ChatCompletionMessageParam],
        temperature: float | None,
        max_tokens: int,
        response_model: type[BaseModel] | None = None,
        reasoning: str | None = None,
        verbosity: str | None = None,
    ):
        """Create a regular completion, requesting a JSON object when a response model is expected."""
        return await self.client.chat.completions.create(
            model=model,
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens,
            # The OpenAI SDK expects NOT_GIVEN (not None) when response_format
            # should be omitted from the request entirely.
            response_format={"type": "json_object"} if response_model else NOT_GIVEN,
        )
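

# --- Usage sketch (illustrative; not part of the original module) -----------
# A minimal, hedged example of driving ChutesClient end to end. It assumes the
# `generate_response` API inherited from graphiti_core's LLMClient, the Message
# model from graphiti_core.prompts.models, and a CHUTES_API_TOKEN environment
# variable; the model id below is a placeholder, not a confirmed Chutes model.
if __name__ == "__main__":
    import asyncio
    import os

    from graphiti_core.prompts.models import Message

    class _Summary(BaseModel):
        summary: str

    async def _demo() -> None:
        client = ChutesClient(
            config=LLMConfig(
                api_key=os.environ.get("CHUTES_API_TOKEN"),
                model="deepseek-ai/DeepSeek-V3",  # placeholder model id
            )
        )
        # generate_response (from the LLMClient base class) returns a dict
        # parsed from the model's JSON output.
        result = await client.generate_response(
            messages=[Message(role="user", content="Summarize Graphiti in one sentence.")],
            response_model=_Summary,
        )
        print(result)

    asyncio.run(_demo())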