graphiti/graphiti_core/llm_client/groq_client.py
claude[bot] 93ab7375cd
feat: eliminate ghost variables with configurable provider defaults system
Replace hardcoded DEFAULT_MODEL and DEFAULT_SMALL_MODEL constants across all LLM clients with a centralized, configurable provider defaults system.

Key changes:
- Created provider_defaults.py with centralized configuration for all providers
- Added environment variable support for easy customization (e.g., GEMINI_DEFAULT_MODEL)
- Updated all LLM clients to use configurable defaults instead of hardcoded constants
- Made edge operations max_tokens configurable via EXTRACT_EDGES_MAX_TOKENS
- Updated cross-encoder reranker clients to use provider defaults
- Maintained full backward compatibility with existing configurations

This resolves the issue where Gemini's flash-lite model has location constraints in Vertex AI that differ from the regular flash model, yet users could not override the default model without editing source code.

Environment variables now supported:
- {PROVIDER}_DEFAULT_MODEL
- {PROVIDER}_DEFAULT_SMALL_MODEL  
- {PROVIDER}_DEFAULT_MAX_TOKENS
- {PROVIDER}_DEFAULT_TEMPERATURE
- {PROVIDER}_EXTRACT_EDGES_MAX_TOKENS
- EXTRACT_EDGES_MAX_TOKENS (global fallback)
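
The provider_defaults.py module itself is not shown in this view. As a rough, hypothetical sketch of how such a helper could resolve these variables (the dataclass fields, fallback values, and placeholder model names below are illustrative assumptions, not the actual implementation):

import os
from dataclasses import dataclass


@dataclass
class ProviderDefaults:
    model: str
    small_model: str
    max_tokens: int
    temperature: float


# Placeholder built-in defaults; the real module defines its own per-provider values.
_FALLBACKS = {
    'groq': ProviderDefaults(
        model='groq-default-model-placeholder',
        small_model='groq-small-model-placeholder',
        max_tokens=2048,
        temperature=0.0,
    ),
}


def get_provider_defaults(provider: str) -> ProviderDefaults:
    """Resolve defaults for a provider, letting {PROVIDER}_* env vars override them."""
    prefix = provider.upper()
    base = _FALLBACKS[provider]
    return ProviderDefaults(
        model=os.getenv(f'{prefix}_DEFAULT_MODEL', base.model),
        small_model=os.getenv(f'{prefix}_DEFAULT_SMALL_MODEL', base.small_model),
        max_tokens=int(os.getenv(f'{prefix}_DEFAULT_MAX_TOKENS', base.max_tokens)),
        temperature=float(os.getenv(f'{prefix}_DEFAULT_TEMPERATURE', base.temperature)),
    )

Under a scheme like this, exporting GEMINI_DEFAULT_SMALL_MODEL (for example) would swap out the flash-lite default without editing source code.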

Fixes #681

Co-authored-by: Daniel Chalef <danielchalef@users.noreply.github.com>
2025-07-09 14:59:17 +00:00


"""
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import logging
import typing
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    import groq
    from groq import AsyncGroq
    from groq.types.chat import ChatCompletionMessageParam
else:
    try:
        import groq
        from groq import AsyncGroq
        from groq.types.chat import ChatCompletionMessageParam
    except ImportError:
        raise ImportError(
            'groq is required for GroqClient. Install it with: pip install graphiti-core[groq]'
        ) from None

from pydantic import BaseModel

from ..prompts.models import Message
from .client import LLMClient
from .config import LLMConfig, ModelSize
from .errors import RateLimitError
from .provider_defaults import get_provider_defaults

logger = logging.getLogger(__name__)

DEFAULT_MAX_TOKENS = 2048


class GroqClient(LLMClient):
    def __init__(self, config: LLMConfig | None = None, cache: bool = False):
        if config is None:
            config = LLMConfig(max_tokens=DEFAULT_MAX_TOKENS)
        elif config.max_tokens is None:
            config.max_tokens = DEFAULT_MAX_TOKENS
        super().__init__(config, cache)

        self.client = AsyncGroq(api_key=config.api_key)

    async def _generate_response(
        self,
        messages: list[Message],
        response_model: type[BaseModel] | None = None,
        max_tokens: int = DEFAULT_MAX_TOKENS,
        model_size: ModelSize = ModelSize.medium,
    ) -> dict[str, typing.Any]:
        msgs: list[ChatCompletionMessageParam] = []
        for m in messages:
            if m.role == 'user':
                msgs.append({'role': 'user', 'content': m.content})
            elif m.role == 'system':
                msgs.append({'role': 'system', 'content': m.content})
        try:
            model = self.model or get_provider_defaults('groq').model
            response = await self.client.chat.completions.create(
                model=model,
                messages=msgs,
                temperature=self.temperature,
                max_tokens=max_tokens or self.max_tokens,
                response_format={'type': 'json_object'},
            )
            result = response.choices[0].message.content or ''
            return json.loads(result)
        except groq.RateLimitError as e:
            raise RateLimitError from e
        except Exception as e:
            logger.error(f'Error in generating LLM response: {e}')
            raise
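
For context, a minimal usage sketch of this client (assuming a GROQ_API_KEY environment variable and that LLMConfig accepts the api_key and max_tokens arguments used above; in practice callers go through the LLMClient base class rather than calling _generate_response directly):

import asyncio
import os

from graphiti_core.llm_client.config import LLMConfig
from graphiti_core.llm_client.groq_client import GroqClient
from graphiti_core.prompts.models import Message


async def main() -> None:
    # With no model set, _generate_response falls back to
    # get_provider_defaults('groq').model, overridable via GROQ_DEFAULT_MODEL.
    config = LLMConfig(api_key=os.environ.get('GROQ_API_KEY'), max_tokens=1024)
    client = GroqClient(config=config)
    result = await client._generate_response(
        messages=[
            Message(role='system', content='Respond with a JSON object.'),
            Message(role='user', content='Return {"status": "ok"} as JSON.'),
        ]
    )
    print(result)


asyncio.run(main())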