* feat(gemini): embedding batch size & lite default

  The new `gemini-embedding-001` model only allows one embedding input per batch (instance), but has other impressive statistics: https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/text-embeddings-api

  The DEFAULT_SMALL_MODEL must not have the 'models/' prefix.

* Refactor: Improve Gemini Client Error Handling and Reliability

  This commit introduces several improvements to the Gemini client to enhance its robustness and reliability.

  - Implemented more specific error handling for various Gemini API responses, including rate limits and safety blocks.
  - Added a JSON salvaging mechanism to gracefully handle incomplete or malformed JSON responses from the API (a hedged sketch of the idea follows below).
  - Introduced detailed logging for failed LLM generations to simplify debugging and troubleshooting.
  - Refined the Gemini embedder to better handle empty or invalid embedding responses.
  - Updated and corrected tests to align with the improved error handling and reliability features.

* fix: cleanup in _log_failed_generation()

* fix: cleanup in _log_failed_generation()

* Fix ruff B904 error in gemini_client.py

* fix(gemini): correct retry logic and enhance error logging

  Updated the retry mechanism in the GeminiClient so that it retries the maximum number of times specified. Improved error logging to provide clearer insight when all retries are exhausted, including detailed information about the last error encountered.

* fix(gemini): enhance error handling for safety blocks and update tests

  Refined error handling in the GeminiClient to improve detection of safety-block conditions. Updated test cases to reflect changes in exception messages and to ensure proper retry logic is enforced. Enhanced mock responses in tests to better simulate real-world scenarios, including handling of invalid JSON responses.

* revert default gemini to text-embedding-001

---------

Co-authored-by: Daniel Chalef <131175+danielchalef@users.noreply.github.com>
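The JSON-salvaging logic mentioned above lives in the Gemini client itself and is not part of the file below. As a rough illustration of the idea only (the helper name `salvage_json` and its fallback strategy are hypothetical assumptions, not the shipped implementation), such a step might look like this:

```python
import json


def salvage_json(raw: str) -> dict | None:
    """Best-effort parse of a possibly fenced or truncated JSON object."""
    text = raw.strip()
    # Strip any markdown code-fence backticks and an optional leading "json" tag.
    text = text.strip('`').strip()
    if text.lower().startswith('json'):
        text = text[4:].lstrip()
    # Fast path: the response is already valid JSON.
    try:
        parsed = json.loads(text)
        if isinstance(parsed, dict):
            return parsed
    except json.JSONDecodeError:
        pass
    # Slow path: trim trailing characters, close any dangling quote and
    # unbalanced brackets, then retry until something parses or nothing is left.
    for end in range(len(text), 0, -1):
        candidate = text[:end]
        open_braces = candidate.count('{') - candidate.count('}')
        open_brackets = candidate.count('[') - candidate.count(']')
        if open_braces < 0 or open_brackets < 0:
            continue
        if candidate.count('"') % 2 == 1:
            candidate += '"'
        candidate += ']' * open_brackets + '}' * open_braces
        try:
            parsed = json.loads(candidate)
        except json.JSONDecodeError:
            continue
        if isinstance(parsed, dict):
            return parsed
    return None
```

A client would try a plain `json.loads` first and fall back to a helper like this before logging the failed generation; the quadratic retry loop is tolerable because model responses are relatively short.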
"""
|
|
Copyright 2024, Zep Software, Inc.
|
|
|
|
Licensed under the Apache License, Version 2.0 (the "License");
|
|
you may not use this file except in compliance with the License.
|
|
You may obtain a copy of the License at
|
|
|
|
http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
Unless required by applicable law or agreed to in writing, software
|
|
distributed under the License is distributed on an "AS IS" BASIS,
|
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
See the License for the specific language governing permissions and
|
|
limitations under the License.
|
|
"""
|
|
|
|
import hashlib
|
|
import json
|
|
import logging
|
|
import typing
|
|
from abc import ABC, abstractmethod
|
|
|
|
import httpx
|
|
from diskcache import Cache
|
|
from pydantic import BaseModel
|
|
from tenacity import retry, retry_if_exception, stop_after_attempt, wait_random_exponential
|
|
|
|
from ..prompts.models import Message
|
|
from .config import DEFAULT_MAX_TOKENS, LLMConfig, ModelSize
|
|
from .errors import RateLimitError
|
|
|
|
DEFAULT_TEMPERATURE = 0
|
|
DEFAULT_CACHE_DIR = './llm_cache'
|
|
|
|
MULTILINGUAL_EXTRACTION_RESPONSES = (
|
|
'\n\nAny extracted information should be returned in the same language as it was written in.'
|
|
)
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
def is_server_or_retry_error(exception):
|
|
if isinstance(exception, RateLimitError | json.decoder.JSONDecodeError):
|
|
return True
|
|
|
|
return (
|
|
isinstance(exception, httpx.HTTPStatusError) and 500 <= exception.response.status_code < 600
|
|
)
|
|
|
|
|
|
class LLMClient(ABC):
    def __init__(self, config: LLMConfig | None, cache: bool = False):
        if config is None:
            config = LLMConfig()

        self.config = config
        self.model = config.model
        self.small_model = config.small_model
        self.temperature = config.temperature
        self.max_tokens = config.max_tokens
        self.cache_enabled = cache
        self.cache_dir = None

        # Only create the cache directory if caching is enabled
        if self.cache_enabled:
            self.cache_dir = Cache(DEFAULT_CACHE_DIR)

    def _clean_input(self, input: str) -> str:
        """Clean input string of invalid unicode and control characters.

        Args:
            input: Raw input string to be cleaned

        Returns:
            Cleaned string safe for LLM processing
        """
        # Clean any invalid Unicode
        cleaned = input.encode('utf-8', errors='ignore').decode('utf-8')

        # Remove zero-width characters and other invisible unicode
        zero_width = '\u200b\u200c\u200d\ufeff\u2060'
        for char in zero_width:
            cleaned = cleaned.replace(char, '')

        # Remove control characters except newlines, returns, and tabs
        cleaned = ''.join(char for char in cleaned if ord(char) >= 32 or char in '\n\r\t')

        return cleaned
    @retry(
        # Retry on rate limits, JSON decode errors, and 5xx responses: up to 4
        # attempts in total, with jittered exponential backoff between attempts.
        stop=stop_after_attempt(4),
        wait=wait_random_exponential(multiplier=10, min=5, max=120),
        retry=retry_if_exception(is_server_or_retry_error),
        after=lambda retry_state: logger.warning(
            f'Retrying {retry_state.fn.__name__ if retry_state.fn else "function"} after {retry_state.attempt_number} attempts...'
        )
        if retry_state.attempt_number > 1
        else None,
        reraise=True,
    )
    async def _generate_response_with_retry(
        self,
        messages: list[Message],
        response_model: type[BaseModel] | None = None,
        max_tokens: int = DEFAULT_MAX_TOKENS,
        model_size: ModelSize = ModelSize.medium,
    ) -> dict[str, typing.Any]:
        try:
            return await self._generate_response(messages, response_model, max_tokens, model_size)
        except (httpx.HTTPStatusError, RateLimitError) as e:
            raise e
    @abstractmethod
    async def _generate_response(
        self,
        messages: list[Message],
        response_model: type[BaseModel] | None = None,
        max_tokens: int = DEFAULT_MAX_TOKENS,
        model_size: ModelSize = ModelSize.medium,
    ) -> dict[str, typing.Any]:
        pass

    def _get_cache_key(self, messages: list[Message]) -> str:
        # Create a unique cache key based on the messages and model
        message_str = json.dumps([m.model_dump() for m in messages], sort_keys=True)
        key_str = f'{self.model}:{message_str}'
        return hashlib.md5(key_str.encode()).hexdigest()

    async def generate_response(
        self,
        messages: list[Message],
        response_model: type[BaseModel] | None = None,
        max_tokens: int | None = None,
        model_size: ModelSize = ModelSize.medium,
    ) -> dict[str, typing.Any]:
        if max_tokens is None:
            max_tokens = self.max_tokens

        if response_model is not None:
            serialized_model = json.dumps(response_model.model_json_schema())
            messages[-1].content += (
                f'\n\nRespond with a JSON object in the following format:\n\n{serialized_model}'
            )

        # Add multilingual extraction instructions
        messages[0].content += MULTILINGUAL_EXTRACTION_RESPONSES

        if self.cache_enabled and self.cache_dir is not None:
            cache_key = self._get_cache_key(messages)

            cached_response = self.cache_dir.get(cache_key)
            if cached_response is not None:
                logger.debug(f'Cache hit for {cache_key}')
                return cached_response

        for message in messages:
            message.content = self._clean_input(message.content)

        response = await self._generate_response_with_retry(
            messages, response_model, max_tokens, model_size
        )

        if self.cache_enabled and self.cache_dir is not None:
            cache_key = self._get_cache_key(messages)
            self.cache_dir.set(cache_key, response)

        return response
    def _get_failed_generation_log(self, messages: list[Message], output: str | None) -> str:
        """
        Build a debug log string containing the full input messages and the raw output
        (if any) for a failed generation.
        """
        log = ''
        log += f'Input messages: {json.dumps([m.model_dump() for m in messages], indent=2)}\n'
        if output is not None:
            if len(output) > 4000:
                log += f'Raw output: {output[:2000]}... (truncated) ...{output[-2000:]}\n'
            else:
                log += f'Raw output: {output}\n'
        else:
            log += 'No raw output available'
        return log
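

# ---------------------------------------------------------------------------
# Illustrative sketch only: not part of the original module and not the real
# GeminiClient. It shows how a minimal concrete subclass satisfies the
# LLMClient contract and where _get_failed_generation_log would be used; the
# canned `raw` payload stands in for a real provider API call.
# ---------------------------------------------------------------------------
class StaticJSONClient(LLMClient):
    """Toy LLMClient that returns a fixed JSON payload, for illustration."""

    async def _generate_response(
        self,
        messages: list[Message],
        response_model: type[BaseModel] | None = None,
        max_tokens: int = DEFAULT_MAX_TOKENS,
        model_size: ModelSize = ModelSize.medium,
    ) -> dict[str, typing.Any]:
        # A real implementation would call the provider API here.
        raw = '{"status": "ok"}'
        try:
            return json.loads(raw)
        except json.JSONDecodeError:
            # Surface the prompt and raw output before re-raising, mirroring
            # the failed-generation logging described in the commit message.
            logger.error(self._get_failed_generation_log(messages, raw))
            raise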