fix: replace deprecated gemini-2.5-flash-lite-preview with gemini-2.5-flash-lite (#1076)
fix: replace deprecated gemini-2.5-flash-lite-preview-06-17 with gemini-2.5-flash-lite

Updated all references to the deprecated Gemini model in:
- graphiti_core/llm_client/gemini_client.py
- graphiti_core/cross_encoder/gemini_reranker_client.py
- tests/llm_client/test_gemini_client.py
- README.md

This resolves 404 errors when using Gemini clients.

Fixes #1075

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-authored-by: claude[bot] <41898282+claude[bot]@users.noreply.github.com>
Co-authored-by: Daniel Chalef <danielchalef@users.noreply.github.com>
This commit is contained in:
parent d2654003ff
commit ae78828f9c
4 changed files with 4 additions and 6 deletions
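For context, a minimal sketch of a Graphiti setup using the renamed model follows. It is assembled from the README hunk changed below, not copied verbatim from the repository: the import paths are inferred from the file layout in this commit, and the connection details and `api_key` value are placeholders.

```python
# Sketch only: import paths inferred from graphiti_core's file layout in this commit.
from graphiti_core import Graphiti
from graphiti_core.llm_client.config import LLMConfig
from graphiti_core.llm_client.gemini_client import GeminiClient
from graphiti_core.cross_encoder.gemini_reranker_client import GeminiRerankerClient

api_key = "<your-google-api-key>"  # placeholder

graphiti = Graphiti(
    "bolt://localhost:7687",  # assumed Neo4j connection details
    "neo4j",
    "password",
    llm_client=GeminiClient(
        config=LLMConfig(api_key=api_key, model="gemini-2.5-flash")
    ),
    cross_encoder=GeminiRerankerClient(
        # The deprecated gemini-2.5-flash-lite-preview-06-17 returned 404s;
        # gemini-2.5-flash-lite is the replacement introduced by this commit.
        config=LLMConfig(api_key=api_key, model="gemini-2.5-flash-lite")
    ),
)
```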
README.md

@@ -479,7 +479,7 @@ graphiti = Graphiti(
     cross_encoder=GeminiRerankerClient(
         config=LLMConfig(
             api_key=api_key,
-            model="gemini-2.5-flash-lite-preview-06-17"
+            model="gemini-2.5-flash-lite"
         )
     )
 )
@@ -487,7 +487,7 @@ graphiti = Graphiti(
 # Now you can use Graphiti with Google Gemini for all components
 ```
 
-The Gemini reranker uses the `gemini-2.5-flash-lite-preview-06-17` model by default, which is optimized for
+The Gemini reranker uses the `gemini-2.5-flash-lite` model by default, which is optimized for
 cost-effective and low-latency classification tasks. It uses the same boolean classification approach as the OpenAI
 reranker, leveraging Gemini's log probabilities feature to rank passage relevance.
 
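The boolean-classification reranking described in the README text above can be sketched roughly as follows. This is an illustrative outline, not the actual GeminiRerankerClient implementation: `true_logprob` stands in for the Gemini call that asks whether a passage is relevant and returns the log probability of the "True" token, and `fake_true_logprob` is a toy scorer included only so the sketch runs.

```python
import math
from typing import Callable


def rank_passages(
    query: str,
    passages: list[str],
    true_logprob: Callable[[str, str], float],
) -> list[tuple[str, float]]:
    """Score each passage by the probability the model answers "True" to
    "Is this passage relevant to the query?", then sort most relevant first."""
    scored = [(p, math.exp(true_logprob(query, p))) for p in passages]
    return sorted(scored, key=lambda item: item[1], reverse=True)


def fake_true_logprob(query: str, passage: str) -> float:
    """Toy stand-in for the LLM call; a real client would obtain this value
    from Gemini with log probabilities enabled."""
    overlap = len(set(query.lower().split()) & set(passage.lower().split()))
    return -1.0 / (1 + overlap)  # more overlap -> log probability closer to 0


ranked = rank_passages(
    "capital of France",
    ["Paris is the capital of France.", "Bananas are yellow."],
    fake_true_logprob,
)
print(ranked)
```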
graphiti_core/cross_encoder/gemini_reranker_client.py

@@ -37,7 +37,7 @@ else:
 
 logger = logging.getLogger(__name__)
 
-DEFAULT_MODEL = 'gemini-2.5-flash-lite-preview-06-17'
+DEFAULT_MODEL = 'gemini-2.5-flash-lite'
 
 
 class GeminiRerankerClient(CrossEncoderClient):
graphiti_core/llm_client/gemini_client.py

@@ -45,7 +45,7 @@ else:
 logger = logging.getLogger(__name__)
 
 DEFAULT_MODEL = 'gemini-2.5-flash'
-DEFAULT_SMALL_MODEL = 'gemini-2.5-flash-lite-preview-06-17'
+DEFAULT_SMALL_MODEL = 'gemini-2.5-flash-lite'
 
 # Maximum output tokens for different Gemini models
 GEMINI_MODEL_MAX_TOKENS = {
@@ -53,7 +53,6 @@ GEMINI_MODEL_MAX_TOKENS = {
     'gemini-2.5-pro': 65536,
     'gemini-2.5-flash': 65536,
     'gemini-2.5-flash-lite': 64000,
-    'models/gemini-2.5-flash-lite-preview-06-17': 64000,
     # Gemini 2.0 models
     'gemini-2.0-flash': 8192,
     'gemini-2.0-flash-lite': 8192,
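The GEMINI_MODEL_MAX_TOKENS table above maps model names to their maximum output-token limits. A lookup with a fallback for models missing from the table might look like the sketch below; the helper name and the fallback value are illustrative, not taken from graphiti_core.

```python
# Values copied from the hunk above; the table in gemini_client.py lists more models.
GEMINI_MODEL_MAX_TOKENS = {
    'gemini-2.5-pro': 65536,
    'gemini-2.5-flash': 65536,
    'gemini-2.5-flash-lite': 64000,
    'gemini-2.0-flash': 8192,
    'gemini-2.0-flash-lite': 8192,
}

# Hypothetical conservative fallback; not necessarily the value used upstream.
DEFAULT_MAX_TOKENS = 8192


def max_output_tokens(model: str) -> int:
    """Return the output-token cap for a model, falling back to a default
    for models not present in the table."""
    return GEMINI_MODEL_MAX_TOKENS.get(model, DEFAULT_MAX_TOKENS)
```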
tests/llm_client/test_gemini_client.py

@@ -455,7 +455,6 @@ class TestGeminiClientGenerateResponse:
             ('gemini-2.5-flash', 65536),
             ('gemini-2.5-pro', 65536),
             ('gemini-2.5-flash-lite', 64000),
-            ('models/gemini-2.5-flash-lite-preview-06-17', 64000),
             ('gemini-2.0-flash', 8192),
             ('gemini-1.5-pro', 8192),
             ('gemini-1.5-flash', 8192),