From acd54afdeff6cf44ec38dafeb5b3865cae8b27b1 Mon Sep 17 00:00:00 2001
From: supmo668
Date: Mon, 3 Nov 2025 17:53:00 -0800
Subject: [PATCH] Clarify that max_tokens mapping represents standard limits

Updated comments to explicitly state that ANTHROPIC_MODEL_MAX_TOKENS
represents standard limits without beta headers. This prevents confusion
about extended limits (e.g., Claude 3.7's 128K with a beta header), which
are not currently implemented in this mapping.
---
 graphiti_core/llm_client/anthropic_client.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/graphiti_core/llm_client/anthropic_client.py b/graphiti_core/llm_client/anthropic_client.py
index b757011a..fc64c969 100644
--- a/graphiti_core/llm_client/anthropic_client.py
+++ b/graphiti_core/llm_client/anthropic_client.py
@@ -66,8 +66,11 @@ DEFAULT_MODEL: AnthropicModel = 'claude-3-7-sonnet-latest'
 
 # Maximum output tokens for different Anthropic models
 # Based on official Anthropic documentation (as of 2025)
+# Note: These represent standard limits without beta headers.
+# Some models support higher limits with additional configuration (e.g., Claude 3.7 supports
+# 128K with 'anthropic-beta: output-128k-2025-02-19' header, but this is not currently implemented).
 ANTHROPIC_MODEL_MAX_TOKENS = {
-    # Claude 3.7 models - 64K tokens (128K with beta header)
+    # Claude 3.7 models - standard 64K tokens
     'claude-3-7-sonnet-latest': 65536,
     'claude-3-7-sonnet-20250219': 65536,
     # Claude 3.5 models
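
Note (not part of the patch, context for reviewers only): a minimal sketch of how
the extended 128K output limit could be requested if it were implemented, assuming
the Anthropic Python SDK accepts a per-request extra_headers argument. The header
value is the one named in the new comment; EXTENDED_MAX_TOKENS is a hypothetical
constant and is not defined in this module.

    from anthropic import Anthropic

    client = Anthropic()

    # Hypothetical extended output limit for Claude 3.7 when the beta header is sent
    EXTENDED_MAX_TOKENS = 128_000

    response = client.messages.create(
        model='claude-3-7-sonnet-latest',
        max_tokens=EXTENDED_MAX_TOKENS,
        # Beta header referenced in the comment above; standard requests omit it
        extra_headers={'anthropic-beta': 'output-128k-2025-02-19'},
        messages=[{'role': 'user', 'content': 'Summarize the latest episode.'}],
    )
    print(response.content[0].text)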