From 87f1b472183475e01115e81b6b4472cec6e8982b Mon Sep 17 00:00:00 2001 From: yangdx Date: Thu, 11 Sep 2025 15:50:16 +0800 Subject: [PATCH] Update env.example --- env.example | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/env.example b/env.example index e18ee2fb..c5ec229e 100644 --- a/env.example +++ b/env.example @@ -176,7 +176,7 @@ LLM_BINDING_API_KEY=your_api_key ### OpenAI Compatible API Specific Parameters ### Set the max_tokens to mitigate endless output of some LLM (less than LLM_TIMEOUT * llm_output_tokens/second, i.e. 9000 = 180s * 50 tokens/s) -### Typically, max_tokens does not include prompt content, though some models, such as Gemini-2.5-Flash, are exceptions +### Typically, max_tokens does not include prompt content, though some models, such as Gemini models, are exceptions #### OpenAI's new API utilizes max_completion_tokens instead of max_tokens # OPENAI_LLM_MAX_TOKENS=9000 # OPENAI_LLM_MAX_COMPLETION_TOKENS=9000