diff --git a/env.example b/env.example index ab8c6ac9..1ad15243 100644 --- a/env.example +++ b/env.example @@ -177,6 +177,11 @@ LLM_BINDING_API_KEY=your_api_key ### OpenAI Compatible API Specific Parameters ### Set the max_tokens to mitigate endless output of some LLM (less than LLM_TIMEOUT * llm_output_tokens/second, i.e. 9000 = 180s * 50 tokens/s) ### Typically, max_tokens does not include prompt content, though some models, such as Gemini Models, are exceptions +### For vLLM/SGLang deployed models, or most of OpenAI compatible API provider +# OPENAI_LLM_MAX_TOKENS=9000 +### For OpenAI o1-mini or newer models +OPENAI_LLM_MAX_COMPLETION_TOKENS=9000 + #### OpenAI's new API utilizes max_completion_tokens instead of max_tokens # OPENAI_LLM_MAX_TOKENS=9000 # OPENAI_LLM_MAX_COMPLETION_TOKENS=9000