diff --git a/env.example b/env.example
index 5646874e..90bd5a67 100644
--- a/env.example
+++ b/env.example
@@ -149,7 +149,9 @@ LLM_BINDING_API_KEY=your_api_key
 ### OpenAI Specific Parameters
 # OPENAI_LLM_TEMPERATURE=1.0
 # OPENAI_LLM_REASONING_EFFORT=low
-### Set the maximum number of completion tokens if your LLM generates repetitive or unconstrained output
+### For models like Qwen3 with fewer than 32B parameters, it is recommended to set the presence penalty to 1.5
+# OPENAI_LLM_PRESENCE_PENALTY=1.5
+### If the presence penalty still cannot stop the model from generating repetitive or unconstrained output, set a maximum number of completion tokens
 # OPENAI_LLM_MAX_COMPLETION_TOKENS=16384
 ### use the following command to see all support options for openai and azure_openai
 ### lightrag-server --llm-binding openai --help
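
The two variables added above correspond to the standard OpenAI Chat Completions parameters presence_penalty and max_completion_tokens. Below is a minimal sketch of how such environment settings might be forwarded to an OpenAI-compatible client; the env-variable wiring and the model name are assumptions for illustration only, not LightRAG's actual implementation.

import os

from openai import OpenAI

# Minimal sketch (assumption, not LightRAG code): forward the two env settings
# to an OpenAI-compatible endpoint. presence_penalty and max_completion_tokens
# are standard Chat Completions parameters.
client = OpenAI()  # API key and base URL are taken from the environment

extra = {}
if os.getenv("OPENAI_LLM_PRESENCE_PENALTY"):
    extra["presence_penalty"] = float(os.environ["OPENAI_LLM_PRESENCE_PENALTY"])
if os.getenv("OPENAI_LLM_MAX_COMPLETION_TOKENS"):
    extra["max_completion_tokens"] = int(os.environ["OPENAI_LLM_MAX_COMPLETION_TOKENS"])

response = client.chat.completions.create(
    model="qwen3-14b",  # hypothetical name for a sub-32B Qwen3 deployment
    messages=[{"role": "user", "content": "Extract entities from this passage."}],
    **extra,
)
print(response.choices[0].message.content)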