diff --git a/env.example b/env.example
index 87b8eccb..6a18a68c 100644
--- a/env.example
+++ b/env.example
@@ -178,8 +178,6 @@ LLM_BINDING_API_KEY=your_api_key
 ### To mitigate endless output loops and prevent greedy decoding for Qwen3, set the temperature parameter to a value between 0.8 and 1.0
 # OPENAI_LLM_TEMPERATURE=1.0
 # OPENAI_LLM_REASONING_EFFORT=low
-### For models like Qwen3 with fewer than 32B param, it is recommended to set the presence penalty to 1.5
-# OPENAI_LLM_PRESENCE_PENALTY=1.5
 ### If the presence penalty still can not stop the model from generates repetitive or unconstrained output
 # OPENAI_LLM_MAX_COMPLETION_TOKENS=16384