From d39afcb8318e361e6db019b95318c81526ea6dff Mon Sep 17 00:00:00 2001
From: yangdx
Date: Fri, 29 Aug 2025 15:13:52 +0800
Subject: [PATCH] Add temperature guidance for Qwen3 models in env example

---
 env.example | 1 +
 1 file changed, 1 insertion(+)

diff --git a/env.example b/env.example
index 35a1e5fa..41abd61b 100644
--- a/env.example
+++ b/env.example
@@ -175,6 +175,7 @@ LLM_BINDING_API_KEY=your_api_key
 # LLM_BINDING=openai
 ### OpenAI Specific Parameters
+### To mitigate endless output loops and prevent greedy decoding for Qwen3, set the temperature parameter to a value between 0.8 and 1.0
 # OPENAI_LLM_TEMPERATURE=1.0
 # OPENAI_LLM_REASONING_EFFORT=low
 ### For models like Qwen3 with fewer than 32B param, it is recommended to set the presence penalty to 1.5
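
For context, here is a minimal sketch of how settings like `OPENAI_LLM_TEMPERATURE` might be consumed at runtime through the standard OpenAI Python SDK. The variable names `OPENAI_LLM_PRESENCE_PENALTY`, `LLM_BINDING_HOST`, and `LLM_MODEL`, the fallback defaults, and the model id are illustrative assumptions, not values taken from this patch.

```python
import os

from openai import OpenAI  # official OpenAI SDK (>=1.x); an OpenAI-compatible endpoint can also serve Qwen3

# Read the tuning knobs discussed in env.example; defaults shown here are assumptions.
temperature = float(os.getenv("OPENAI_LLM_TEMPERATURE", "1.0"))
presence_penalty = float(os.getenv("OPENAI_LLM_PRESENCE_PENALTY", "1.5"))

client = OpenAI(
    base_url=os.getenv("LLM_BINDING_HOST", "https://api.openai.com/v1"),  # hypothetical variable name
    api_key=os.environ["LLM_BINDING_API_KEY"],
)

response = client.chat.completions.create(
    model=os.getenv("LLM_MODEL", "qwen3-8b"),  # hypothetical model id for illustration
    messages=[{"role": "user", "content": "Summarize the key entities in this passage."}],
    temperature=temperature,            # 0.8-1.0 keeps sampling away from greedy decoding
    presence_penalty=presence_penalty,  # ~1.5 discourages repetition loops on smaller Qwen3 models
)
print(response.choices[0].message.content)
```

The point of the sketch is only that the temperature guidance added by this patch takes effect wherever the environment value is forwarded to the completion call; the exact plumbing in the project may differ.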