From 0dd245e84786819e5a7698502df9e97481599e69 Mon Sep 17 00:00:00 2001
From: yangdx
Date: Thu, 21 Aug 2025 11:04:06 +0800
Subject: [PATCH] Add OpenAI reasoning effort and max completion tokens config options

---
 env.example | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/env.example b/env.example
index 60980b96..5646874e 100644
--- a/env.example
+++ b/env.example
@@ -147,8 +147,10 @@ LLM_BINDING_API_KEY=your_api_key
 # LLM_BINDING=openai
 
 ### OpenAI Specific Parameters
-### Apply frequency penalty to prevent the LLM from generating repetitive or looping outputs
 # OPENAI_LLM_TEMPERATURE=1.0
+# OPENAI_LLM_REASONING_EFFORT=low
+### Set the maximum number of completion tokens if your LLM generates repetitive or unconstrained output
+# OPENAI_LLM_MAX_COMPLETION_TOKENS=16384
 
 ### use the following command to see all support options for openai and azure_openai
 ### lightrag-server --llm-binding openai --help