diff --git a/env.example b/env.example
index ce666600..6463db5b 100644
--- a/env.example
+++ b/env.example
@@ -187,14 +187,17 @@ OPENAI_LLM_MAX_COMPLETION_TOKENS=9000
 #### OpenAI's new API utilizes max_completion_tokens instead of max_tokens
 # OPENAI_LLM_MAX_COMPLETION_TOKENS=9000
 
+### use the following command to see all supported options for OpenAI, azure_openai or OpenRouter
+### lightrag-server --llm-binding openai --help
+### OpenAI Specific Parameters
+# OPENAI_LLM_REASONING_EFFORT=minimal
 ### OpenRouter Specific Parameters
 # OPENAI_LLM_EXTRA_BODY='{"reasoning": {"enabled": false}}'
 
 ### Qwen3 Specific Parameters depoly by vLLM
 # OPENAI_LLM_EXTRA_BODY='{"chat_template_kwargs": {"enable_thinking": false}}'
-### use the following command to see all support options for OpenAI, azure_openai or OpenRouter
-### lightrag-server --llm-binding openai --help
-
+### use the following command to see all supported options for Ollama LLM
+### lightrag-server --llm-binding ollama --help
 ### Ollama Server Specific Parameters
 ### OLLAMA_LLM_NUM_CTX must be provided, and should at least larger than MAX_TOTAL_TOKENS + 2000
 OLLAMA_LLM_NUM_CTX=32768
@@ -202,8 +205,6 @@ OLLAMA_LLM_NUM_CTX=32768
 # OLLAMA_LLM_NUM_PREDICT=9000
 ### Stop sequences for Ollama LLM
 # OLLAMA_LLM_STOP='["</s>", "<|EOT|>"]'
-### use the following command to see all support options for Ollama LLM
-### lightrag-server --llm-binding ollama --help
 
 ### Bedrock Specific Parameters
 # BEDROCK_LLM_TEMPERATURE=1.0
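
Usage note: with this change, OpenAI reasoning effort becomes tunable straight from the environment file, and each binding's full option list is printed by the --help commands documented above. A minimal sketch of an .env setup exercising the new variable; the LLM_BINDING/LLM_MODEL keys exist elsewhere in env.example, the model name is illustrative (reasoning effort only applies to reasoning-capable models), and the variable is presumably forwarded to OpenAI's reasoning_effort parameter:

# select the OpenAI binding (existing keys in env.example; model name illustrative)
LLM_BINDING=openai
LLM_MODEL=gpt-5-mini
# added by this change; presumably forwarded as OpenAI's reasoning_effort
OPENAI_LLM_REASONING_EFFORT=minimal

To inspect every supported option for a given binding:

lightrag-server --llm-binding openai --help
lightrag-server --llm-binding ollama --help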