From 6a29b5daa0ab58b21a18b4b0e3b2e561895a339f Mon Sep 17 00:00:00 2001
From: yangdx
Date: Thu, 23 Oct 2025 13:53:26 +0800
Subject: [PATCH] Update Docker deployment comments for LLM and embedding
 hosts

---
 env.example | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/env.example b/env.example
index 828f962e..3c5113ff 100644
--- a/env.example
+++ b/env.example
@@ -208,6 +208,7 @@ OPENAI_LLM_MAX_COMPLETION_TOKENS=9000
 # OPENAI_LLM_EXTRA_BODY='{"chat_template_kwargs": {"enable_thinking": false}}'
 
 ### use the following command to see all supported options for Ollama LLM
+### If LightRAG is deployed in Docker, use host.docker.internal instead of localhost in LLM_BINDING_HOST
 ### lightrag-server --llm-binding ollama --help
 ### Ollama Server Specific Parameters
 ### OLLAMA_LLM_NUM_CTX must be provided, and should be at least MAX_TOTAL_TOKENS + 2000
@@ -229,7 +230,7 @@ EMBEDDING_BINDING=ollama
 EMBEDDING_MODEL=bge-m3:latest
 EMBEDDING_DIM=1024
 EMBEDDING_BINDING_API_KEY=your_api_key
-# If the embedding service is deployed within the same Docker stack, use host.docker.internal instead of localhost
+# If LightRAG is deployed in Docker, use host.docker.internal instead of localhost
 EMBEDDING_BINDING_HOST=http://localhost:11434
 
 ### OpenAI compatible (VoyageAI embedding openai compatible)
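
For context, a minimal sketch of the configuration these comments point to,
assuming LightRAG runs inside a Docker container while Ollama listens on the
Docker host's default port 11434 (the hostnames and port here are
illustrative, not taken from the patch):

    # .env sketch: from inside the LightRAG container, "localhost" resolves
    # to the container itself, so both bindings must point at the Docker host
    LLM_BINDING_HOST=http://host.docker.internal:11434
    EMBEDDING_BINDING_HOST=http://host.docker.internal:11434

One caveat worth noting: host.docker.internal resolves out of the box on
Docker Desktop (macOS/Windows), but on Linux it must be mapped explicitly,
e.g. via Docker Compose extra_hosts (standard Compose syntax; the service
name "lightrag" is illustrative):

    services:
      lightrag:
        extra_hosts:
          # map host.docker.internal to the host gateway (Docker 20.10+)
          - "host.docker.internal:host-gateway"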