Fix logging message formatting

(cherry picked from commit e0fd31a60d)
yangdx 2025-10-20 22:09:09 +08:00 committed by Raphaël MANSUY
parent 80dcbc696a
commit 6de4bb9113
3 changed files with 634 additions and 160 deletions


@@ -1,22 +1,5 @@
### This is a sample .env file
###############################################################################
### ⚡️ QUICK START: OpenAI Configuration (Recommended)
###############################################################################
### To get started with OpenAI, you only need:
### 1. Set your OpenAI API key (get from https://platform.openai.com/api-keys)
### export OPENAI_API_KEY="sk-your-actual-api-key"
### 2. Then you can start the server with default OpenAI configuration:
### lightrag-server
###
### The default configuration will use:
### - LLM: gpt-4o-mini (entity/relation extraction, graph merging, query answering)
### - Embedding: text-embedding-3-small (vector embeddings)
### No additional configuration needed!
###
### See LLM and Embedding Configuration sections below to customize models
###############################################################################
###########################
### Server Configuration
###########################
@@ -40,13 +23,13 @@ WEBUI_DESCRIPTION="Simple and Fast Graph Based RAG System"
# WORKING_DIR=<absolute_path_for_working_dir>
### Tiktoken cache directory (Store cached files in this folder for offline deployment)
# TIKTOKEN_CACHE_DIR=./temp/tiktoken
# TIKTOKEN_CACHE_DIR=/app/data/tiktoken
### Ollama Emulating Model and Tag (used only when EMBEDDING_BINDING=ollama)
### Ollama Emulating Model and Tag
# OLLAMA_EMULATING_MODEL_NAME=lightrag
OLLAMA_EMULATING_MODEL_TAG=latest
### Max nodes return from grap retrieval in webui
### Max nodes return from graph retrieval in webui
# MAX_GRAPH_NODES=1000
### Logging level
@@ -67,44 +50,30 @@ OLLAMA_EMULATING_MODEL_TAG=latest
# JWT_ALGORITHM=HS256
### API-Key to access LightRAG Server API
### Use this key in HTTP requests with the 'X-API-Key' header
### Example: curl -H "X-API-Key: your-secure-api-key-here" http://localhost:9621/query
# LIGHTRAG_API_KEY=your-secure-api-key-here
# WHITELIST_PATHS=/health,/api/*
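
For illustration, the same call from Python; a minimal sketch where the /query path follows the curl example above, and the JSON body shape is an assumption rather than the server's full schema:

    import requests

    # Hypothetical client call: the header name comes from this file, the payload is illustrative.
    resp = requests.post(
        "http://localhost:9621/query",
        headers={"X-API-Key": "your-secure-api-key-here"},
        json={"query": "What is LightRAG?"},
    )
    print(resp.status_code, resp.json())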
###############################################################################
### OpenAI API Key (Required for OpenAI LLM and Embedding)
### Get your key from: https://platform.openai.com/api-keys
### This is the PRIMARY way to configure OpenAI (environment variable takes precedence)
###############################################################################
# OPENAI_API_KEY=sk-your-actual-openai-api-key-here
######################################################################################
### Query Configuration
###
### How to control the context lenght sent to LLM:
### How to control the context length sent to LLM:
### MAX_ENTITY_TOKENS + MAX_RELATION_TOKENS < MAX_TOTAL_TOKENS
### Chunk_Tokens = MAX_TOTAL_TOKENS - Actual_Entity_Tokens - Actual_Reation_Tokens
### Chunk_Tokens = MAX_TOTAL_TOKENS - Actual_Entity_Tokens - Actual_Relation_Tokens
######################################################################################
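
As a quick arithmetic illustration of the budget rule above (variable names mirror this file; values are the commented defaults below):

    # Budget rule from the comments above, using this file's commented defaults.
    MAX_ENTITY_TOKENS = 6000
    MAX_RELATION_TOKENS = 8000
    MAX_TOTAL_TOKENS = 30000
    assert MAX_ENTITY_TOKENS + MAX_RELATION_TOKENS < MAX_TOTAL_TOKENS

    # Hypothetical measured sizes: whatever entities/relations leave unused goes to chunks.
    actual_entity_tokens, actual_relation_tokens = 4500, 7000
    chunk_tokens = MAX_TOTAL_TOKENS - actual_entity_tokens - actual_relation_tokens
    print(chunk_tokens)  # 18500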
# LLM responde cache for query (Not valid for streaming response)
# LLM response cache for query (Not valid for streaming response)
ENABLE_LLM_CACHE=true
# COSINE_THRESHOLD=0.2
### Number of entities or relations retrieved from KG
# TOP_K=40
### Maxmium number or chunks for naive vector search
### Maximum number of chunks for naive vector search
# CHUNK_TOP_K=20
### control the actual enties send to LLM
### control the actual entities sent to LLM
# MAX_ENTITY_TOKENS=6000
### control the actual relations sent to LLM
# MAX_RELATION_TOKENS=8000
### control the maximum tokens send to LLM (include entities, raltions and chunks)
### control the maximum tokens sent to LLM (includes entities, relations and chunks)
# MAX_TOTAL_TOKENS=30000
### maximum number of related chunks per source entity or relation
### The chunk picker uses this value to determine the total number of chunks selected from the KG (knowledge graph)
### Higher values increase re-ranking time
# RELATED_CHUNK_NUMBER=5
### chunk selection strategies
### VECTOR: Pick KG chunks by vector similarity, delivering chunks to the LLM that align more closely with naive retrieval
### WEIGHT: Pick KG chunks by entity and chunk weight, delivering chunks to the LLM that are more strongly KG-related
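
A schematic sketch of the two strategies; the function and score fields are hypothetical, not LightRAG internals:

    # Illustrative only: contrast the two chunk-picking strategies described above.
    def pick_chunks(chunks, strategy, top_n):
        if strategy == "VECTOR":
            # closest to naive retrieval: rank by vector similarity to the query
            ranked = sorted(chunks, key=lambda c: c["similarity"], reverse=True)
        else:  # "WEIGHT"
            # favor chunks most strongly tied to KG entities and relations
            ranked = sorted(chunks, key=lambda c: c["kg_weight"], reverse=True)
        return ranked[:top_n]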
@@ -119,7 +88,7 @@ ENABLE_LLM_CACHE=true
RERANK_BINDING=null
### Enable rerank by default in query params when RERANK_BINDING is not null
# RERANK_BY_DEFAULT=True
### rerank score chunk filter(set to 0.0 to keep all chunks, 0.6 or above if LLM is not strong enought)
### rerank score chunk filter (set to 0.0 to keep all chunks, 0.6 or above if LLM is not strong enough)
# MIN_RERANK_SCORE=0.0
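
The filter itself is just a threshold over the reranker's scores; a sketch with an assumed chunk shape:

    MIN_RERANK_SCORE = 0.6  # 0.0 would keep everything

    # Hypothetical reranked chunks; the score field name is an assumption.
    reranked_chunks = [
        {"text": "relevant passage", "rerank_score": 0.82},
        {"text": "weak match", "rerank_score": 0.41},
    ]
    kept = [c for c in reranked_chunks if c["rerank_score"] >= MIN_RERANK_SCORE]
    print(len(kept))  # 1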
### For local deployment with vLLM
@@ -157,7 +126,7 @@ SUMMARY_LANGUAGE=English
# CHUNK_SIZE=1200
# CHUNK_OVERLAP_SIZE=100
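
Chunking here is token-based (note the tiktoken settings above); as a character-based sketch of the same sliding-window mechanics these two settings define:

    # Character-based stand-in for token chunking, to show the overlap mechanics.
    CHUNK_SIZE, CHUNK_OVERLAP_SIZE = 1200, 100
    step = CHUNK_SIZE - CHUNK_OVERLAP_SIZE  # each chunk starts 1100 units after the last
    text = "x" * 5000
    chunks = [text[i:i + CHUNK_SIZE] for i in range(0, len(text), step)]
    print(len(chunks))  # 5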
### Number of summary semgments or tokens to trigger LLM summary on entity/relation merge (at least 3 is recommented)
### Number of summary segments or tokens to trigger LLM summary on entity/relation merge (at least 3 is recommended)
# FORCE_LLM_SUMMARY_ON_MERGE=8
### Max description token size to trigger LLM summary
# SUMMARY_MAX_TOKENS = 1200
@@ -166,6 +135,19 @@ SUMMARY_LANGUAGE=English
### Maximum context size sent to LLM for description summary
# SUMMARY_CONTEXT_SIZE=12000
### control the maximum number of chunk_ids stored in vector and graph db
# MAX_SOURCE_IDS_PER_ENTITY=300
# MAX_SOURCE_IDS_PER_RELATION=300
### control chunk_ids limitation method: KEEP, FIFO (KEEP: Keep oldest, FIFO: First in first out)
# SOURCE_IDS_LIMIT_METHOD=KEEP
### Maximum number of file paths stored in entity/relation file_path field
# MAX_FILE_PATHS=30
### maximum number of related chunks per source entity or relation
### The chunk picker uses this value to determine the total number of chunks selected from the KG (knowledge graph)
### Higher values increase re-ranking time
# RELATED_CHUNK_NUMBER=5
###############################
### Concurrency Configuration
###############################
@@ -185,17 +167,10 @@ MAX_PARALLEL_INSERT=2
### LLM request timeout setting for all LLMs (0 means no timeout for Ollama)
# LLM_TIMEOUT=180
# PRIMARY CONFIGURATION: OpenAI (Recommended for production)
LLM_BINDING=openai
LLM_MODEL=gpt-4o-mini
LLM_MODEL=gpt-4o
LLM_BINDING_HOST=https://api.openai.com/v1
LLM_BINDING_API_KEY=your_api_key
# Note: By default, uses OPENAI_API_KEY environment variable
### ALTERNATIVE: Using gpt-4o for higher quality (higher cost)
# LLM_BINDING=openai
# LLM_MODEL=gpt-4o
# LLM_BINDING_HOST=https://api.openai.com/v1
### Optional for Azure
# AZURE_OPENAI_API_VERSION=2024-08-01-preview
@@ -212,7 +187,7 @@ LLM_BINDING_API_KEY=your_api_key
# OPENAI_LLM_TEMPERATURE=0.9
### Set the max_tokens to mitigate endless output of some LLMs (less than LLM_TIMEOUT * llm_output_tokens/second, i.e. 9000 = 180s * 50 tokens/s)
### Typically, max_tokens does not include prompt content, though some models, such as Gemini Models, are exceptions
### For vLLM/SGLang doployed models, or most of OpenAI compatible API provider
### For vLLM/SGLang deployed models, or most OpenAI-compatible API providers
# OPENAI_LLM_MAX_TOKENS=9000
### For OpenAI o1-mini or newer models
OPENAI_LLM_MAX_COMPLETION_TOKENS=9000
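
The sizing rule in the comment above is plain throughput arithmetic; for example:

    # Max output tokens should fit within the LLM timeout at the provider's speed.
    LLM_TIMEOUT = 180              # seconds, as set above
    output_tokens_per_second = 50  # rough assumed model throughput
    print(LLM_TIMEOUT * output_tokens_per_second)  # 9000, the value used above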
@@ -226,7 +201,7 @@ OPENAI_LLM_MAX_COMPLETION_TOKENS=9000
# OPENAI_LLM_REASONING_EFFORT=minimal
### OpenRouter Specific Parameters
# OPENAI_LLM_EXTRA_BODY='{"reasoning": {"enabled": false}}'
### Qwen3 Specific Parameters depoly by vLLM
### Qwen3 Specific Parameters when deployed by vLLM
# OPENAI_LLM_EXTRA_BODY='{"chat_template_kwargs": {"enable_thinking": false}}'
### use the following command to see all supported options for Ollama LLM
@@ -244,52 +219,44 @@
####################################################################################
### Embedding Configuration (Should not be changed after the first file has been processed)
### EMBEDDING_BINDING: openai, ollama, azure_openai, jina, lollms, aws_bedrock
### PRIMARY CONFIGURATION: OpenAI (Recommended)
### EMBEDDING_BINDING: ollama, openai, azure_openai, jina, lollms, aws_bedrock
####################################################################################
# EMBEDDING_TIMEOUT=30
EMBEDDING_BINDING=openai
EMBEDDING_MODEL=text-embedding-3-small
EMBEDDING_DIM=1536
EMBEDDING_BINDING_HOST=https://api.openai.com/v1
# EMBEDDING_BINDING_API_KEY=your_openai_api_key (uses OPENAI_API_KEY env var by default)
EMBEDDING_BINDING=ollama
EMBEDDING_MODEL=bge-m3:latest
EMBEDDING_DIM=1024
EMBEDDING_BINDING_API_KEY=your_api_key
# If the embedding service is deployed within the same Docker stack, use host.docker.internal instead of localhost
EMBEDDING_BINDING_HOST=http://localhost:11434
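
A quick sanity check that the endpoint really serves 1024-dim vectors; this assumes Ollama's /api/embeddings REST route and that bge-m3 is already pulled:

    import requests

    # Assumption: Ollama's /api/embeddings route; adjust the host per the note above.
    resp = requests.post(
        "http://localhost:11434/api/embeddings",
        json={"model": "bge-m3:latest", "prompt": "hello"},
    )
    print(len(resp.json()["embedding"]))  # should equal EMBEDDING_DIM=1024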
### ALTERNATIVE: Text-embedding-3-large (higher quality, higher cost)
### OpenAI compatible (VoyageAI embedding openai compatible)
# EMBEDDING_BINDING=openai
# EMBEDDING_MODEL=text-embedding-3-large
# EMBEDDING_DIM=3072
# EMBEDDING_BINDING_HOST=https://api.openai.com/v1
### ALTERNATIVE: Local Ollama embedding (no API key required, requires Ollama service)
# EMBEDDING_BINDING=ollama
# EMBEDDING_MODEL=bge-m3:latest
# EMBEDDING_DIM=1024
# EMBEDDING_BINDING_HOST=http://localhost:11434
# EMBEDDING_BINDING_API_KEY=your_api_key
# If the embedding service is deployed within the same Docker stack, use host.docker.internal instead of localhost
### ALTERNATIVE: Azure OpenAI embedding
# EMBEDDING_BINDING=azure_openai
# AZURE_EMBEDDING_DEPLOYMENT=text-embedding-3-small
### Optional for Azure
# AZURE_EMBEDDING_DEPLOYMENT=text-embedding-3-large
# AZURE_EMBEDDING_API_VERSION=2023-05-15
# AZURE_EMBEDDING_ENDPOINT=your_endpoint
# AZURE_EMBEDDING_API_KEY=your_api_key
### ALTERNATIVE: Jina AI Embedding
### Jina AI Embedding
# EMBEDDING_BINDING=jina
# EMBEDDING_BINDING_HOST=https://api.jina.ai/v1/embeddings
# EMBEDDING_MODEL=jina-embeddings-v4
# EMBEDDING_DIM=2048
# EMBEDDING_BINDING_API_KEY=your_api_key
### Ollama embedding options (only used when EMBEDDING_BINDING=ollama)
### Optional for Ollama embedding
OLLAMA_EMBEDDING_NUM_CTX=8192
### use the following command to see all supported options for Ollama embedding
### lightrag-server --embedding-binding ollama --help
####################################################################
### WORKSPACE setting workspace name for all storage types
### in the purpose of isolating data from LightRAG instances.
### WORKSPACE sets workspace name for all storage types
### for the purpose of isolating data from LightRAG instances.
### Valid workspace name constraints: a-z, A-Z, 0-9, and _
####################################################################
# WORKSPACE=space1
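
The name constraint above amounts to a one-line check; a sketch:

    import re

    # Letters, digits, and underscore only, per the constraint above.
    def is_valid_workspace(name: str) -> bool:
        return re.fullmatch(r"[A-Za-z0-9_]+", name) is not None

    print(is_valid_workspace("space1"))   # True
    print(is_valid_workspace("space-1"))  # False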


@@ -57,8 +57,24 @@ DEFAULT_HISTORY_TURNS = 0
DEFAULT_MIN_RERANK_SCORE = 0.0
DEFAULT_RERANK_BINDING = "null"
# File path configuration for vector and graph database (Should not be changed, used in Milvus Schema)
# Default source ids limit in metadata for entity and relation
DEFAULT_MAX_SOURCE_IDS_PER_ENTITY = 3
DEFAULT_MAX_SOURCE_IDS_PER_RELATION = 3
SOURCE_IDS_LIMIT_METHOD_KEEP = "KEEP" # Keep oldest
SOURCE_IDS_LIMIT_METHOD_FIFO = "FIFO" # First In First Out (Keep newest)
DEFAULT_SOURCE_IDS_LIMIT_METHOD = SOURCE_IDS_LIMIT_METHOD_KEEP
VALID_SOURCE_IDS_LIMIT_METHODS = {
SOURCE_IDS_LIMIT_METHOD_KEEP,
SOURCE_IDS_LIMIT_METHOD_FIFO,
}
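
A minimal sketch of how the two limit methods could be applied; the helper is hypothetical, only the constants come from this file:

    # Hypothetical helper, not the library's code: apply a source_ids limit method.
    def limit_source_ids(source_ids: list, max_len: int, method: str) -> list:
        if len(source_ids) <= max_len:
            return source_ids
        if method == SOURCE_IDS_LIMIT_METHOD_KEEP:
            return source_ids[:max_len]   # KEEP: retain the oldest entries
        return source_ids[-max_len:]      # FIFO: drop oldest, retain the newest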
# Default file_path limit in metadata for entity and relation (Use same limit method as source_ids)
DEFAULT_MAX_FILE_PATHS = 2
# Field length of file_path in Milvus Schema for entity and relation (Should not be changed)
# file_path must store all file paths up to the DEFAULT_MAX_FILE_PATHS limit within the metadata.
DEFAULT_MAX_FILE_PATH_LENGTH = 32768
# Placeholder for more file paths in metadata for entity and relation (Should not be changed)
DEFAULT_FILE_PATH_MORE_PLACEHOLDER = "truncated"
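
And the matching sketch for file paths, showing where the placeholder comes in; again a hypothetical helper:

    # Hypothetical helper: cap stored file paths and mark that more were omitted.
    def limit_file_paths(paths: list) -> list:
        if len(paths) <= DEFAULT_MAX_FILE_PATHS:
            return paths
        return paths[:DEFAULT_MAX_FILE_PATHS] + [DEFAULT_FILE_PATH_MORE_PLACEHOLDER]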
# Default temperature for LLM
DEFAULT_TEMPERATURE = 1.0

File diff suppressed because it is too large