diff --git a/README-zh.md b/README-zh.md
index f53351b1..5caefc89 100644
--- a/README-zh.md
+++ b/README-zh.md
@@ -265,7 +265,7 @@ if __name__ == "__main__":
 | **embedding_func_max_async** | `int` | 最大并发异步嵌入进程数 | `16` |
 | **llm_model_func** | `callable` | LLM生成的函数 | `gpt_4o_mini_complete` |
 | **llm_model_name** | `str` | 用于生成的LLM模型名称 | `meta-llama/Llama-3.2-1B-Instruct` |
-| **summary_max_tokens** | `int` | 生成实体关系摘要时送给LLM的最大令牌数 | `32000`(默认值由环境变量MAX_TOKENS更改) |
+| **summary_max_tokens** | `int` | 生成实体关系摘要时送给LLM的最大令牌数 | `32000`(由环境变量 SUMMARY_MAX_TOKENS 设置) |
 | **llm_model_max_async** | `int` | 最大并发异步LLM进程数 | `4`(默认值由环境变量MAX_ASYNC更改) |
 | **llm_model_kwargs** | `dict` | LLM生成的附加参数 | |
 | **vector_db_storage_cls_kwargs** | `dict` | 向量数据库的附加参数,如设置节点和关系检索的阈值 | cosine_better_than_threshold: 0.2(默认值由环境变量COSINE_THRESHOLD更改) |
diff --git a/README.md b/README.md
index 4299896c..51c68a1a 100644
--- a/README.md
+++ b/README.md
@@ -272,7 +272,7 @@ A full list of LightRAG init parameters:
 | **embedding_func_max_async** | `int` | Maximum number of concurrent asynchronous embedding processes | `16` |
 | **llm_model_func** | `callable` | Function for LLM generation | `gpt_4o_mini_complete` |
 | **llm_model_name** | `str` | LLM model name for generation | `meta-llama/Llama-3.2-1B-Instruct` |
-| **summary_max_tokens** | `int` | Maximum tokens send to LLM to generate entity relation summaries | `32000`(default value changed by env var MAX_TOKENS) |
+| **summary_max_tokens** | `int` | Maximum tokens sent to LLM to generate entity relation summaries | `32000`(configured by env var SUMMARY_MAX_TOKENS) |
 | **llm_model_max_async** | `int` | Maximum number of concurrent asynchronous LLM processes | `4`(default value changed by env var MAX_ASYNC) |
 | **llm_model_kwargs** | `dict` | Additional parameters for LLM generation | |
 | **vector_db_storage_cls_kwargs** | `dict` | Additional parameters for vector database, like setting the threshold for nodes and relations retrieval | cosine_better_than_threshold: 0.2(default value changed by env var COSINE_THRESHOLD) |
diff --git a/env.example b/env.example
index c0fc7567..60980b96 100644
--- a/env.example
+++ b/env.example
@@ -107,7 +107,7 @@ ENABLE_LLM_CACHE_FOR_EXTRACT=true
 ### Entity and relation summarization configuration
 ### Number of duplicated entities/edges to trigger LLM re-summary on merge (at least 3 is recommented), and max tokens send to LLM
 # FORCE_LLM_SUMMARY_ON_MERGE=4
-# MAX_TOKENS=10000
+# SUMMARY_MAX_TOKENS=10000

 ### Maximum number of entity extraction attempts for ambiguous content
 # MAX_GLEANING=1
@@ -148,18 +148,16 @@ LLM_BINDING_API_KEY=your_api_key

 ### OpenAI Specific Parameters
 ### Apply frequency penalty to prevent the LLM from generating repetitive or looping outputs
-# OPENAI_LLM_FREQUENCY_PENALTY=1.1
 # OPENAI_LLM_TEMPERATURE=1.0
 ### use the following command to see all support options for openai and azure_openai
 ### lightrag-server --llm-binding openai --help

 ### Ollama Server Specific Parameters
-### OLLAMA_LLM_NUM_CTX must be larger than MAX_TOTAL_TOKENS + 2000
+### OLLAMA_LLM_NUM_CTX must be provided, and should be at least MAX_TOTAL_TOKENS + 2000
 OLLAMA_LLM_NUM_CTX=32768
+# OLLAMA_LLM_TEMPERATURE=1.0
 ### Stop sequences for Ollama LLM
 # OLLAMA_LLM_STOP='["", "Assistant:", "\n\n"]'
-### If OLLAMA_LLM_TEMPERATURE is not specified, the system will default to the value defined by TEMPERATURE
-# OLLAMA_LLM_TEMPERATURE=0.85
 ### use the following command to see all support options for Ollama LLM
 ### lightrag-server --llm-binding ollama --help
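Reviewer note: the rename is a breaking change for existing deployments. A `.env` file that still sets `MAX_TOKENS` will be silently ignored and the 32000 default will apply. A minimal upgrade guard, as a sketch only (the `warn_stale_max_tokens` helper below is hypothetical and not shipped by LightRAG):

    import os
    import warnings

    def warn_stale_max_tokens() -> None:
        # Hypothetical check for deployments upgrading across this rename:
        # the old MAX_TOKENS variable no longer controls summary generation.
        if "MAX_TOKENS" in os.environ and "SUMMARY_MAX_TOKENS" not in os.environ:
            warnings.warn(
                "MAX_TOKENS is no longer read for entity/relation summaries; "
                "set SUMMARY_MAX_TOKENS instead."
            )

    warn_stale_max_tokens()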
diff --git a/lightrag/api/config.py b/lightrag/api/config.py
index 756fd7d2..83a56f5a 100644
--- a/lightrag/api/config.py
+++ b/lightrag/api/config.py
@@ -122,7 +122,7 @@ def parse_args() -> argparse.Namespace:
     parser.add_argument(
         "--max-tokens",
         type=int,
-        default=get_env_value("MAX_TOKENS", DEFAULT_SUMMARY_MAX_TOKENS, int),
+        default=get_env_value("SUMMARY_MAX_TOKENS", DEFAULT_SUMMARY_MAX_TOKENS, int),
         help=f"Maximum token size (default: from env or {DEFAULT_SUMMARY_MAX_TOKENS})",
     )
diff --git a/lightrag/lightrag.py b/lightrag/lightrag.py
index 924c423e..8b214c16 100644
--- a/lightrag/lightrag.py
+++ b/lightrag/lightrag.py
@@ -283,7 +283,7 @@ class LightRAG:
     """Name of the LLM model used for generating responses."""

     summary_max_tokens: int = field(
-        default=int(os.getenv("MAX_TOKENS", DEFAULT_SUMMARY_MAX_TOKENS))
+        default=int(os.getenv("SUMMARY_MAX_TOKENS", DEFAULT_SUMMARY_MAX_TOKENS))
     )
     """Maximum number of tokens allowed per LLM response."""
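Reviewer note: both call sites keep `DEFAULT_SUMMARY_MAX_TOKENS` as the fallback, so behavior only changes for users who relied on the old variable name. A minimal sketch of the resulting precedence, assuming the default stays at 32000 as the README tables above state:

    import os

    DEFAULT_SUMMARY_MAX_TOKENS = 32000  # assumed here, per the README tables above

    # With SUMMARY_MAX_TOKENS unset, the compiled-in default applies.
    assert int(os.getenv("SUMMARY_MAX_TOKENS", DEFAULT_SUMMARY_MAX_TOKENS)) == 32000

    # Setting the renamed variable overrides the default; the old MAX_TOKENS
    # name is ignored by both lightrag.py and the API config above.
    os.environ["SUMMARY_MAX_TOKENS"] = "10000"
    assert int(os.getenv("SUMMARY_MAX_TOKENS", DEFAULT_SUMMARY_MAX_TOKENS)) == 10000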