feat: centralize environment variable defaults in constants.py

Commit e8e1f6ab56 (parent 91d0f65476)
7 changed files with 46 additions and 20 deletions
@@ -294,7 +294,7 @@ class QueryParam:
     top_k: int = int(os.getenv("TOP_K", "60"))
     """Number of top items to retrieve. Represents entities in 'local' mode and relationships in 'global' mode."""

-    chunk_top_k: int = int(os.getenv("CHUNK_TOP_K", "5"))
+    chunk_top_k: int = int(os.getenv("CHUNK_TOP_K", "10"))
     """Number of text chunks to retrieve initially from vector search and keep after reranking.
     If None, defaults to top_k value.
     """
@@ -301,7 +301,7 @@ class QueryParam:
     top_k: int = int(os.getenv("TOP_K", "60"))
     """Number of top items to retrieve. Represents entities in 'local' mode and relationships in 'global' mode."""

-    chunk_top_k: int = int(os.getenv("CHUNK_TOP_K", "5"))
+    chunk_top_k: int = int(os.getenv("CHUNK_TOP_K", "10"))
     """Number of text chunks to retrieve initially from vector search and keep after reranking.
     If None, defaults to top_k value.
     """
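
The two hunks above apply the same one-line change to a documented QueryParam snippet that is repeated in two files. The underlying pattern is worth spelling out: a dataclass field whose default is an environment lookup is evaluated once, at class-definition (import) time. A minimal, self-contained sketch of that pattern, using stand-in constants rather than the real lightrag.constants import:

    import os
    from dataclasses import dataclass

    # Stand-ins for the centralized constants this commit introduces.
    DEFAULT_TOP_K = 40
    DEFAULT_CHUNK_TOP_K = 10

    @dataclass
    class QueryParamSketch:
        # The env var wins when set; otherwise the centralized constant applies.
        # Note: os.getenv runs once, when the class body is executed at import.
        top_k: int = int(os.getenv("TOP_K", str(DEFAULT_TOP_K)))
        chunk_top_k: int = int(os.getenv("CHUNK_TOP_K", str(DEFAULT_CHUNK_TOP_K)))

    print(QueryParamSketch())  # QueryParamSketch(top_k=40, chunk_top_k=10) when the env vars are unset

A consequence of this design is that setting TOP_K after the module has been imported has no effect on new instances; the environment must be in place before import.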
@@ -57,9 +57,9 @@ OLLAMA_EMULATING_MODEL_TAG=latest

 # COSINE_THRESHOLD=0.2
 ### Number of entities or relations to retrieve from KG
-# TOP_K=60
+# TOP_K=40
 ### Number of text chunks to retrieve initially from vector search and keep after reranking
-# CHUNK_TOP_K=5
+# CHUNK_TOP_K=10

 ### Enable reranking for retrieved text chunks (default: true)
 # ENABLE_RERANK=true
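
These env.example entries stay commented out on purpose: an unset variable makes the lookup fall through to the centralized default, which this commit changes to 40 and 10. A small sketch of both branches, assuming the new constants.py values:

    import os

    DEFAULT_TOP_K = 40  # mirrors the new value in constants.py

    # With "# TOP_K=40" left commented out, the variable is unset and the
    # lookup falls through to the centralized default:
    print(int(os.getenv("TOP_K", str(DEFAULT_TOP_K))))  # -> 40

    # Uncommenting it (or exporting TOP_K) overrides the constant:
    os.environ["TOP_K"] = "100"
    print(int(os.getenv("TOP_K", str(DEFAULT_TOP_K))))  # -> 100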
@@ -11,6 +11,9 @@ from lightrag.utils import get_env_value
 from lightrag.constants import (
     DEFAULT_WOKERS,
     DEFAULT_TIMEOUT,
+    DEFAULT_TOP_K,
+    DEFAULT_CHUNK_TOP_K,
+    DEFAULT_HISTORY_TURNS,
 )

 # use the .env that is inside the current folder
@@ -154,7 +157,7 @@ def parse_args() -> argparse.Namespace:
     parser.add_argument(
         "--history-turns",
         type=int,
-        default=get_env_value("HISTORY_TURNS", 3, int),
+        default=get_env_value("HISTORY_TURNS", DEFAULT_HISTORY_TURNS, int),
         help="Number of conversation history turns to include (default: from env or 3)",
     )

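get_env_value comes from lightrag.utils and its implementation is not part of this diff. A plausible sketch, inferred only from the call sites here (variable name, default, coercion type) — the real helper may well differ, for example in how it treats booleans or empty strings:

    import os
    from typing import Any, Callable, TypeVar

    T = TypeVar("T")

    def get_env_value(env_key: str, default: T, value_type: Callable[[Any], T] = str) -> T:
        # Guessed behavior: read the variable, coerce it, and fall back to the
        # default when the variable is absent or cannot be converted.
        raw = os.getenv(env_key)
        if raw is None:
            return default
        try:
            return value_type(raw)
        except (TypeError, ValueError):
            return default

    print(get_env_value("HISTORY_TURNS", 3, int))  # -> 3 while HISTORY_TURNS is unset
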
@@ -162,13 +165,13 @@ def parse_args() -> argparse.Namespace:
     parser.add_argument(
         "--top-k",
         type=int,
-        default=get_env_value("TOP_K", 60, int),
+        default=get_env_value("TOP_K", DEFAULT_TOP_K, int),
         help="Number of most similar results to return (default: from env or 60)",
     )
     parser.add_argument(
         "--chunk-top-k",
         type=int,
-        default=get_env_value("CHUNK_TOP_K", 5, int),
+        default=get_env_value("CHUNK_TOP_K", DEFAULT_CHUNK_TOP_K, int),
         help="Number of text chunks to retrieve initially from vector search and keep after reranking (default: from env or 5)",
     )
     parser.add_argument(
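
Taken together, the parser hunks establish a three-level precedence for every tunable: an explicit CLI flag wins, then the environment variable, then the centralized constant. A minimal runnable sketch of that chain with a stand-in constant:

    import argparse
    import os

    DEFAULT_TOP_K = 40  # stands in for lightrag.constants.DEFAULT_TOP_K

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--top-k",
        type=int,
        default=int(os.getenv("TOP_K", DEFAULT_TOP_K)),  # env var beats the constant
    )

    args = parser.parse_args(["--top-k", "25"])  # an explicit flag beats both
    print(args.top_k)                # -> 25
    print(parser.parse_args([]).top_k)  # -> 40 when TOP_K is unset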
@@ -14,7 +14,16 @@ from typing import (
 )
 from .utils import EmbeddingFunc
 from .types import KnowledgeGraph
-from .constants import GRAPH_FIELD_SEP
+from .constants import (
+    GRAPH_FIELD_SEP,
+    DEFAULT_TOP_K,
+    DEFAULT_CHUNK_TOP_K,
+    DEFAULT_MAX_ENTITY_TOKENS,
+    DEFAULT_MAX_RELATION_TOKENS,
+    DEFAULT_MAX_TOTAL_TOKENS,
+    DEFAULT_HISTORY_TURNS,
+    DEFAULT_ENABLE_RERANK,
+)

 # use the .env that is inside the current folder
 # allows to use different .env file for each lightrag instance
@@ -57,21 +66,21 @@ class QueryParam:
     stream: bool = False
     """If True, enables streaming output for real-time responses."""

-    top_k: int = int(os.getenv("TOP_K", "60"))
+    top_k: int = int(os.getenv("TOP_K", str(DEFAULT_TOP_K)))
     """Number of top items to retrieve. Represents entities in 'local' mode and relationships in 'global' mode."""

-    chunk_top_k: int = int(os.getenv("CHUNK_TOP_K", "5"))
+    chunk_top_k: int = int(os.getenv("CHUNK_TOP_K", str(DEFAULT_CHUNK_TOP_K)))
     """Number of text chunks to retrieve initially from vector search and keep after reranking.
     If None, defaults to top_k value.
     """

-    max_entity_tokens: int = int(os.getenv("MAX_ENTITY_TOKENS", "10000"))
+    max_entity_tokens: int = int(os.getenv("MAX_ENTITY_TOKENS", str(DEFAULT_MAX_ENTITY_TOKENS)))
     """Maximum number of tokens allocated for entity context in unified token control system."""

-    max_relation_tokens: int = int(os.getenv("MAX_RELATION_TOKENS", "10000"))
+    max_relation_tokens: int = int(os.getenv("MAX_RELATION_TOKENS", str(DEFAULT_MAX_RELATION_TOKENS)))
     """Maximum number of tokens allocated for relationship context in unified token control system."""

-    max_total_tokens: int = int(os.getenv("MAX_TOTAL_TOKENS", "32000"))
+    max_total_tokens: int = int(os.getenv("MAX_TOTAL_TOKENS", str(DEFAULT_MAX_TOTAL_TOKENS)))
     """Maximum total tokens budget for the entire query context (entities + relations + chunks + system prompt)."""

     hl_keywords: list[str] = field(default_factory=list)
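
One detail of the hunk above: os.getenv returns a string when the variable is set, so the numeric constants are passed through str() to keep both branches the same type before the single int() conversion. A sketch with a stand-in constant:

    import os

    DEFAULT_MAX_TOTAL_TOKENS = 32000  # stands in for the constants.py value

    # os.getenv yields a str when the variable is set, so the default is passed
    # as str(...) too and one int() conversion covers both branches.
    max_total_tokens = int(os.getenv("MAX_TOTAL_TOKENS", str(DEFAULT_MAX_TOTAL_TOKENS)))
    print(max_total_tokens)  # -> 32000 when MAX_TOTAL_TOKENS is unset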
@@ -85,7 +94,7 @@ class QueryParam:
     Format: [{"role": "user/assistant", "content": "message"}].
     """

-    history_turns: int = int(os.getenv("HISTORY_TURNS", "3"))
+    history_turns: int = int(os.getenv("HISTORY_TURNS", str(DEFAULT_HISTORY_TURNS)))
     """Number of complete conversation turns (user-assistant pairs) to consider in the response context."""

     ids: list[str] | None = None
@@ -102,7 +111,7 @@ class QueryParam:
     If provided, this will be used instead of the default value from the prompt template.
     """

-    enable_rerank: bool = os.getenv("ENABLE_RERANK", "true").lower() == "true"
+    enable_rerank: bool = os.getenv("ENABLE_RERANK", str(DEFAULT_ENABLE_RERANK).lower()).lower() == "true"
     """Enable reranking for retrieved text chunks. If True but no rerank model is configured, a warning will be issued.
     Default is True to enable reranking when rerank model is available.
     """
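
The boolean default takes one extra step: str(True) yields "True", so the constant is lowercased before the comparison. A sketch with a stand-in constant:

    import os

    DEFAULT_ENABLE_RERANK = True  # stands in for the constants.py value

    # str(True) is "True", hence the .lower() on the default; any value other
    # than the string "true" (case-insensitive) disables reranking.
    enable_rerank = os.getenv("ENABLE_RERANK", str(DEFAULT_ENABLE_RERANK).lower()).lower() == "true"
    print(enable_rerank)  # -> True when ENABLE_RERANK is unset

    os.environ["ENABLE_RERANK"] = "False"
    print(os.getenv("ENABLE_RERANK", "true").lower() == "true")  # -> False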
@@ -13,6 +13,15 @@ DEFAULT_FORCE_LLM_SUMMARY_ON_MERGE = 6
 DEFAULT_WOKERS = 2
 DEFAULT_TIMEOUT = 150

+# Query and retrieval configuration defaults
+DEFAULT_TOP_K = 40
+DEFAULT_CHUNK_TOP_K = 10
+DEFAULT_MAX_ENTITY_TOKENS = 10000
+DEFAULT_MAX_RELATION_TOKENS = 10000
+DEFAULT_MAX_TOTAL_TOKENS = 32000
+DEFAULT_HISTORY_TURNS = 3
+DEFAULT_ENABLE_RERANK = True
+
 # Separator for graph fields
 GRAPH_FIELD_SEP = "<SEP>"
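
This hunk is the heart of the commit: every fallback literal that used to be repeated across base.py, the API config, and operate.py (60, 5, 10000, 32000, 3, "true") now has exactly one defining line. A toy sketch of how the other modules consume these names, assuming the relevant env vars are unset:

    import os

    # constants.py is now the single defining site:
    DEFAULT_TOP_K = 40

    # base.py derives its dataclass default from it...
    top_k = int(os.getenv("TOP_K", str(DEFAULT_TOP_K)))

    # ...and the CLI config hands the same name to argparse:
    cli_default = int(os.getenv("TOP_K", DEFAULT_TOP_K))

    # With TOP_K unset, both resolve identically, so the previously scattered
    # literals (60 here, 5 there) can no longer drift apart.
    assert top_k == cli_default == 40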
@@ -36,7 +36,12 @@ from .base import (
     QueryParam,
 )
 from .prompt import PROMPTS
-from .constants import GRAPH_FIELD_SEP
+from .constants import (
+    GRAPH_FIELD_SEP,
+    DEFAULT_MAX_ENTITY_TOKENS,
+    DEFAULT_MAX_RELATION_TOKENS,
+    DEFAULT_MAX_TOTAL_TOKENS,
+)
 from .kg.shared_storage import get_storage_keyed_lock
 import time
 from dotenv import load_dotenv
@@ -1960,17 +1965,17 @@ async def _build_query_context(
     max_entity_tokens = getattr(
         query_param,
         "max_entity_tokens",
-        text_chunks_db.global_config.get("MAX_ENTITY_TOKENS", 10000),
+        text_chunks_db.global_config.get("MAX_ENTITY_TOKENS", DEFAULT_MAX_ENTITY_TOKENS),
     )
     max_relation_tokens = getattr(
         query_param,
         "max_relation_tokens",
-        text_chunks_db.global_config.get("MAX_RELATION_TOKENS", 10000),
+        text_chunks_db.global_config.get("MAX_RELATION_TOKENS", DEFAULT_MAX_RELATION_TOKENS),
     )
     max_total_tokens = getattr(
         query_param,
         "max_total_tokens",
-        text_chunks_db.global_config.get("MAX_TOTAL_TOKENS", 32000),
+        text_chunks_db.global_config.get("MAX_TOTAL_TOKENS", DEFAULT_MAX_TOTAL_TOKENS),
     )

     # Truncate entities based on complete JSON serialization
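
The getattr chain above resolves each token budget in three steps: a value carried on query_param, then the global_config entry, then the shared constant. A self-contained sketch, using a hypothetical parameter object that deliberately lacks the attribute so the fallback is exercised:

    DEFAULT_MAX_ENTITY_TOKENS = 10000  # stands in for the constants.py value

    class ParamWithoutBudget:
        pass  # hypothetical: no max_entity_tokens attribute, so getattr falls back

    global_config = {}  # no "MAX_ENTITY_TOKENS" key either

    # Resolution order: query_param attribute -> global_config entry -> constant.
    max_entity_tokens = getattr(
        ParamWithoutBudget(),
        "max_entity_tokens",
        global_config.get("MAX_ENTITY_TOKENS", DEFAULT_MAX_ENTITY_TOKENS),
    )
    print(max_entity_tokens)  # -> 10000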
@@ -2688,7 +2693,7 @@ async def naive_query(
     # Calculate dynamic token limit for chunks
     # Get token limits from query_param (with fallback to global_config)
     max_total_tokens = getattr(
-        query_param, "max_total_tokens", global_config.get("MAX_TOTAL_TOKENS", 32000)
+        query_param, "max_total_tokens", global_config.get("MAX_TOTAL_TOKENS", DEFAULT_MAX_TOTAL_TOKENS)
     )

     # Calculate conversation history tokens