Fix linting
This commit is contained in:
parent 7735970a3b
commit 1927cb2685
2 changed files with 24 additions and 8 deletions
@@ -74,13 +74,19 @@ class QueryParam:
     If None, defaults to top_k value.
     """
 
-    max_entity_tokens: int = int(os.getenv("MAX_ENTITY_TOKENS", str(DEFAULT_MAX_ENTITY_TOKENS)))
+    max_entity_tokens: int = int(
+        os.getenv("MAX_ENTITY_TOKENS", str(DEFAULT_MAX_ENTITY_TOKENS))
+    )
     """Maximum number of tokens allocated for entity context in unified token control system."""
 
-    max_relation_tokens: int = int(os.getenv("MAX_RELATION_TOKENS", str(DEFAULT_MAX_RELATION_TOKENS)))
+    max_relation_tokens: int = int(
+        os.getenv("MAX_RELATION_TOKENS", str(DEFAULT_MAX_RELATION_TOKENS))
+    )
     """Maximum number of tokens allocated for relationship context in unified token control system."""
 
-    max_total_tokens: int = int(os.getenv("MAX_TOTAL_TOKENS", str(DEFAULT_MAX_TOTAL_TOKENS)))
+    max_total_tokens: int = int(
+        os.getenv("MAX_TOTAL_TOKENS", str(DEFAULT_MAX_TOTAL_TOKENS))
+    )
     """Maximum total tokens budget for the entire query context (entities + relations + chunks + system prompt)."""
 
     hl_keywords: list[str] = field(default_factory=list)
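For reference, the three fields split above all follow the same environment-variable override pattern; a minimal standalone sketch (DEFAULT_MAX_ENTITY_TOKENS below is a stand-in value, not necessarily the project's actual default):

import os

DEFAULT_MAX_ENTITY_TOKENS = 6000  # stand-in; the real constant lives in the project's constants module

# The env var, when set, overrides the module default. os.getenv expects a
# string default, hence the str()/int() round-trip around the constant.
max_entity_tokens = int(
    os.getenv("MAX_ENTITY_TOKENS", str(DEFAULT_MAX_ENTITY_TOKENS))
)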
@@ -111,7 +117,9 @@ class QueryParam:
     If provided, this will be used instead of the default value from the prompt template.
     """
 
-    enable_rerank: bool = os.getenv("ENABLE_RERANK", str(DEFAULT_ENABLE_RERANK).lower()).lower() == "true"
+    enable_rerank: bool = (
+        os.getenv("ENABLE_RERANK", str(DEFAULT_ENABLE_RERANK).lower()).lower() == "true"
+    )
     """Enable reranking for retrieved text chunks. If True but no rerank model is configured, a warning will be issued.
     Default is True to enable reranking when rerank model is available.
     """
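The enable_rerank field uses the same idea for a boolean. Since os.getenv always returns a string, the value is compared against "true" after lowercasing both sides; a small sketch, with DEFAULT_ENABLE_RERANK as a stand-in constant:

import os

DEFAULT_ENABLE_RERANK = True  # stand-in for the project constant

# str(True).lower() == "true", so the default compares equal when the env var
# is unset; any other value ("false", "0", "no") disables reranking.
enable_rerank = (
    os.getenv("ENABLE_RERANK", str(DEFAULT_ENABLE_RERANK).lower()).lower() == "true"
)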
@@ -1964,17 +1964,23 @@ async def _build_query_context(
     max_entity_tokens = getattr(
         query_param,
         "max_entity_tokens",
-        text_chunks_db.global_config.get("MAX_ENTITY_TOKENS", DEFAULT_MAX_ENTITY_TOKENS),
+        text_chunks_db.global_config.get(
+            "MAX_ENTITY_TOKENS", DEFAULT_MAX_ENTITY_TOKENS
+        ),
     )
     max_relation_tokens = getattr(
         query_param,
         "max_relation_tokens",
-        text_chunks_db.global_config.get("MAX_RELATION_TOKENS", DEFAULT_MAX_RELATION_TOKENS),
+        text_chunks_db.global_config.get(
+            "MAX_RELATION_TOKENS", DEFAULT_MAX_RELATION_TOKENS
+        ),
     )
     max_total_tokens = getattr(
         query_param,
         "max_total_tokens",
-        text_chunks_db.global_config.get("MAX_TOTAL_TOKENS", DEFAULT_MAX_TOTAL_TOKENS),
+        text_chunks_db.global_config.get(
+            "MAX_TOTAL_TOKENS", DEFAULT_MAX_TOTAL_TOKENS
+        ),
     )
 
     # Truncate entities based on complete JSON serialization
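Both reformatted call sites resolve each budget through the same three-level fallback: the attribute on query_param, then the storage's global_config mapping, then the module default. A hedged sketch of that resolution order, using simplified stand-ins for query_param and global_config:

from dataclasses import dataclass

DEFAULT_MAX_ENTITY_TOKENS = 6000  # stand-in default

@dataclass
class FakeQueryParam:
    max_entity_tokens: int = 4000  # per-query override

query_param = FakeQueryParam()
global_config = {"MAX_ENTITY_TOKENS": 5000}  # stand-in for text_chunks_db.global_config

# getattr's third argument is evaluated eagerly but only used when the
# attribute is missing, so a value set on the query parameter always wins.
max_entity_tokens = getattr(
    query_param,
    "max_entity_tokens",
    global_config.get("MAX_ENTITY_TOKENS", DEFAULT_MAX_ENTITY_TOKENS),
)
assert max_entity_tokens == 4000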
@@ -2692,7 +2698,9 @@ async def naive_query(
     # Calculate dynamic token limit for chunks
     # Get token limits from query_param (with fallback to global_config)
     max_total_tokens = getattr(
-        query_param, "max_total_tokens", global_config.get("MAX_TOTAL_TOKENS", DEFAULT_MAX_TOTAL_TOKENS)
+        query_param,
+        "max_total_tokens",
+        global_config.get("MAX_TOTAL_TOKENS", DEFAULT_MAX_TOTAL_TOKENS),
     )
 
     # Calculate conversation history tokens
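As a usage sketch of the naive_query hunk above, the fallback path only matters when the attribute is absent, for example on an older parameter object; all names below are illustrative stand-ins:

DEFAULT_MAX_TOTAL_TOKENS = 30000  # stand-in default

class LegacyParam:
    """A parameter object without the max_total_tokens field."""

query_param = LegacyParam()
global_config = {}  # nothing configured

# Attribute missing and key missing, so the module default is returned.
max_total_tokens = getattr(
    query_param,
    "max_total_tokens",
    global_config.get("MAX_TOTAL_TOKENS", DEFAULT_MAX_TOTAL_TOKENS),
)
assert max_total_tokens == DEFAULT_MAX_TOTAL_TOKENS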