Fix hl_keywords and ll_keywords cache logic

- Remove hl_keywords and ll_keywords from keyword extraction cache
- Add hl_keywords and ll_keywords to LLM query cache
This commit is contained in:
yangdx 2025-09-27 15:26:52 +08:00
parent e7948df541
commit dbb0b3afb4

View file

@ -2368,8 +2368,8 @@ async def kg_query(
query_param.max_entity_tokens, query_param.max_entity_tokens,
query_param.max_relation_tokens, query_param.max_relation_tokens,
query_param.max_total_tokens, query_param.max_total_tokens,
query_param.hl_keywords or [], hl_keywords_str,
query_param.ll_keywords or [], ll_keywords_str,
query_param.user_prompt or "", query_param.user_prompt or "",
query_param.enable_rerank, query_param.enable_rerank,
) )
@ -2402,8 +2402,8 @@ async def kg_query(
"max_entity_tokens": query_param.max_entity_tokens, "max_entity_tokens": query_param.max_entity_tokens,
"max_relation_tokens": query_param.max_relation_tokens, "max_relation_tokens": query_param.max_relation_tokens,
"max_total_tokens": query_param.max_total_tokens, "max_total_tokens": query_param.max_total_tokens,
"hl_keywords": query_param.hl_keywords or [], "hl_keywords": hl_keywords_str,
"ll_keywords": query_param.ll_keywords or [], "ll_keywords": ll_keywords_str,
"user_prompt": query_param.user_prompt or "", "user_prompt": query_param.user_prompt or "",
"enable_rerank": query_param.enable_rerank, "enable_rerank": query_param.enable_rerank,
} }
@ -2491,8 +2491,6 @@ async def extract_keywords_only(
args_hash = compute_args_hash( args_hash = compute_args_hash(
param.mode, param.mode,
text, text,
param.hl_keywords or [],
param.ll_keywords or [],
) )
cached_result = await handle_cache( cached_result = await handle_cache(
hashing_kv, args_hash, text, param.mode, cache_type="keywords" hashing_kv, args_hash, text, param.mode, cache_type="keywords"
@ -2568,8 +2566,6 @@ async def extract_keywords_only(
"max_entity_tokens": param.max_entity_tokens, "max_entity_tokens": param.max_entity_tokens,
"max_relation_tokens": param.max_relation_tokens, "max_relation_tokens": param.max_relation_tokens,
"max_total_tokens": param.max_total_tokens, "max_total_tokens": param.max_total_tokens,
"hl_keywords": param.hl_keywords or [],
"ll_keywords": param.ll_keywords or [],
"user_prompt": param.user_prompt or "", "user_prompt": param.user_prompt or "",
"enable_rerank": param.enable_rerank, "enable_rerank": param.enable_rerank,
} }
@ -4165,8 +4161,6 @@ async def naive_query(
query_param.max_entity_tokens, query_param.max_entity_tokens,
query_param.max_relation_tokens, query_param.max_relation_tokens,
query_param.max_total_tokens, query_param.max_total_tokens,
query_param.hl_keywords or [],
query_param.ll_keywords or [],
query_param.user_prompt or "", query_param.user_prompt or "",
query_param.enable_rerank, query_param.enable_rerank,
) )
@ -4197,8 +4191,6 @@ async def naive_query(
"max_entity_tokens": query_param.max_entity_tokens, "max_entity_tokens": query_param.max_entity_tokens,
"max_relation_tokens": query_param.max_relation_tokens, "max_relation_tokens": query_param.max_relation_tokens,
"max_total_tokens": query_param.max_total_tokens, "max_total_tokens": query_param.max_total_tokens,
"hl_keywords": query_param.hl_keywords or [],
"ll_keywords": query_param.ll_keywords or [],
"user_prompt": query_param.user_prompt or "", "user_prompt": query_param.user_prompt or "",
"enable_rerank": query_param.enable_rerank, "enable_rerank": query_param.enable_rerank,
} }