diff --git a/README-zh.md b/README-zh.md
index 45335489..e9599099 100644
--- a/README-zh.md
+++ b/README-zh.md
@@ -30,7 +30,7 @@

-
+

diff --git a/lightrag/operate.py b/lightrag/operate.py
index 88837435..4e219cf8 100644
--- a/lightrag/operate.py
+++ b/lightrag/operate.py
@@ -26,6 +26,7 @@ from .utils import (
     get_conversation_turns,
     use_llm_func_with_cache,
     update_chunk_cache_list,
+    remove_think_tags,
 )
 from .base import (
     BaseGraphStorage,
@@ -1703,7 +1704,8 @@ async def extract_keywords_only(
     result = await use_model_func(kw_prompt, keyword_extraction=True)
 
     # 6. Parse out JSON from the LLM response
-    match = re.search(r"\{.*\}", result, re.DOTALL)
+    result = remove_think_tags(result)
+    match = re.search(r"\{.*?\}", result, re.DOTALL)
     if not match:
         logger.error("No JSON-like structure found in the LLM respond.")
         return [], []
diff --git a/lightrag/utils.py b/lightrag/utils.py
index c6e2def9..386de3ab 100644
--- a/lightrag/utils.py
+++ b/lightrag/utils.py
@@ -1465,6 +1465,11 @@ async def update_chunk_cache_list(
     )
 
 
+def remove_think_tags(text: str) -> str:
+    """Remove <think> tags from the text"""
+    return re.sub(r"^(<think>.*?</think>|<think>)", "", text, flags=re.DOTALL).strip()
+
+
 async def use_llm_func_with_cache(
     input_text: str,
     use_llm_func: callable,
@@ -1531,6 +1536,7 @@
             kwargs["max_tokens"] = max_tokens
 
         res: str = await use_llm_func(input_text, **kwargs)
+        res = remove_think_tags(res)
 
         if llm_response_cache.global_config.get("enable_llm_cache_for_entity_extract"):
             await save_to_cache(
@@ -1557,8 +1563,9 @@
     if max_tokens is not None:
         kwargs["max_tokens"] = max_tokens
 
-    logger.info(f"Call LLM function with query text lenght: {len(input_text)}")
-    return await use_llm_func(input_text, **kwargs)
+    logger.info(f"Call LLM function with query text length: {len(input_text)}")
+    res = await use_llm_func(input_text, **kwargs)
+    return remove_think_tags(res)
 
 
 def get_content_summary(content: str, max_length: int = 250) -> str:
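
For context, a minimal sketch (not part of the patch; the sample response string below is invented) of what the new remove_think_tags helper does. Reasoning models such as DeepSeek-R1 emit a <think>...</think> block ahead of the final answer; stripping it before parsing keeps the JSON extraction in extract_keywords_only and the cached results in use_llm_func_with_cache free of chain-of-thought text:

    import re

    def remove_think_tags(text: str) -> str:
        """Remove <think> tags from the text"""
        # Drop a leading <think>...</think> block, or a bare opening <think>
        # the model never closed, then trim surrounding whitespace.
        return re.sub(r"^(<think>.*?</think>|<think>)", "", text, flags=re.DOTALL).strip()

    raw = '<think>\nThe user asks about RAG...\n</think>\n{"high_level_keywords": ["RAG"]}'
    print(remove_think_tags(raw))  # -> {"high_level_keywords": ["RAG"]}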