From 8b7e53e643054b12399b201d08f31ecbb6fc6ce1 Mon Sep 17 00:00:00 2001
From: Kevin Hu
Date: Fri, 21 Mar 2025 17:30:38 +0800
Subject: [PATCH] Fix: miscalculation of token number. (#6401)

### What problem does this PR solve?

#6308

### Type of change

- [x] Bug Fix (non-breaking change which fixes an issue)
---
 graphrag/general/extractor.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/graphrag/general/extractor.py b/graphrag/general/extractor.py
index ff3c81add..5f62d137d 100644
--- a/graphrag/general/extractor.py
+++ b/graphrag/general/extractor.py
@@ -59,7 +59,7 @@ class Extractor:
         response = get_llm_cache(self._llm.llm_name, system, hist, conf)
         if response:
             return response
-        _, system_msg = message_fit_in([{"role": "system", "content": system}], int(self._llm.max_length * 0.97))
+        _, system_msg = message_fit_in([{"role": "system", "content": system}], int(self._llm.max_length * 0.92))
         response = self._llm.chat(system_msg[0]["content"], hist, conf)
         response = re.sub(r"<think>.*</think>", "", response, flags=re.DOTALL)
         if response.find("**ERROR**") >= 0:
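
The one-line change above shrinks the share of the model's context window reserved for the system prompt from 97% to 92% of `self._llm.max_length`, leaving more headroom for the chat history and the model's reply. The sketch below is a minimal illustration of what a `message_fit_in`-style helper does; it is an assumption for illustration only, not RAGFlow's actual implementation. The names `fit_messages` and `rough_token_count` are hypothetical, as is the 4-characters-per-token estimate.

```python
from typing import Dict, List, Tuple


def rough_token_count(text: str) -> int:
    # Hypothetical estimator: ~4 characters per token is a common rule of thumb.
    return max(1, len(text) // 4)


def fit_messages(msgs: List[Dict[str, str]], max_tokens: int) -> Tuple[int, List[Dict[str, str]]]:
    """Trim a message list so its estimated token count fits within max_tokens."""
    total = sum(rough_token_count(m["content"]) for m in msgs)
    if total <= max_tokens:
        return total, msgs

    fitted: List[Dict[str, str]] = []
    budget = max_tokens
    for m in msgs:
        need = rough_token_count(m["content"])
        if need <= budget:
            fitted.append(m)
            budget -= need
        else:
            # Truncate the overlong message to the remaining budget and stop.
            fitted.append({"role": m["role"], "content": m["content"][: budget * 4]})
            budget = 0
            break
    return max_tokens - budget, fitted


# Usage mirroring the patched call site: reserve only 92% of the context
# window for the system prompt, keeping headroom for history and completion.
max_length = 8192  # hypothetical model context size
_, system_msg = fit_messages(
    [{"role": "system", "content": "You are an entity extractor..."}],
    int(max_length * 0.92),
)
```

A plausible reading of why 0.97 was too tight: token-count estimates on the client side often undershoot the provider's actual tokenizer, so budgeting 97% of `max_length` for the system prompt alone can overflow the context window once the history and completion are added; 92% leaves a wider safety margin.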