diff --git a/graphiti_core/llm_client/client.py b/graphiti_core/llm_client/client.py
index b267785b..aa1a0f8a 100644
--- a/graphiti_core/llm_client/client.py
+++ b/graphiti_core/llm_client/client.py
@@ -32,6 +32,10 @@ from .errors import RateLimitError
 
 DEFAULT_TEMPERATURE = 0
 DEFAULT_CACHE_DIR = './llm_cache'
+MULTILINGUAL_EXTRACTION_RESPONSES = (
+    '\n\nAny extracted information should be returned in the same language as it was written in.'
+)
+
 
 logger = logging.getLogger(__name__)
 
@@ -133,6 +137,9 @@ class LLMClient(ABC):
                 f'\n\nRespond with a JSON object in the following format:\n\n{serialized_model}'
             )
 
+        # Add multilingual extraction instructions
+        messages[0].content += MULTILINGUAL_EXTRACTION_RESPONSES
+
         if self.cache_enabled and self.cache_dir is not None:
             cache_key = self._get_cache_key(messages)
 
diff --git a/graphiti_core/llm_client/openai_client.py b/graphiti_core/llm_client/openai_client.py
index 6abf90f9..5c187d21 100644
--- a/graphiti_core/llm_client/openai_client.py
+++ b/graphiti_core/llm_client/openai_client.py
@@ -24,7 +24,7 @@ from openai.types.chat import ChatCompletionMessageParam
 from pydantic import BaseModel
 
 from ..prompts.models import Message
-from .client import LLMClient
+from .client import MULTILINGUAL_EXTRACTION_RESPONSES, LLMClient
 from .config import DEFAULT_MAX_TOKENS, LLMConfig
 from .errors import RateLimitError, RefusalError
 
@@ -136,6 +136,9 @@ class OpenAIClient(LLMClient):
         retry_count = 0
         last_error = None
 
+        # Add multilingual extraction instructions
+        messages[0].content += MULTILINGUAL_EXTRACTION_RESPONSES
+
         while retry_count <= self.MAX_RETRIES:
             try:
                 response = await self._generate_response(messages, response_model, max_tokens)
diff --git a/graphiti_core/llm_client/openai_generic_client.py b/graphiti_core/llm_client/openai_generic_client.py
index b64ca67b..184348cc 100644
--- a/graphiti_core/llm_client/openai_generic_client.py
+++ b/graphiti_core/llm_client/openai_generic_client.py
@@ -25,7 +25,7 @@ from openai.types.chat import ChatCompletionMessageParam
 from pydantic import BaseModel
 
 from ..prompts.models import Message
-from .client import LLMClient
+from .client import MULTILINGUAL_EXTRACTION_RESPONSES, LLMClient
 from .config import DEFAULT_MAX_TOKENS, LLMConfig
 from .errors import RateLimitError, RefusalError
 
@@ -130,6 +130,9 @@ class OpenAIGenericClient(LLMClient):
                 f'\n\nRespond with a JSON object in the following format:\n\n{serialized_model}'
             )
 
+        # Add multilingual extraction instructions
+        messages[0].content += MULTILINGUAL_EXTRACTION_RESPONSES
+
         while retry_count <= self.MAX_RETRIES:
             try:
                 response = await self._generate_response(
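
The pattern this diff applies in all three clients is the same: before the request is dispatched, a fixed multilingual instruction is appended to the first message, which is assumed to be the system prompt. Below is a minimal standalone sketch of that pattern; the simplified `Message` dataclass and the `apply_multilingual_instruction` helper are illustrative stand-ins, not part of the Graphiti codebase (the real class lives in `graphiti_core.prompts.models`).

```python
from dataclasses import dataclass

# Same constant the diff introduces in graphiti_core/llm_client/client.py.
MULTILINGUAL_EXTRACTION_RESPONSES = (
    '\n\nAny extracted information should be returned in the same language as it was written in.'
)


@dataclass
class Message:
    # Simplified stand-in for graphiti_core's Message model.
    role: str
    content: str


def apply_multilingual_instruction(messages: list[Message]) -> list[Message]:
    # Mirrors the diff: append the instruction to the system prompt
    # (messages[0]) in place before sending the request.
    messages[0].content += MULTILINGUAL_EXTRACTION_RESPONSES
    return messages


messages = [
    Message(role='system', content='You are an entity extraction assistant.'),
    Message(role='user', content='Anna ist gestern nach Berlin gezogen.'),
]
apply_multilingual_instruction(messages)
print(messages[0].content)
```

One consequence of the `+=` mutation worth noting: because the system `Message` object is modified in place, a caller that reuses the same message list across multiple `generate_response` calls would accumulate the instruction once per call.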