diff --git a/.env.example b/.env.example
index 910972b3..2724ecb8 100644
--- a/.env.example
+++ b/.env.example
@@ -7,4 +7,5 @@ DEFAULT_DATABASE=
 USE_PARALLEL_RUNTIME=
 SEMAPHORE_LIMIT=
 GITHUB_SHA=
-MAX_REFLEXION_ITERATIONS=
\ No newline at end of file
+MAX_REFLEXION_ITERATIONS=
+ANTHROPIC_API_KEY=
\ No newline at end of file
diff --git a/graphiti_core/llm_client/anthropic_client.py b/graphiti_core/llm_client/anthropic_client.py
index 866a1fea..f6161cee 100644
--- a/graphiti_core/llm_client/anthropic_client.py
+++ b/graphiti_core/llm_client/anthropic_client.py
@@ -139,15 +139,11 @@ class AnthropicClient(LLMClient):
             A list containing a single tool definition for use with the Anthropic API.
         """
         if response_model is not None:
-            # temporary debug log
-            logger.info(f'Creating tool for response_model: {response_model}')
             # Use the response_model to define the tool
             model_schema = response_model.model_json_schema()
             tool_name = response_model.__name__
             description = model_schema.get('description', f'Extract {tool_name} information')
         else:
-            # temporary debug log
-            logger.info('Creating generic JSON output tool')
             # Create a generic JSON output tool
             tool_name = 'generic_json_output'
             description = 'Output data in JSON format'
@@ -205,8 +201,6 @@ class AnthropicClient(LLMClient):
         try:
             # Create the appropriate tool based on whether response_model is provided
             tools, tool_choice = self._create_tool(response_model)
-            # temporary debug log
-            logger.info(f'using model: {self.model} with max_tokens: {self.max_tokens}')
             result = await self.client.messages.create(
                 system=system_message.content,
                 max_tokens=max_creation_tokens,
@@ -227,13 +221,6 @@ class AnthropicClient(LLMClient):
                     return tool_args
 
         # If we didn't get a proper tool_use response, try to extract from text
-        # logger.debug(
-        #     f'Did not get a tool_use response, trying to extract json from text. Result: {result.content}'
-        # )
-        # temporary debug log
-        logger.info(
-            f'Did not get a tool_use response, trying to extract json from text. Result: {result.content}'
-        )
         for content_item in result.content:
             if content_item.type == 'text':
                 return self._extract_json_from_text(content_item.text)
diff --git a/tests/integrations/test_anthropic_client_int.py b/tests/llm_client/test_anthropic_client_int.py
similarity index 96%
rename from tests/integrations/test_anthropic_client_int.py
rename to tests/llm_client/test_anthropic_client_int.py
index 6b9dcdc5..ce49a3c3 100644
--- a/tests/integrations/test_anthropic_client_int.py
+++ b/tests/llm_client/test_anthropic_client_int.py
@@ -78,7 +78,7 @@ async def test_extract_json_from_text():
     # A string with embedded JSON
     text = 'Some text before {"message": "Hello, world!"} and after'
 
-    result = client._extract_json_from_text(text)
+    result = client._extract_json_from_text(text)  # type: ignore  # ignore type check for private method
 
     assert isinstance(result, dict)
     assert 'message' in result
diff --git a/tests/llm_client/test_client.py b/tests/llm_client/test_client.py
index 2716395d..a1032f45 100644
--- a/tests/llm_client/test_client.py
+++ b/tests/llm_client/test_client.py
@@ -18,7 +18,7 @@ from graphiti_core.llm_client.client import LLMClient
 from graphiti_core.llm_client.config import LLMConfig
 
 
-class TestLLMClient(LLMClient):
+class MockLLMClient(LLMClient):
     """Concrete implementation of LLMClient for testing"""
 
     async def _generate_response(self, messages, response_model=None):
@@ -26,7 +26,7 @@
 
 
 def test_clean_input():
-    client = TestLLMClient(LLMConfig())
+    client = MockLLMClient(LLMConfig())
     test_cases = [
         # Basic text should remain unchanged
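Review note (outside the patch): the code these temporary logs were instrumenting is the client's forced tool-use pattern, where a Pydantic `response_model` is turned into an Anthropic tool definition so the reply comes back as structured JSON. A minimal standalone sketch of that pattern, assuming only the public `anthropic` and `pydantic` APIs; `ExtractedSummary`, the prompt, and the model id are hypothetical stand-ins, not code from this PR:

```python
from anthropic import AsyncAnthropic
from pydantic import BaseModel


class ExtractedSummary(BaseModel):
    """Hypothetical response model; stands in for Graphiti's real ones."""

    summary: str


async def structured_call(client: AsyncAnthropic, prompt: str) -> dict:
    # Build a tool definition from the Pydantic JSON schema, the same idea
    # as AnthropicClient._create_tool when a response_model is provided.
    schema = ExtractedSummary.model_json_schema()
    tool = {
        'name': ExtractedSummary.__name__,
        'description': schema.get('description', 'Extract ExtractedSummary information'),
        'input_schema': schema,
    }
    result = await client.messages.create(
        model='claude-3-5-sonnet-latest',  # assumed model id, not from this diff
        max_tokens=1024,
        messages=[{'role': 'user', 'content': prompt}],
        tools=[tool],
        # Forcing this tool makes a tool_use block the expected reply shape.
        tool_choice={'type': 'tool', 'name': ExtractedSummary.__name__},
    )
    # Happy path: the structured arguments arrive on a tool_use content block.
    for item in result.content:
        if item.type == 'tool_use':
            return dict(item.input)
    raise ValueError('no tool_use block in response')
```

The final loop is also why the text-extraction fallback touched in the diff exists: if the model declines the forced tool, the only salvageable output is a plain text block, which `_extract_json_from_text` then scrapes for embedded JSON.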