refactor: Rename llitellm_instructor to litellm_instructor

This commit is contained in:
Igor Ilic 2025-08-05 20:23:50 +02:00
parent 1fd4c4fa8b
commit 6f8c4205c5
27 changed files with 40 additions and 37 deletions

View file

@@ -21,7 +21,7 @@ class LLMAdapter:
def acreate_structured_output(
text_input: str, system_prompt: str, response_model: Type[BaseModel]
) -> BaseModel:
from cognee.infrastructure.llm.structured_output_framework.llitellm_instructor.llm.get_llm_client import (
from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.llm.get_llm_client import (
get_llm_client,
)
@@ -34,7 +34,7 @@ class LLMAdapter:
def create_structured_output(
text_input: str, system_prompt: str, response_model: Type[BaseModel]
) -> BaseModel:
from cognee.infrastructure.llm.structured_output_framework.llitellm_instructor.llm.get_llm_client import (
from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.llm.get_llm_client import (
get_llm_client,
)
@@ -45,7 +45,7 @@ class LLMAdapter:
@staticmethod
def create_transcript(input):
from cognee.infrastructure.llm.structured_output_framework.llitellm_instructor.llm.get_llm_client import (
from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.llm.get_llm_client import (
get_llm_client,
)
@@ -54,7 +54,7 @@ class LLMAdapter:
@staticmethod
def transcribe_image(input):
from cognee.infrastructure.llm.structured_output_framework.llitellm_instructor.llm.get_llm_client import (
from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.llm.get_llm_client import (
get_llm_client,
)
@@ -63,7 +63,7 @@ class LLMAdapter:
@staticmethod
def show_prompt(text_input: str, system_prompt: str) -> str:
from cognee.infrastructure.llm.structured_output_framework.llitellm_instructor.llm.get_llm_client import (
from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.llm.get_llm_client import (
get_llm_client,
)
@@ -88,7 +88,7 @@ class LLMAdapter:
return extract_content_graph(content=content, response_model=response_model, mode=mode)
else:
from cognee.infrastructure.llm.structured_output_framework.llitellm_instructor.extraction import (
from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.extraction import (
extract_content_graph,
)
@@ -97,7 +97,7 @@ class LLMAdapter:
@staticmethod
def extract_categories(content: str, response_model: Type[BaseModel]):
# TODO: Add BAML version of category and extraction and update function
from cognee.infrastructure.llm.structured_output_framework.llitellm_instructor.extraction import (
from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.extraction import (
extract_categories,
)
@@ -113,7 +113,7 @@ class LLMAdapter:
return extract_code_summary(content=content)
else:
from cognee.infrastructure.llm.structured_output_framework.llitellm_instructor.extraction import (
from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.extraction import (
extract_code_summary,
)
@@ -129,7 +129,7 @@ class LLMAdapter:
return extract_summary(content=content, response_model=response_model)
else:
from cognee.infrastructure.llm.structured_output_framework.llitellm_instructor.extraction import (
from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.extraction import (
extract_summary,
)

View file

@@ -3,10 +3,10 @@ from pydantic import BaseModel
import instructor
from cognee.exceptions import InvalidValueError
from cognee.infrastructure.llm.structured_output_framework.llitellm_instructor.llm.llm_interface import (
from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.llm.llm_interface import (
LLMInterface,
)
from cognee.infrastructure.llm.structured_output_framework.llitellm_instructor.llm.rate_limiter import (
from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.llm.rate_limiter import (
rate_limit_async,
sleep_and_retry_async,
)

View file

@@ -6,11 +6,11 @@ from litellm import acompletion, JSONSchemaValidationError
from cognee.shared.logging_utils import get_logger
from cognee.modules.observability.get_observe import get_observe
from cognee.exceptions import InvalidValueError
from cognee.infrastructure.llm.structured_output_framework.llitellm_instructor.llm.llm_interface import (
from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.llm.llm_interface import (
LLMInterface,
)
from cognee.infrastructure.llm.LLMAdapter import LLMAdapter
from cognee.infrastructure.llm.structured_output_framework.llitellm_instructor.llm.rate_limiter import (
from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.llm.rate_limiter import (
rate_limit_async,
sleep_and_retry_async,
)

View file

@@ -9,10 +9,10 @@ from litellm.exceptions import ContentPolicyViolationError
from instructor.exceptions import InstructorRetryException
from cognee.infrastructure.llm.exceptions import ContentPolicyFilterError
from cognee.infrastructure.llm.structured_output_framework.llitellm_instructor.llm.llm_interface import (
from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.llm.llm_interface import (
LLMInterface,
)
from cognee.infrastructure.llm.structured_output_framework.llitellm_instructor.llm.rate_limiter import (
from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.llm.rate_limiter import (
rate_limit_async,
sleep_and_retry_async,
)

View file

@@ -4,7 +4,7 @@ from enum import Enum
from cognee.exceptions import InvalidValueError
from cognee.infrastructure.llm import get_llm_config
from cognee.infrastructure.llm.structured_output_framework.llitellm_instructor.llm.ollama.adapter import (
from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.llm.ollama.adapter import (
OllamaAPIAdapter,
)
@@ -61,7 +61,7 @@ def get_llm_client():
if llm_config.llm_api_key is None:
raise InvalidValueError(message="LLM API key is not set.")
from cognee.infrastructure.llm.structured_output_framework.llitellm_instructor.llm.openai.adapter import (
from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.llm.openai.adapter import (
OpenAIAdapter,
)
@@ -82,7 +82,7 @@ def get_llm_client():
if llm_config.llm_api_key is None:
raise InvalidValueError(message="LLM API key is not set.")
from cognee.infrastructure.llm.structured_output_framework.llitellm_instructor.llm.generic_llm_api import (
from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.llm.generic_llm_api import (
GenericAPIAdapter,
)
@@ -95,7 +95,7 @@ def get_llm_client():
)
elif provider == LLMProvider.ANTHROPIC:
from cognee.infrastructure.llm.structured_output_framework.llitellm_instructor.llm.anthropic import (
from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.llm.anthropic import (
AnthropicAdapter,
)
@@ -105,7 +105,7 @@ def get_llm_client():
if llm_config.llm_api_key is None:
raise InvalidValueError(message="LLM API key is not set.")
from cognee.infrastructure.llm.structured_output_framework.llitellm_instructor.llm.generic_llm_api import (
from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.llm.generic_llm_api import (
GenericAPIAdapter,
)
@@ -124,7 +124,7 @@ def get_llm_client():
if llm_config.llm_api_key is None:
raise InvalidValueError(message="LLM API key is not set.")
from cognee.infrastructure.llm.structured_output_framework.llitellm_instructor.llm.gemini import (
from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.llm.gemini import (
GeminiAdapter,
)

View file

@@ -4,10 +4,10 @@ from typing import Type
from openai import OpenAI
from pydantic import BaseModel
from cognee.infrastructure.llm.structured_output_framework.llitellm_instructor.llm.llm_interface import (
from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.llm.llm_interface import (
LLMInterface,
)
from cognee.infrastructure.llm.structured_output_framework.llitellm_instructor.llm.rate_limiter import (
from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.llm.rate_limiter import (
rate_limit_async,
sleep_and_retry_async,
)

View file

@@ -9,12 +9,12 @@ from instructor.exceptions import InstructorRetryException
from cognee.exceptions import InvalidValueError
from cognee.infrastructure.llm.LLMAdapter import LLMAdapter
from cognee.infrastructure.llm.structured_output_framework.llitellm_instructor.llm.llm_interface import (
from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.llm.llm_interface import (
LLMInterface,
)
from cognee.infrastructure.llm.exceptions import ContentPolicyFilterError
from cognee.infrastructure.files.utils.open_data_file import open_data_file
from cognee.infrastructure.llm.structured_output_framework.llitellm_instructor.llm.rate_limiter import (
from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.llm.rate_limiter import (
rate_limit_async,
rate_limit_sync,
sleep_and_retry_async,

View file

@@ -24,7 +24,7 @@ class GeminiTokenizer(TokenizerInterface):
# Get LLM API key from config
from cognee.infrastructure.databases.vector.embeddings.config import get_embedding_config
from cognee.infrastructure.llm.structured_output_framework.llitellm_instructor.llm.config import (
from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.llm.config import (
get_llm_config,
)

View file

@@ -1,6 +1,6 @@
import litellm
from cognee.infrastructure.llm.structured_output_framework.llitellm_instructor.llm.get_llm_client import (
from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.llm.get_llm_client import (
get_llm_client,
)
from cognee.shared.logging_utils import get_logger

View file

@@ -2,12 +2,15 @@
import pytest
from unittest.mock import patch
from cognee.infrastructure.llm.structured_output_framework.llitellm_instructor.llm.rate_limiter import (
from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.llm.rate_limiter import (
llm_rate_limiter,
rate_limit_async,
rate_limit_sync,
)
LLM_RATE_LIMITER = "cognee.infrastructure.llm.structured_output_framework.litellm_instructor.llm.rate_limiter.llm_rate_limiter"
GET_LLM_CONFIG = "cognee.infrastructure.llm.structured_output_framework.litellm_instructor.llm.rate_limiter.get_llm_config"
@pytest.fixture(autouse=True)
def reset_limiter_singleton():
@@ -18,7 +21,7 @@ def reset_limiter_singleton():
def test_rate_limiter_initialization():
"""Test that the rate limiter can be initialized properly."""
with patch("cognee.infrastructure.llm.rate_limiter.get_llm_config") as mock_config:
with patch(GET_LLM_CONFIG) as mock_config:
mock_config.return_value.llm_rate_limit_enabled = True
mock_config.return_value.llm_rate_limit_requests = 10
mock_config.return_value.llm_rate_limit_interval = 60 # 1 minute
@@ -32,7 +35,7 @@ def test_rate_limiter_initialization():
def test_rate_limiter_disabled():
"""Test that the rate limiter is disabled by default."""
with patch("cognee.infrastructure.llm.rate_limiter.get_llm_config") as mock_config:
with patch(GET_LLM_CONFIG) as mock_config:
mock_config.return_value.llm_rate_limit_enabled = False
limiter = llm_rate_limiter()
@@ -43,7 +46,7 @@ def test_rate_limiter_disabled():
def test_rate_limiter_singleton():
"""Test that the rate limiter is a singleton."""
with patch("cognee.infrastructure.llm.rate_limiter.get_llm_config") as mock_config:
with patch(GET_LLM_CONFIG) as mock_config:
mock_config.return_value.llm_rate_limit_enabled = True
mock_config.return_value.llm_rate_limit_requests = 5
mock_config.return_value.llm_rate_limit_interval = 60
@@ -56,7 +59,7 @@ def test_rate_limiter_singleton():
def test_sync_decorator():
"""Test the sync decorator."""
with patch("cognee.infrastructure.llm.rate_limiter.llm_rate_limiter") as mock_limiter_class:
with patch(LLM_RATE_LIMITER) as mock_limiter_class:
mock_limiter = mock_limiter_class.return_value
mock_limiter.wait_if_needed.return_value = 0
@@ -73,7 +76,7 @@ def test_sync_decorator():
@pytest.mark.asyncio
async def test_async_decorator():
"""Test the async decorator."""
with patch("cognee.infrastructure.llm.rate_limiter.llm_rate_limiter") as mock_limiter_class:
with patch(LLM_RATE_LIMITER) as mock_limiter_class:
mock_limiter = mock_limiter_class.return_value
# Mock an async method with a coroutine
@@ -94,7 +97,7 @@ async def test_async_decorator():
def test_rate_limiting_actual():
"""Test actual rate limiting behavior with a small window."""
with patch("cognee.infrastructure.llm.rate_limiter.get_llm_config") as mock_config:
with patch(GET_LLM_CONFIG) as mock_config:
# Configure for 3 requests per minute
mock_config.return_value.llm_rate_limit_enabled = True
mock_config.return_value.llm_rate_limit_requests = 3
@@ -115,7 +118,7 @@ def test_rate_limiting_actual():
def test_rate_limit_60_per_minute():
"""Test rate limiting with the default 60 requests per minute limit."""
with patch("cognee.infrastructure.llm.rate_limiter.get_llm_config") as mock_config:
with patch(GET_LLM_CONFIG) as mock_config:
# Configure for default values: 60 requests per 60 seconds
mock_config.return_value.llm_rate_limit_enabled = True
mock_config.return_value.llm_rate_limit_requests = 60 # 60 requests

View file

@@ -2,7 +2,7 @@ import asyncio
import os
from unittest.mock import patch
from cognee.shared.logging_utils import get_logger
from cognee.infrastructure.llm.structured_output_framework.llitellm_instructor.llm.rate_limiter import (
from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.llm.rate_limiter import (
llm_rate_limiter,
)
from cognee.infrastructure.llm.config import (

View file

@@ -1,7 +1,7 @@
import time
import asyncio
from cognee.shared.logging_utils import get_logger
from cognee.infrastructure.llm.structured_output_framework.llitellm_instructor.llm.rate_limiter import (
from cognee.infrastructure.llm.structured_output_framework.litellm_instructor.llm.rate_limiter import (
sleep_and_retry_sync,
sleep_and_retry_async,
is_rate_limit_error,