diff --git a/cognee/config.py b/cognee/config.py index c9548198b..08014e803 100644 --- a/cognee/config.py +++ b/cognee/config.py @@ -79,6 +79,11 @@ class Config: graph_topology:str = DefaultGraphModel cognitive_layers_limit: int = 2 + from cognee.shared.data_models import MonitoringTool + + # Monitoring tool + monitoring_tool: str = os.getenv("MONITORING_TOOL", MonitoringTool.LANGFUSE) + if ( os.getenv("ENV") == "prod" or os.getenv("ENV") == "dev" diff --git a/cognee/infrastructure/InfrastructureConfig.py b/cognee/infrastructure/InfrastructureConfig.py index 3ab42625c..21e6aacc6 100644 --- a/cognee/infrastructure/InfrastructureConfig.py +++ b/cognee/infrastructure/InfrastructureConfig.py @@ -33,6 +33,7 @@ class InfrastructureConfig(): chunk_strategy = config.chunk_strategy chunk_engine = None graph_topology = config.graph_topology + monitoring_tool = config.monitoring_tool def get_config(self, config_entity: str = None) -> dict: if (config_entity is None or config_entity == "database_engine") and self.database_engine is None: diff --git a/cognee/infrastructure/llm/openai/adapter.py b/cognee/infrastructure/llm/openai/adapter.py index 50a6d1529..5c45b98bc 100644 --- a/cognee/infrastructure/llm/openai/adapter.py +++ b/cognee/infrastructure/llm/openai/adapter.py @@ -2,11 +2,21 @@ import asyncio from typing import List, Type import openai import instructor -from openai import AsyncOpenAI, OpenAI from pydantic import BaseModel from tenacity import retry, stop_after_attempt + +from cognee.config import Config from cognee.infrastructure.llm.llm_interface import LLMInterface from cognee.infrastructure.llm.prompts import read_query_prompt +from cognee.shared.data_models import MonitoringTool + +config = Config() +config.load() + +if config.monitoring_tool == MonitoringTool.LANGFUSE: + from langfuse.openai import AsyncOpenAI, OpenAI +else: + from openai import AsyncOpenAI, OpenAI class OpenAIAdapter(LLMInterface): """Adapter for OpenAI's GPT-3, GPT=4 API""" diff --git 
a/cognee/shared/data_models.py b/cognee/shared/data_models.py index cbaabfe44..28fe72040 100644 --- a/cognee/shared/data_models.py +++ b/cognee/shared/data_models.py @@ -252,3 +252,9 @@ class ResponseSummaryModel(BaseModel): document_id: str response_summary: str + +class MonitoringTool(str, Enum): + """ Monitoring tools """ + LANGFUSE = "langfuse" + LITELLM = "litellm" + diff --git a/docker-compose.yml b/docker-compose.yml index 86a67b467..2ede528d2 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -55,6 +55,18 @@ services: - cognee_backend ports: - "5432:5432" + litellm: + build: + context: . + args: + target: runtime + image: ghcr.io/berriai/litellm:main-latest + ports: + - "4000:4000" # Map the container port to the host, change the host port if necessary + volumes: + - ./litellm-config.yaml:/app/config.yaml # Mount the local configuration file + # You can change the port or number of workers as per your requirements or pass any new supported CLI argument. Make sure the port passed here matches with the container port defined above in `ports` value + command: [ "--config", "/app/config.yaml", "--port", "4000", "--num_workers", "8" ] networks: cognee_backend: diff --git a/pyproject.toml b/pyproject.toml index 67cd40167..400ede490 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -66,6 +66,7 @@ deepeval = "^0.21.36" litellm = "^1.37.3" groq = "^0.5.0" tantivy = "^0.21.0" +langfuse = "^2.32.0" [tool.poetry.extras]