test with a simple file

This commit is contained in:
Vasilije 2024-05-18 18:44:18 +02:00
parent 4488339fdc
commit d1fa826aeb
6 changed files with 36 additions and 1 deletion

View file

@@ -79,6 +79,11 @@ class Config:
graph_topology:str = DefaultGraphModel graph_topology:str = DefaultGraphModel
cognitive_layers_limit: int = 2 cognitive_layers_limit: int = 2
from cognee.shared.data_models import MonitoringTool
# Monitoring tool
monitoring_tool: str = os.getenv("MONITORING_TOOL", MonitoringTool.LANGFUSE)
if ( if (
os.getenv("ENV") == "prod" os.getenv("ENV") == "prod"
or os.getenv("ENV") == "dev" or os.getenv("ENV") == "dev"

View file

@@ -33,6 +33,7 @@ class InfrastructureConfig():
chunk_strategy = config.chunk_strategy chunk_strategy = config.chunk_strategy
chunk_engine = None chunk_engine = None
graph_topology = config.graph_topology graph_topology = config.graph_topology
monitoring_tool = config.monitoring_tool
def get_config(self, config_entity: str = None) -> dict: def get_config(self, config_entity: str = None) -> dict:
if (config_entity is None or config_entity == "database_engine") and self.database_engine is None: if (config_entity is None or config_entity == "database_engine") and self.database_engine is None:

View file

@@ -2,11 +2,21 @@ import asyncio
from typing import List, Type from typing import List, Type
import openai import openai
import instructor import instructor
from openai import AsyncOpenAI, OpenAI
from pydantic import BaseModel from pydantic import BaseModel
from tenacity import retry, stop_after_attempt from tenacity import retry, stop_after_attempt
from cognee.config import Config
from cognee.infrastructure.llm.llm_interface import LLMInterface from cognee.infrastructure.llm.llm_interface import LLMInterface
from cognee.infrastructure.llm.prompts import read_query_prompt from cognee.infrastructure.llm.prompts import read_query_prompt
from cognee.shared.data_models import MonitoringTool
config = Config()
config.load()
if config.monitoring_tool == MonitoringTool.LANGFUSE:
from langfuse.openai import AsyncOpenAI, OpenAI
else:
from openai import AsyncOpenAI, OpenAI
class OpenAIAdapter(LLMInterface): class OpenAIAdapter(LLMInterface):
"""Adapter for OpenAI's GPT-3, GPT-4 API""" """Adapter for OpenAI's GPT-3, GPT-4 API"""

View file

@@ -252,3 +252,9 @@ class ResponseSummaryModel(BaseModel):
document_id: str document_id: str
response_summary: str response_summary: str
class MonitoringTool(str, Enum):
""" Monitoring tools """
LANGFUSE = "langfuse"
LLMLITE = "llmlite"

View file

@@ -55,6 +55,18 @@ services:
- cognee_backend - cognee_backend
ports: ports:
- "5432:5432" - "5432:5432"
litellm:
build:
context: .
args:
target: runtime
image: ghcr.io/berriai/litellm:main-latest
ports:
- "4000:4000" # Map the container port to the host, change the host port if necessary
volumes:
- ./litellm-config.yaml:/app/config.yaml # Mount the local configuration file
# You can change the port or number of workers as per your requirements or pass any new supported CLI argument. Make sure the port passed here matches with the container port defined above in `ports` value
command: [ "--config", "/app/config.yaml", "--port", "4000", "--num_workers", "8" ]
networks: networks:
cognee_backend: cognee_backend:

View file

@@ -66,6 +66,7 @@ deepeval = "^0.21.36"
litellm = "^1.37.3" litellm = "^1.37.3"
groq = "^0.5.0" groq = "^0.5.0"
tantivy = "^0.21.0" tantivy = "^0.21.0"
langfuse = "^2.32.0"
[tool.poetry.extras] [tool.poetry.extras]