Merge pull request #386 from topoteretes/LANGFUSE_FIX

Fix langfuse
Vasilije committed via GitHub on 2024-12-19 16:35:11 +01:00
commit 14cd1b1806
5 changed files with 513 additions and 574 deletions

code_graph_pipeline.py

@@ -29,8 +29,17 @@ from cognee.tasks.repo_processor import (enrich_dependency_graph,
                                          expand_dependency_graph,
                                          get_repo_file_dependencies)
 from cognee.tasks.storage import add_data_points
+from cognee.base_config import get_base_config
+from cognee.shared.data_models import MonitoringTool
+
+monitoring = get_base_config().monitoring_tool
+if monitoring == MonitoringTool.LANGFUSE:
+    from langfuse.decorators import observe
 from cognee.tasks.summarization import summarize_code

 logger = logging.getLogger("code_graph_pipeline")

 update_status_lock = asyncio.Lock()
@@ -62,7 +71,7 @@ async def code_graph_pipeline(datasets: Union[str, list[str]] = None, user: User
     return await asyncio.gather(*awaitables)

+@observe
 async def run_pipeline(dataset: Dataset, user: User):
     '''DEPRECATED: Use `run_code_graph_pipeline` instead. This function will be removed.'''
     data_documents: list[Data] = await get_dataset_data(dataset_id = dataset.id)
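Note on the hunks above: `observe` is only imported when the configured monitoring tool is Langfuse, yet the `@observe` decorator below the gate is applied unconditionally. A minimal sketch of the same gate with a no-op fallback, so the name is always defined (the fallback decorator is my illustration, not part of this PR):

    from cognee.base_config import get_base_config
    from cognee.shared.data_models import MonitoringTool

    if get_base_config().monitoring_tool == MonitoringTool.LANGFUSE:
        from langfuse.decorators import observe
    else:
        # Illustrative fallback: keeps `@observe` usable when Langfuse is off
        # by returning the decorated function unchanged.
        def observe(*args, **kwargs):
            if len(args) == 1 and callable(args[0]) and not kwargs:
                return args[0]      # bare `@observe`
            return lambda fn: fn    # parameterized `@observe()`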

cognee/base_config.py

@@ -10,7 +10,9 @@ class BaseConfig(BaseSettings):
     monitoring_tool: object = MonitoringTool.LANGFUSE
     graphistry_username: Optional[str] = os.getenv("GRAPHISTRY_USERNAME")
     graphistry_password: Optional[str] = os.getenv("GRAPHISTRY_PASSWORD")
+    langfuse_public_key: Optional[str] = os.getenv("LANGFUSE_PUBLIC_KEY")
+    langfuse_secret_key: Optional[str] = os.getenv("LANGFUSE_SECRET_KEY")
+    langfuse_host: Optional[str] = os.getenv("LANGFUSE_HOST")
     model_config = SettingsConfigDict(env_file = ".env", extra = "allow")

     def to_dict(self) -> dict:
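The three new fields resolve `LANGFUSE_PUBLIC_KEY`, `LANGFUSE_SECRET_KEY`, and `LANGFUSE_HOST` from the process environment (or from `.env`, via pydantic-settings). A quick sketch of reading them back; the credential values are placeholders:

    import os

    # Placeholder credentials, for illustration only.
    os.environ["LANGFUSE_PUBLIC_KEY"] = "pk-lf-..."
    os.environ["LANGFUSE_SECRET_KEY"] = "sk-lf-..."
    os.environ["LANGFUSE_HOST"] = "https://cloud.langfuse.com"

    from cognee.base_config import get_base_config

    config = get_base_config()
    print(config.langfuse_public_key)  # pk-lf-...
    print(config.langfuse_host)        # https://cloud.langfuse.com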

OpenAIAdapter

@@ -6,26 +6,31 @@ from typing import Type
 import litellm
 import instructor
 from pydantic import BaseModel
+from cognee.shared.data_models import MonitoringTool
 from cognee.exceptions import InvalidValueError
 from cognee.infrastructure.llm.llm_interface import LLMInterface
 from cognee.infrastructure.llm.prompts import read_query_prompt
+from cognee.base_config import get_base_config
+if MonitoringTool.LANGFUSE:
+    from langfuse.decorators import observe

 class OpenAIAdapter(LLMInterface):
     name = "OpenAI"
     model: str
     api_key: str
     api_version: str
     """Adapter for OpenAI's GPT-3, GPT-4 API"""

     def __init__(
-        self,
-        api_key: str,
-        endpoint: str,
-        api_version: str,
-        model: str,
-        transcription_model: str,
-        streaming: bool = False,
+        self,
+        api_key: str,
+        endpoint: str,
+        api_version: str,
+        model: str,
+        transcription_model: str,
+        streaming: bool = False,
     ):
         self.aclient = instructor.from_litellm(litellm.acompletion)
         self.client = instructor.from_litellm(litellm.completion)
@@ -35,13 +40,18 @@ class OpenAIAdapter(LLMInterface):
         self.endpoint = endpoint
         self.api_version = api_version
         self.streaming = streaming
+        base_config = get_base_config()

-    async def acreate_structured_output(self, text_input: str, system_prompt: str, response_model: Type[BaseModel]) -> BaseModel:
+    @observe()
+    async def acreate_structured_output(self, text_input: str, system_prompt: str,
+                                        response_model: Type[BaseModel]) -> BaseModel:
         """Generate a response from a user query."""
         return await self.aclient.chat.completions.create(
-            model = self.model,
-            messages = [{
+            model=self.model,
+            messages=[{
                 "role": "user",
                 "content": f"""Use the given format to
                 extract information from the following input: {text_input}. """,
@@ -49,19 +59,21 @@ class OpenAIAdapter(LLMInterface):
                 "role": "system",
                 "content": system_prompt,
             }],
-            api_key = self.api_key,
-            api_base = self.endpoint,
-            api_version = self.api_version,
-            response_model = response_model,
-            max_retries = 5,
+            api_key=self.api_key,
+            api_base=self.endpoint,
+            api_version=self.api_version,
+            response_model=response_model,
+            max_retries=5,
         )

-    def create_structured_output(self, text_input: str, system_prompt: str, response_model: Type[BaseModel]) -> BaseModel:
+    @observe
+    def create_structured_output(self, text_input: str, system_prompt: str,
+                                 response_model: Type[BaseModel]) -> BaseModel:
         """Generate a response from a user query."""
         return self.client.chat.completions.create(
-            model = self.model,
-            messages = [{
+            model=self.model,
+            messages=[{
                 "role": "user",
                 "content": f"""Use the given format to
                 extract information from the following input: {text_input}. """,
@@ -69,11 +81,11 @@ class OpenAIAdapter(LLMInterface):
                 "role": "system",
                 "content": system_prompt,
             }],
-            api_key = self.api_key,
-            api_base = self.endpoint,
-            api_version = self.api_version,
-            response_model = response_model,
-            max_retries = 5,
+            api_key=self.api_key,
+            api_base=self.endpoint,
+            api_version=self.api_version,
+            response_model=response_model,
+            max_retries=5,
         )

     def create_transcript(self, input):
@@ -86,12 +98,12 @@ class OpenAIAdapter(LLMInterface):
         #     audio_data = audio_file.read()

         transcription = litellm.transcription(
-            model = self.transcription_model,
-            file = Path(input),
+            model=self.transcription_model,
+            file=Path(input),
             api_key=self.api_key,
             api_base=self.endpoint,
             api_version=self.api_version,
-            max_retries = 5,
+            max_retries=5,
         )

         return transcription
@@ -101,8 +113,8 @@ class OpenAIAdapter(LLMInterface):
             encoded_image = base64.b64encode(image_file.read()).decode('utf-8')

         return litellm.completion(
-            model = self.model,
-            messages = [{
+            model=self.model,
+            messages=[{
                 "role": "user",
                 "content": [
                     {
@@ -119,8 +131,8 @@ class OpenAIAdapter(LLMInterface):
             api_key=self.api_key,
             api_base=self.endpoint,
             api_version=self.api_version,
-            max_tokens = 300,
-            max_retries = 5,
+            max_tokens=300,
+            max_retries=5,
         )

     def show_prompt(self, text_input: str, system_prompt: str) -> str:
@@ -132,4 +144,4 @@ class OpenAIAdapter(LLMInterface):
         system_prompt = read_query_prompt(system_prompt)

         formatted_prompt = f"""System Prompt:\n{system_prompt}\n\nUser Input:\n{text_input}\n""" if system_prompt else None

-        return formatted_prompt
\ No newline at end of file
+        return formatted_prompt
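Taken together, the adapter changes normalize keyword arguments (`model = self.model` becomes `model=self.model`) and put `@observe` tracing on both structured-output methods. A hedged sketch of driving the async path; the `Person` model, the credential strings, and the adapter's import path are assumptions for illustration, not taken from this PR:

    import asyncio
    from pydantic import BaseModel

    # Import path assumed from cognee's package layout; adjust if it differs.
    from cognee.infrastructure.llm.openai.adapter import OpenAIAdapter

    class Person(BaseModel):
        name: str
        age: int

    async def main():
        adapter = OpenAIAdapter(
            api_key="sk-...",                      # placeholder
            endpoint="https://api.openai.com/v1",  # placeholder
            api_version="2024-02-01",              # placeholder
            model="gpt-4o-mini",
            transcription_model="whisper-1",
        )
        person = await adapter.acreate_structured_output(
            text_input="Ada Lovelace was 36.",
            system_prompt="Extract the person described in the input.",
            response_model=Person,
        )
        print(person)  # e.g. Person(name='Ada Lovelace', age=36)

    asyncio.run(main())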

poetry.lock (generated): 989 changed lines

File diff suppressed because it is too large.

pyproject.toml

@@ -46,7 +46,7 @@ aiofiles = "^23.2.1"
 qdrant-client = {version = "^1.9.0", optional = true}
 graphistry = "^0.33.5"
 tenacity = "^8.4.1"
-weaviate-client = {version = "4.6.7", optional = true}
+weaviate-client = {version = "4.9.6", optional = true}
 scikit-learn = "^1.5.0"
 pypdf = "^4.1.0"
 neo4j = {version = "^5.20.0", optional = true}
@@ -60,7 +60,7 @@ posthog = {version = "^3.5.0", optional = true}
 lancedb = "0.15.0"
 litellm = "1.49.1"
 groq = {version = "0.8.0", optional = true}
-langfuse = {version = "^2.32.0", optional = true}
+langfuse = "^2.32.0"
 pydantic-settings = "^2.2.1"
 anthropic = "^0.26.1"
 sentry-sdk = {extras = ["fastapi"], version = "^2.9.0"}
@@ -74,6 +74,7 @@ deepeval = {version = "^2.0.1", optional = true}
 transformers = "^4.46.3"
 pymilvus = {version = "^2.5.0", optional = true}
 unstructured = { extras = ["csv", "doc", "docx", "epub", "md", "odt", "org", "ppt", "pptx", "rst", "rtf", "tsv", "xlsx"], version = "^0.16.10", optional = true }
+httpx = "0.27.0"
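With langfuse promoted from an optional extra to a regular dependency (and weaviate-client and httpx re-pinned above), the conditional Langfuse import in the pipeline can assume the package is installed. A small sanity check, assuming `poetry install` has been run against this pyproject.toml:

    import importlib.metadata as md

    # Confirm the pins above resolved in the current environment.
    for pkg in ("langfuse", "weaviate-client", "httpx"):
        try:
            print(pkg, md.version(pkg))
        except md.PackageNotFoundError:
            print(pkg, "NOT INSTALLED")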