Merge branch 'dev' into COG-475-local-file-endpoint-deletion

commit 6cb7fef411

9 changed files with 780 additions and 579 deletions
@@ -29,8 +29,17 @@ from cognee.tasks.repo_processor import (enrich_dependency_graph,
     expand_dependency_graph,
     get_repo_file_dependencies)
 from cognee.tasks.storage import add_data_points
+
+from cognee.base_config import get_base_config
+from cognee.shared.data_models import MonitoringTool
+
+monitoring = get_base_config().monitoring_tool
+if monitoring == MonitoringTool.LANGFUSE:
+    from langfuse.decorators import observe
+
 from cognee.tasks.summarization import summarize_code
+

 logger = logging.getLogger("code_graph_pipeline")

 update_status_lock = asyncio.Lock()
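For context, a minimal sketch of the conditional-instrumentation pattern introduced above. The first six lines mirror the hunk; the else branch with a no-op observe is illustrative only and not part of this commit:

from cognee.base_config import get_base_config
from cognee.shared.data_models import MonitoringTool

monitoring = get_base_config().monitoring_tool
if monitoring == MonitoringTool.LANGFUSE:
    from langfuse.decorators import observe
else:
    # Illustrative no-op stand-in (not part of the commit) so that both
    # @observe and @observe() keep working when Langfuse is disabled.
    def observe(func=None, **kwargs):
        return func if func is not None else (lambda f: f)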
@@ -62,7 +71,7 @@ async def code_graph_pipeline(datasets: Union[str, list[str]] = None, user: User):

     return await asyncio.gather(*awaitables)

-
+@observe
 async def run_pipeline(dataset: Dataset, user: User):
     '''DEPRECATED: Use `run_code_graph_pipeline` instead. This function will be removed.'''
     data_documents: list[Data] = await get_dataset_data(dataset_id = dataset.id)
@@ -10,7 +10,9 @@ class BaseConfig(BaseSettings):
     monitoring_tool: object = MonitoringTool.LANGFUSE
     graphistry_username: Optional[str] = os.getenv("GRAPHISTRY_USERNAME")
     graphistry_password: Optional[str] = os.getenv("GRAPHISTRY_PASSWORD")
-
+    langfuse_public_key: Optional[str] = os.getenv("LANGFUSE_PUBLIC_KEY")
+    langfuse_secret_key: Optional[str] = os.getenv("LANGFUSE_SECRET_KEY")
+    langfuse_host: Optional[str] = os.getenv("LANGFUSE_HOST")
     model_config = SettingsConfigDict(env_file = ".env", extra = "allow")

     def to_dict(self) -> dict:
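A minimal sketch of how these settings would be read back through get_base_config(); the print is purely illustrative:

from cognee.base_config import get_base_config
from cognee.shared.data_models import MonitoringTool

# The LANGFUSE_* fields are Optional[str]; they stay None when the variables
# are missing from the environment (or the .env file named in model_config).
config = get_base_config()
if config.monitoring_tool == MonitoringTool.LANGFUSE:
    print(config.langfuse_public_key, config.langfuse_host)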
@@ -6,26 +6,31 @@ from typing import Type
 import litellm
 import instructor
 from pydantic import BaseModel

+from cognee.shared.data_models import MonitoringTool
 from cognee.exceptions import InvalidValueError
 from cognee.infrastructure.llm.llm_interface import LLMInterface
 from cognee.infrastructure.llm.prompts import read_query_prompt
+from cognee.base_config import get_base_config
+
+if MonitoringTool.LANGFUSE:
+    from langfuse.decorators import observe

 class OpenAIAdapter(LLMInterface):
     name = "OpenAI"
     model: str
     api_key: str
     api_version: str


     """Adapter for OpenAI's GPT-3, GPT-4 API"""

     def __init__(
-        self,
-        api_key: str,
-        endpoint: str,
-        api_version: str,
-        model: str,
-        transcription_model: str,
-        streaming: bool = False,
+        self,
+        api_key: str,
+        endpoint: str,
+        api_version: str,
+        model: str,
+        transcription_model: str,
+        streaming: bool = False,
     ):
         self.aclient = instructor.from_litellm(litellm.acompletion)
         self.client = instructor.from_litellm(litellm.completion)
@@ -35,13 +40,18 @@ class OpenAIAdapter(LLMInterface):
         self.endpoint = endpoint
         self.api_version = api_version
         self.streaming = streaming
+        base_config = get_base_config()


-    async def acreate_structured_output(self, text_input: str, system_prompt: str, response_model: Type[BaseModel]) -> BaseModel:
+    @observe()
+    async def acreate_structured_output(self, text_input: str, system_prompt: str,
+                                        response_model: Type[BaseModel]) -> BaseModel:
         """Generate a response from a user query."""

         return await self.aclient.chat.completions.create(
-            model = self.model,
-            messages = [{
+            model=self.model,
+            messages=[{
                 "role": "user",
                 "content": f"""Use the given format to
                 extract information from the following input: {text_input}. """,
@@ -49,19 +59,21 @@ class OpenAIAdapter(LLMInterface):
                 "role": "system",
                 "content": system_prompt,
             }],
-            api_key = self.api_key,
-            api_base = self.endpoint,
-            api_version = self.api_version,
-            response_model = response_model,
-            max_retries = 5,
+            api_key=self.api_key,
+            api_base=self.endpoint,
+            api_version=self.api_version,
+            response_model=response_model,
+            max_retries=5,
         )

-    def create_structured_output(self, text_input: str, system_prompt: str, response_model: Type[BaseModel]) -> BaseModel:
+    @observe
+    def create_structured_output(self, text_input: str, system_prompt: str,
+                                 response_model: Type[BaseModel]) -> BaseModel:
         """Generate a response from a user query."""

         return self.client.chat.completions.create(
-            model = self.model,
-            messages = [{
+            model=self.model,
+            messages=[{
                 "role": "user",
                 "content": f"""Use the given format to
                 extract information from the following input: {text_input}. """,
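For orientation, a rough usage sketch of the structured-output path these hunks touch. PersonInfo and the prompt strings are made up for the example; only get_llm_client and the acreate_structured_output signature come from the repository:

import asyncio

from pydantic import BaseModel

from cognee.infrastructure.llm.get_llm_client import get_llm_client

class PersonInfo(BaseModel):
    # Hypothetical response model used only for this example.
    name: str
    age: int

async def main() -> None:
    llm_client = get_llm_client()
    result = await llm_client.acreate_structured_output(
        text_input="Alice is 30 years old.",
        system_prompt="Extract the person's name and age.",
        response_model=PersonInfo,
    )
    print(result)

asyncio.run(main())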
@@ -69,11 +81,11 @@ class OpenAIAdapter(LLMInterface):
                 "role": "system",
                 "content": system_prompt,
             }],
-            api_key = self.api_key,
-            api_base = self.endpoint,
-            api_version = self.api_version,
-            response_model = response_model,
-            max_retries = 5,
+            api_key=self.api_key,
+            api_base=self.endpoint,
+            api_version=self.api_version,
+            response_model=response_model,
+            max_retries=5,
         )

     def create_transcript(self, input):
@@ -86,12 +98,12 @@ class OpenAIAdapter(LLMInterface):
         # audio_data = audio_file.read()

         transcription = litellm.transcription(
-            model = self.transcription_model,
-            file = Path(input),
+            model=self.transcription_model,
+            file=Path(input),
             api_key=self.api_key,
             api_base=self.endpoint,
             api_version=self.api_version,
-            max_retries = 5,
+            max_retries=5,
         )

         return transcription
@@ -101,8 +113,8 @@ class OpenAIAdapter(LLMInterface):
         encoded_image = base64.b64encode(image_file.read()).decode('utf-8')

         return litellm.completion(
-            model = self.model,
-            messages = [{
+            model=self.model,
+            messages=[{
                 "role": "user",
                 "content": [
                     {
@@ -119,8 +131,8 @@ class OpenAIAdapter(LLMInterface):
             api_key=self.api_key,
             api_base=self.endpoint,
             api_version=self.api_version,
-            max_tokens = 300,
-            max_retries = 5,
+            max_tokens=300,
+            max_retries=5,
         )

     def show_prompt(self, text_input: str, system_prompt: str) -> str:
@@ -132,4 +144,4 @@ class OpenAIAdapter(LLMInterface):
         system_prompt = read_query_prompt(system_prompt)

         formatted_prompt = f"""System Prompt:\n{system_prompt}\n\nUser Input:\n{text_input}\n""" if system_prompt else None
-        return formatted_prompt
+        return formatted_prompt
@@ -1,10 +1,11 @@
 from typing import Type

+import os
 from pydantic import BaseModel

 from cognee.infrastructure.llm.get_llm_client import get_llm_client
 from cognee.infrastructure.llm.prompts import read_query_prompt
-from cognee.shared.data_models import SummarizedCode
+from cognee.shared.data_models import SummarizedCode, SummarizedClass, SummarizedFunction
+from cognee.tasks.summarization.mock_summary import get_mock_summarized_code
-

 async def extract_summary(content: str, response_model: Type[BaseModel]):
@@ -17,5 +18,14 @@ async def extract_summary(content: str, response_model: Type[BaseModel]):
     return llm_output

 async def extract_code_summary(content: str):
-
-    return await extract_summary(content, response_model=SummarizedCode)
+    enable_mocking = os.getenv("MOCK_CODE_SUMMARY", "false")
+    if isinstance(enable_mocking, bool):
+        enable_mocking = str(enable_mocking).lower()
+    enable_mocking = enable_mocking in ("true", "1", "yes")
+
+    if enable_mocking:
+        result = get_mock_summarized_code()
+        return result
+    else:
+        result = await extract_summary(content, response_model=SummarizedCode)
+        return result
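A standalone sketch of the flag handling above; the helper name is hypothetical, and only the MOCK_CODE_SUMMARY variable and the parsing rules come from the hunk:

import os

def mocking_enabled() -> bool:
    # Hypothetical helper mirroring the hunk above: read MOCK_CODE_SUMMARY,
    # normalise a boolean to its string form, then accept "true"/"1"/"yes".
    flag = os.getenv("MOCK_CODE_SUMMARY", "false")
    if isinstance(flag, bool):
        flag = str(flag).lower()
    return flag in ("true", "1", "yes")

# Example: running with MOCK_CODE_SUMMARY=yes makes this print True.
print(mocking_enabled())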
@@ -73,7 +73,7 @@ async def get_repo_file_dependencies(repo_path: str) -> AsyncGenerator[list, None]:

     yield repo

-    with ProcessPoolExecutor() as executor:
+    with ProcessPoolExecutor(max_workers = 12) as executor:
         loop = asyncio.get_event_loop()

         tasks = [
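A rough illustration of the executor pattern this hunk tunes (a process pool driven through loop.run_in_executor); the worker function and file list are placeholders, not cognee's actual dependency extraction:

import asyncio
from concurrent.futures import ProcessPoolExecutor

def parse_file(path: str) -> int:
    # Placeholder CPU-bound work; the real task extracts file dependencies.
    return len(path)

async def main() -> None:
    paths = ["a.py", "b.py", "c.py"]
    with ProcessPoolExecutor(max_workers=12) as executor:
        loop = asyncio.get_event_loop()
        tasks = [loop.run_in_executor(executor, parse_file, path) for path in paths]
        results = await asyncio.gather(*tasks)
    print(results)

if __name__ == "__main__":
    asyncio.run(main())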
cognee/tasks/summarization/mock_summary.py (new file, 37 lines)
@@ -0,0 +1,37 @@
from cognee.shared.data_models import SummarizedCode, SummarizedClass, SummarizedFunction

def get_mock_summarized_code() -> SummarizedCode:
    return SummarizedCode(
        file_name="mock_file.py",
        high_level_summary="This is a mock high-level summary.",
        key_features=["Mock feature 1", "Mock feature 2"],
        imports=["mock_import1", "mock_import2"],
        constants=["MOCK_CONSTANT = 'mock_value'"],
        classes=[
            SummarizedClass(
                name="MockClass",
                description="This is a mock description of the MockClass.",
                methods=[
                    SummarizedFunction(
                        name="mock_method",
                        description="This is a description of the mock method.",
                        docstring="This is a mock method.",
                        inputs=["mock_input: str"],
                        outputs=["mock_output: str"],
                        decorators=None,
                    )
                ],
            )
        ],
        functions=[
            SummarizedFunction(
                name="mock_function",
                description="This is a description of the mock function.",
                docstring="This is a mock function.",
                inputs=["mock_input: str"],
                outputs=["mock_output: str"],
                decorators=None,
            )
        ],
        workflow_description="This is a mock workflow description.",
    )
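A quick usage sketch; it assumes the data models above are pydantic models, which the keyword-argument construction suggests:

from cognee.tasks.summarization.mock_summary import get_mock_summarized_code

summary = get_mock_summarized_code()
# Attribute access mirrors the fields populated above.
print(summary.file_name, summary.high_level_summary, len(summary.functions))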
notebooks/cognee_hotpot_eval.ipynb (new file, 215 lines)
@@ -0,0 +1,215 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Evaluation on the hotpotQA dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from evals.eval_on_hotpot import eval_on_hotpotQA\n",
    "from evals.eval_on_hotpot import answer_with_cognee\n",
    "from evals.eval_on_hotpot import answer_without_cognee\n",
    "from evals.eval_on_hotpot import eval_answers\n",
    "from cognee.base_config import get_base_config\n",
    "from pathlib import Path\n",
    "from tqdm import tqdm\n",
    "import wget\n",
    "import json\n",
    "import statistics"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Getting the answers for the first num_samples questions of the dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "answer_provider = answer_with_cognee # For native LLM answers use answer_without_cognee\n",
    "num_samples = 10 # With cognee, it takes ~1m10s per sample\n",
    "\n",
    "base_config = get_base_config()\n",
    "data_root_dir = base_config.data_root_directory\n",
    "\n",
    "if not Path(data_root_dir).exists():\n",
    "    Path(data_root_dir).mkdir()\n",
    "\n",
    "filepath = data_root_dir / Path(\"hotpot_dev_fullwiki_v1.json\")\n",
    "if not filepath.exists():\n",
    "    url = 'http://curtis.ml.cmu.edu/datasets/hotpot/hotpot_dev_fullwiki_v1.json'\n",
    "    wget.download(url, out=data_root_dir)\n",
    "\n",
    "with open(filepath, \"r\") as file:\n",
    "    dataset = json.load(file)\n",
    "\n",
    "instances = dataset if not num_samples else dataset[:num_samples]\n",
    "answers = []\n",
    "for instance in tqdm(instances, desc=\"Getting answers\"):\n",
    "    answer = await answer_provider(instance)\n",
    "    answers.append(answer)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Calculating the official HotpotQA benchmark metrics: F1 score and EM"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from evals.deepeval_metrics import f1_score_metric\n",
    "from evals.deepeval_metrics import em_score_metric"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "f1_metric = f1_score_metric()\n",
    "eval_results = await eval_answers(instances, answers, f1_metric)\n",
    "avg_f1_score = statistics.mean([result.metrics_data[0].score for result in eval_results.test_results])\n",
    "print(\"F1 score: \", avg_f1_score)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "em_metric = em_score_metric()\n",
    "eval_results = await eval_answers(instances, answers, em_metric)\n",
    "avg_em_score = statistics.mean([result.metrics_data[0].score for result in eval_results.test_results])\n",
    "print(\"EM score: \", avg_em_score)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Calculating a custom metric called Correctness\n",
    "##### Correctness is judged by an LLM"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "from evals.deepeval_metrics import correctness_metric"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "eval_results = await eval_answers(instances, answers, correctness_metric) # note that instantiation is not needed for correctness_metric as it is already an instance\n",
    "avg_correctness_score = statistics.mean([result.metrics_data[0].score for result in eval_results.test_results])\n",
    "print(\"Correctness score: \", avg_correctness_score)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Using a metric from Deepeval"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "from deepeval.metrics import AnswerRelevancyMetric"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "relevancy_metric = AnswerRelevancyMetric()\n",
    "eval_results = await eval_answers(instances, answers, relevancy_metric)\n",
    "avg_relevancy_score = statistics.mean([result.metrics_data[0].score for result in eval_results.test_results])\n",
    "print(\"Relevancy score: \", avg_relevancy_score)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Answering and eval in one step"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "answer_provider = answer_without_cognee\n",
    "f1_metric = f1_score_metric()\n",
    "f1_score = await eval_on_hotpotQA(answer_provider, num_samples=10, eval_metric=f1_metric) # takes ~1m10s per sample\n",
    "print(\"F1 score: \", f1_score)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "myenv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.20"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
poetry.lock (generated, 989 lines changed). File diff suppressed because it is too large.
@@ -46,7 +46,7 @@ aiofiles = "^23.2.1"
 qdrant-client = {version = "^1.9.0", optional = true}
 graphistry = "^0.33.5"
 tenacity = "^8.4.1"
-weaviate-client = {version = "4.6.7", optional = true}
+weaviate-client = {version = "4.9.6", optional = true}
 scikit-learn = "^1.5.0"
 pypdf = "^4.1.0"
 neo4j = {version = "^5.20.0", optional = true}
@@ -60,7 +60,7 @@ posthog = {version = "^3.5.0", optional = true}
 lancedb = "0.15.0"
 litellm = "1.49.1"
 groq = {version = "0.8.0", optional = true}
-langfuse = {version = "^2.32.0", optional = true}
+langfuse = "^2.32.0"
 pydantic-settings = "^2.2.1"
 anthropic = "^0.26.1"
 sentry-sdk = {extras = ["fastapi"], version = "^2.9.0"}
@@ -74,6 +74,7 @@ deepeval = {version = "^2.0.1", optional = true}
 transformers = "^4.46.3"
 pymilvus = {version = "^2.5.0", optional = true}
 unstructured = { extras = ["csv", "doc", "docx", "epub", "md", "odt", "org", "ppt", "pptx", "rst", "rtf", "tsv", "xlsx"], version = "^0.16.10", optional = true }
+httpx = "0.27.0"