From d099cae128431897aa20dc973df9d16da46f5f5e Mon Sep 17 00:00:00 2001
From: Vasilije <8619304+Vasilije1990@users.noreply.github.com>
Date: Sun, 19 May 2024 22:39:47 +0200
Subject: [PATCH] Add evals for cognee

---
 evals/generate_test_set.py         |  6 ++--
 evals/simple_rag_vs_cognee_eval.py | 56 ++++++++++++++++++++++--------
 2 files changed, 45 insertions(+), 17 deletions(-)

diff --git a/evals/generate_test_set.py b/evals/generate_test_set.py
index 9099a81ed..58bf4222a 100644
--- a/evals/generate_test_set.py
+++ b/evals/generate_test_set.py
@@ -22,8 +22,10 @@ dotenv.load_dotenv()
 
 dataset = EvaluationDataset()
 dataset.generate_goldens_from_docs(
-    document_paths=['soldiers_home.pdf'],
-    max_goldens_per_document=10
+    document_paths=['natural_language_processing.txt', 'soldiers_home.pdf', 'trump.txt'],
+    max_goldens_per_document=10,
+    num_evolutions=5,
+    enable_breadth_evolve=True,
 )
 
 
diff --git a/evals/simple_rag_vs_cognee_eval.py b/evals/simple_rag_vs_cognee_eval.py
index 87506a5b2..cbac4ebc9 100644
--- a/evals/simple_rag_vs_cognee_eval.py
+++ b/evals/simple_rag_vs_cognee_eval.py
@@ -13,14 +13,32 @@ from cognee.infrastructure.llm.get_llm_client import get_llm_client
 
 dataset = EvaluationDataset()
 dataset.add_test_cases_from_json_file(
     # file_path is the absolute path to you .json file
-    file_path="synthetic_data/20240519_185842.json",
-    input_key_name="query",
+    file_path="./synthetic_data/20240519_185842.json",
+    input_key_name="input",
     actual_output_key_name="actual_output",
     expected_output_key_name="expected_output",
-    context_key_name="context",
-    retrieval_context_key_name="retrieval_context",
+    context_key_name="context"
 )
 
+print(dataset)
+# from deepeval.synthesizer import Synthesizer
+#
+# synthesizer = Synthesizer(model="gpt-3.5-turbo")
+#
+# dataset = EvaluationDataset()
+# dataset.generate_goldens_from_docs(
+#     synthesizer=synthesizer,
+#     document_paths=['natural_language_processing.txt', 'soldiers_home.pdf', 'trump.txt'],
+#     max_goldens_per_document=10,
+#     num_evolutions=5,
+#     enable_breadth_evolve=True,
+# )
+
+
+print(dataset.goldens)
+print(dataset)
+
+
 
 import logging
 
@@ -29,43 +47,51 @@ from cognee.infrastructure import infrastructure_config
 
 logger = logging.getLogger(__name__)
 
-def AnswerModel(BaseModel):
+class AnswerModel(BaseModel):
     response:str
 
 
-def get_answer_base(content: str, response_model: Type[BaseModel]):
+def get_answer_base(content: str, context:str, response_model: Type[BaseModel]):
     llm_client = get_llm_client()
 
-    system_prompt = "Answer the following question: and use the context"
+    system_prompt = "THIS IS YOUR CONTEXT:" + str(context)
 
     return llm_client.create_structured_output(content, system_prompt, response_model)
 
 
-def get_answer(content: str, model: Type[BaseModel]= AnswerModel):
+def get_answer(content: str,context, model: Type[BaseModel]= AnswerModel):
     try:
         return (get_answer_base(
             content,
+            context,
             model
         ))
     except Exception as error:
         logger.error("Error extracting cognitive layers from content: %s", error, exc_info = True)
         raise error
 
+def run_cognify_base_rag_and_search():
+    pass
+
+
+def run_cognify_and_search():
+    pass
 
-def convert_goldens_to_test_cases(goldens: List[Golden]) -> List[LLMTestCase]:
+def convert_goldens_to_test_cases(test_cases_raw: List[LLMTestCase]) -> List[LLMTestCase]:
     test_cases = []
-    for golden in goldens:
+    for case in test_cases_raw:
         test_case = LLMTestCase(
-            input=golden.input,
+            input=case.input,
             # Generate actual output using the 'input' and 'additional_metadata'
-            actual_output= get_answer(golden.input),
-            expected_output=golden.expected_output,
-            context=golden.context,
+            actual_output= str(get_answer(case.input, case.context).model_dump()['response']),
+            expected_output=case.expected_output,
+            context=case.context,
+            retrieval_context=["retrieval_context"],
         )
         test_cases.append(test_case)
     return test_cases
 
 
 # Data preprocessing before setting the dataset test cases
-dataset.test_cases = convert_goldens_to_test_cases(dataset.goldens)
+dataset.test_cases = convert_goldens_to_test_cases(dataset.test_cases)
 
 from deepeval.metrics import HallucinationMetric
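
Note: the last hunk is cut off right after the HallucinationMetric import, so the evaluation call itself is outside this excerpt. Purely for orientation, the snippet below is a minimal sketch (not part of the patch) of how test cases like the ones built by convert_goldens_to_test_cases() are typically scored with deepeval's HallucinationMetric. The 0.5 threshold, the top-level evaluate() helper, and the sample test case contents are illustrative assumptions, not something this diff introduces.

    # Sketch only, not part of this patch. Assumes deepeval's evaluate() helper
    # and an arbitrary pass/fail threshold of 0.5.
    from deepeval import evaluate
    from deepeval.metrics import HallucinationMetric
    from deepeval.test_case import LLMTestCase

    # HallucinationMetric grades actual_output against the supplied context,
    # so each test case needs a non-empty context list (illustrative values here).
    sample_case = LLMTestCase(
        input="What is natural language processing?",
        actual_output="NLP is a field concerned with processing human language.",
        context=["Natural language processing (NLP) is a subfield of computer "
                 "science and linguistics focused on processing human language."],
    )

    metric = HallucinationMetric(threshold=0.5)

    # In simple_rag_vs_cognee_eval.py this would run over dataset.test_cases
    # produced by convert_goldens_to_test_cases().
    evaluate(test_cases=[sample_case], metrics=[metric])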