Add evals for cognee

parent 2657aa7096, commit d099cae128
2 changed files with 45 additions and 17 deletions
@@ -22,8 +22,10 @@ dotenv.load_dotenv()
 
 dataset = EvaluationDataset()
 dataset.generate_goldens_from_docs(
-    document_paths=['soldiers_home.pdf'],
-    max_goldens_per_document=10
+    document_paths=['natural_language_processing.txt', 'soldiers_home.pdf', 'trump.txt'],
+    max_goldens_per_document=10,
+    num_evolutions=5,
+    enable_breadth_evolve=True,
 )
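For reference, the golden-generation flow this hunk configures looks roughly like the sketch below. It follows the deepeval API as used in the commit; the synthesizer interface has changed across deepeval versions, and the final save_as call (which would write a timestamped JSON file like the one loaded further down) is an assumption about how the synthetic_data file was produced.

import dotenv
from deepeval.dataset import EvaluationDataset

dotenv.load_dotenv()  # expects the LLM API key (e.g. OPENAI_API_KEY) in .env

dataset = EvaluationDataset()
dataset.generate_goldens_from_docs(
    # Source documents to synthesize question/answer goldens from
    document_paths=['natural_language_processing.txt', 'soldiers_home.pdf', 'trump.txt'],
    max_goldens_per_document=10,  # cap on goldens produced per source document
    num_evolutions=5,             # rounds of query "evolution" to diversify goldens
    enable_breadth_evolve=True,   # broaden topical coverage during evolution
)

# Assumed persistence step: save_as writes a timestamped JSON file,
# e.g. ./synthetic_data/20240519_185842.json
dataset.save_as(file_type="json", directory="./synthetic_data")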
@@ -13,14 +13,32 @@ from cognee.infrastructure.llm.get_llm_client import get_llm_client
 
 dataset = EvaluationDataset()
 dataset.add_test_cases_from_json_file(
-    # file_path is the absolute path to you .json file
-    file_path="synthetic_data/20240519_185842.json",
-    input_key_name="query",
+    file_path="./synthetic_data/20240519_185842.json",
+    input_key_name="input",
     actual_output_key_name="actual_output",
     expected_output_key_name="expected_output",
-    context_key_name="context",
-    retrieval_context_key_name="retrieval_context",
+    context_key_name="context"
 )
 
 print(dataset)
+# from deepeval.synthesizer import Synthesizer
+#
+# synthesizer = Synthesizer(model="gpt-3.5-turbo")
+#
+# dataset = EvaluationDataset()
+# dataset.generate_goldens_from_docs(
+#     synthesizer=synthesizer,
+#     document_paths=['natural_language_processing.txt', 'soldiers_home.pdf', 'trump.txt'],
+#     max_goldens_per_document=10,
+#     num_evolutions=5,
+#     enable_breadth_evolve=True,
+# )
+
+print(dataset.goldens)
+print(dataset)
 
 import logging
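Given the key names passed to add_test_cases_from_json_file, each record in the timestamped JSON file is expected to look roughly like the following sketch (a hypothetical entry with made-up values, written as a Python literal; note that after this change retrieval_context is no longer read from the file).

# One illustrative record from ./synthetic_data/20240519_185842.json
# (field names match the key-name arguments above; values are invented):
example_record = {
    "input": "What themes does 'Soldier's Home' explore?",
    "actual_output": "The story follows Krebs after his return from the war ...",
    "expected_output": "Krebs's alienation and inability to readjust after World War I.",
    "context": ["Excerpt from soldiers_home.pdf grounding the expected answer."],
}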
@@ -29,43 +47,51 @@ from cognee.infrastructure import infrastructure_config
 
 logger = logging.getLogger(__name__)
 
-def AnswerModel(BaseModel):
+class AnswerModel(BaseModel):
     response: str
 
-def get_answer_base(content: str, response_model: Type[BaseModel]):
+def get_answer_base(content: str, context: str, response_model: Type[BaseModel]):
     llm_client = get_llm_client()
 
-    system_prompt = "Answer the following question: and use the context"
+    system_prompt = "THIS IS YOUR CONTEXT:" + str(context)
 
     return llm_client.create_structured_output(content, system_prompt, response_model)
 
-def get_answer(content: str, model: Type[BaseModel] = AnswerModel):
+def get_answer(content: str, context, model: Type[BaseModel] = AnswerModel):
 
     try:
         return (get_answer_base(
             content,
+            context,
             model
         ))
     except Exception as error:
         logger.error("Error extracting cognitive layers from content: %s", error, exc_info=True)
         raise error
 
 def run_cognify_base_rag_and_search():
     pass
 
 
 def run_cognify_and_search():
     pass
 
 
-def convert_goldens_to_test_cases(goldens: List[Golden]) -> List[LLMTestCase]:
+def convert_goldens_to_test_cases(test_cases_raw: List[LLMTestCase]) -> List[LLMTestCase]:
     test_cases = []
-    for golden in goldens:
+    for case in test_cases_raw:
         test_case = LLMTestCase(
-            input=golden.input,
+            input=case.input,
             # Generate actual output using the 'input' and 'additional_metadata'
-            actual_output=get_answer(golden.input),
-            expected_output=golden.expected_output,
-            context=golden.context,
+            actual_output=str(get_answer(case.input, case.context).model_dump()['response']),
+            expected_output=case.expected_output,
+            context=case.context,
             retrieval_context=["retrieval_context"],
         )
         test_cases.append(test_case)
     return test_cases
 
 # Data preprocessing before setting the dataset test cases
-dataset.test_cases = convert_goldens_to_test_cases(dataset.goldens)
+dataset.test_cases = convert_goldens_to_test_cases(dataset.test_cases)
 
 
 from deepeval.metrics import HallucinationMetric
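The hunk ends by importing HallucinationMetric; a typical way to run it over the converted test cases looks like the sketch below. The threshold value and the evaluate entry point follow deepeval's general usage rather than anything in this commit.

from deepeval import evaluate
from deepeval.metrics import HallucinationMetric

# Score each converted test case against its `context`; a hallucination score
# above the threshold fails the case. threshold=0.5 is illustrative only.
metric = HallucinationMetric(threshold=0.5)
evaluate(test_cases=dataset.test_cases, metrics=[metric])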