add some spaces for readability
This commit is contained in:
parent b98470887e
commit a0d5102bd8
1 changed file with 15 additions and 0 deletions
@@ -37,8 +37,10 @@ async def answer_without_cognee(instance):

async def answer_with_cognee(instance):
    await cognee.prune.prune_data()
    await cognee.prune.prune_system(metadata=True)

    for (title, sentences) in instance["context"]:
        await cognee.add("\n".join(sentences), dataset_name = "HotPotQA")

    await cognee.cognify("HotPotQA")

    search_results = await cognee.search(
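For reference, each HotPotQA instance's "context" field is a list of (title, sentences) pairs, which is why the loop above joins the sentences of each document before handing the text to cognee.add. A minimal sketch of that layout, with made-up content (the field names follow the public HotPotQA dev JSON; the sample text is invented):

# Toy illustration only: field names mirror the HotPotQA dev set, content is made up.
instance = {
    "question": "Which city hosted the 1992 Summer Olympics?",
    "answer": "Barcelona",
    "context": [
        ["Barcelona", ["Barcelona is a city in Spain.", "It hosted the 1992 Summer Olympics."]],
        ["Madrid", ["Madrid is the capital of Spain."]],
    ],
}

for (title, sentences) in instance["context"]:
    passage = "\n".join(sentences)  # this is the string the diff passes to cognee.add
    print(title, "->", passage[:40])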
@@ -58,11 +60,13 @@ async def answer_with_cognee(instance):
        system_prompt=system_prompt,
        response_model=str,
    )

    return answer_prediction


async def eval_answers(instances, answers, eval_metric):
    test_cases = []

    for instance, answer in zip(instances, answers):
        test_case = LLMTestCase(
            input=instance["question"],
@@ -70,19 +74,24 @@ async def eval_answers(instances, answers, eval_metric):
            expected_output=instance["answer"]
        )
        test_cases.append(test_case)

    eval_set = EvaluationDataset(test_cases)
    eval_results = eval_set.evaluate([eval_metric])

    return eval_results

async def eval_on_hotpotQA(answer_provider, num_samples, eval_metric):
    base_config = get_base_config()
    data_root_dir = base_config.data_root_directory

    if not Path(data_root_dir).exists():
        Path(data_root_dir).mkdir()

    filepath = data_root_dir / Path("hotpot_dev_fullwiki_v1.json")
    if not filepath.exists():
        url = 'http://curtis.ml.cmu.edu/datasets/hotpot/hotpot_dev_fullwiki_v1.json'
        wget.download(url, out=data_root_dir)

    with open(filepath, "r") as file:
        dataset = json.load(file)

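As a standalone illustration of the deepeval pieces used in eval_answers and in the score aggregation in the next hunk: an LLMTestCase pairs a question, the produced answer, and the expected answer; an EvaluationDataset is evaluated against a list of metrics, and each test result exposes per-metric scores. A minimal sketch follows; the metric shown (AnswerRelevancyMetric) is only an example, not the script's default correctness_metric, and evaluating it requires a configured LLM backend.

from deepeval.dataset import EvaluationDataset
from deepeval.metrics import AnswerRelevancyMetric  # example metric, not the script's default
from deepeval.test_case import LLMTestCase

test_case = LLMTestCase(
    input="Which city hosted the 1992 Summer Olympics?",  # instance["question"]
    actual_output="Barcelona",                             # answer from the answer provider
    expected_output="Barcelona",                           # instance["answer"]
)

eval_set = EvaluationDataset([test_case])
eval_results = eval_set.evaluate([AnswerRelevancyMetric()])

# Mirrors the aggregation in the following hunk: one score per metric per test case.
for result in eval_results.test_results:
    print(result.metrics_data[0].score)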
@@ -91,15 +100,19 @@ async def eval_on_hotpotQA(answer_provider, num_samples, eval_metric):
    for instance in tqdm(instances, desc="Getting answers"):
        answer = await answer_provider(instance)
        answers.append(answer)

    eval_results = await eval_answers(instances, answers, eval_metric)
    avg_score = statistics.mean([result.metrics_data[0].score for result in eval_results.test_results])

    return avg_score

if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--with_cognee", action="store_true")
    parser.add_argument("--num_samples", type=int, default=500)
    parser.add_argument("--metric", type=str, default="correctness_metric")

    args = parser.parse_args()

    try:
@@ -107,9 +120,11 @@ if __name__ == "__main__":
        metric = metric_cls()
    except AttributeError:
        metric = getattr(evals.deepeval_metrics, args.metric)

    if args.with_cognee:
        answer_provider = answer_with_cognee
    else:
        answer_provider = answer_without_cognee

    avg_score = asyncio.run(eval_on_hotpotQA(answer_provider, args.num_samples, metric))
    print(f"Average {args.metric}: {avg_score}")
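The changed file is not named in this view. Assuming it is the evaluation script itself (for example evals/eval_on_hotpot.py, a hypothetical path), a run over 50 samples with graph-augmented answers would look roughly like:

python evals/eval_on_hotpot.py --with_cognee --num_samples 50

Per the hunks above, --metric selects a metric by name (default correctness_metric), falling back to a lookup on evals.deepeval_metrics if the first lookup raises AttributeError, and omitting --with_cognee uses answer_without_cognee instead.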