Fix linter issues
parent 2675836149
commit cbd15b98a5
1 changed file with 42 additions and 55 deletions
@@ -1112,16 +1112,16 @@
 },
 "cell_type": "code",
 "source": [
-"from evals.eval_on_hotpot import eval_on_hotpotQA\n",
-"from evals.eval_on_hotpot import answer_with_cognee\n",
-"from evals.eval_on_hotpot import answer_without_cognee\n",
-"from evals.eval_on_hotpot import eval_answers\n",
-"from cognee.base_config import get_base_config\n",
-"from pathlib import Path\n",
-"from tqdm import tqdm\n",
-"import wget\n",
-"import json\n",
-"import statistics"
+"# from evals.eval_on_hotpot import eval_on_hotpotQA\n",
+"# from evals.eval_on_hotpot import answer_with_cognee\n",
+"# from evals.eval_on_hotpot import answer_without_cognee\n",
+"# from evals.eval_on_hotpot import eval_answers\n",
+"# from cognee.base_config import get_base_config\n",
+"# from pathlib import Path\n",
+"# from tqdm import tqdm\n",
+"# import wget\n",
+"# import json\n",
+"# import statistics"
 ],
 "id": "5f36b67668fdb646",
 "outputs": [],
@@ -1136,27 +1136,27 @@
 },
 "cell_type": "code",
 "source": [
-"answer_provider = answer_without_cognee # For native LLM answers use answer_without_cognee\n",
-"num_samples = 10 # With cognee, it takes ~1m10s per sample\n",
-"\n",
-"base_config = get_base_config()\n",
-"data_root_dir = base_config.data_root_directory\n",
-"\n",
-"if not Path(data_root_dir).exists():\n",
-" Path(data_root_dir).mkdir()\n",
-"\n",
-"filepath = data_root_dir / Path(\"hotpot_dev_fullwiki_v1.json\")\n",
-"if not filepath.exists():\n",
-" url = 'http://curtis.ml.cmu.edu/datasets/hotpot/hotpot_dev_fullwiki_v1.json'\n",
-" wget.download(url, out=data_root_dir)\n",
-"\n",
-"with open(filepath, \"r\") as file:\n",
-" dataset = json.load(file)\n",
-"instances = dataset if not num_samples else dataset[:num_samples]\n",
-"answers = []\n",
-"for instance in tqdm(instances, desc=\"Getting answers\"):\n",
-" answer = await answer_provider(instance)\n",
-" answers.append(answer)"
+"# answer_provider = answer_without_cognee # For native LLM answers use answer_without_cognee\n",
+"# num_samples = 10 # With cognee, it takes ~1m10s per sample\n",
+"# \n",
+"# base_config = get_base_config()\n",
+"# data_root_dir = base_config.data_root_directory\n",
+"# \n",
+"# if not Path(data_root_dir).exists():\n",
+"# Path(data_root_dir).mkdir()\n",
+"# \n",
+"# filepath = data_root_dir / Path(\"hotpot_dev_fullwiki_v1.json\")\n",
+"# if not filepath.exists():\n",
+"# url = 'http://curtis.ml.cmu.edu/datasets/hotpot/hotpot_dev_fullwiki_v1.json'\n",
+"# wget.download(url, out=data_root_dir)\n",
+"# \n",
+"# with open(filepath, \"r\") as file:\n",
+"# dataset = json.load(file)\n",
+"# instances = dataset if not num_samples else dataset[:num_samples]\n",
+"# answers = []\n",
+"# for instance in tqdm(instances, desc=\"Getting answers\"):\n",
+"# answer = await answer_provider(instance)\n",
+"# answers.append(answer)"
 ],
 "id": "d5af4b516c6621a3",
 "outputs": [
@@ -1179,8 +1179,8 @@
 },
 "cell_type": "code",
 "source": [
-"from evals.deepeval_metrics import f1_score_metric\n",
-"from evals.deepeval_metrics import em_score_metric"
+"# from evals.deepeval_metrics import f1_score_metric\n",
+"# from evals.deepeval_metrics import em_score_metric"
 ],
 "id": "2bf69048a272158c",
 "outputs": [],
@@ -1195,10 +1195,10 @@
 },
 "cell_type": "code",
 "source": [
-"f1_metric = f1_score_metric()\n",
-"eval_results = await eval_answers(instances, answers, f1_metric)\n",
-"avg_f1_score = statistics.mean([result.metrics_data[0].score for result in eval_results.test_results])\n",
-"print(\"F1 score: \", avg_f1_score)"
+"# f1_metric = f1_score_metric()\n",
+"# eval_results = await eval_answers(instances, answers, f1_metric)\n",
+"# avg_f1_score = statistics.mean([result.metrics_data[0].score for result in eval_results.test_results])\n",
+"# print(\"F1 score: \", avg_f1_score)"
 ],
 "id": "72ba5f89cccbee6b",
 "outputs": [
@@ -1421,28 +1421,15 @@
 {
 "metadata": {
 "ExecuteTime": {
-"end_time": "2024-12-24T15:26:14.946766Z",
-"start_time": "2024-12-24T15:26:14.944741Z"
+"end_time": "2025-01-05T19:23:30.332977Z",
+"start_time": "2025-01-05T19:23:30.331538Z"
 }
 },
 "cell_type": "code",
-"source": [
-"for n in range(1,4):\n",
-" print(n)"
-],
+"source": "",
 "id": "783985c35d1126de",
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"1\n",
-"2\n",
-"3\n"
-]
-}
-],
-"execution_count": 38
+"outputs": [],
+"execution_count": null
 },
 {
 "cell_type": "markdown",
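The cells commented out above implement the notebook's HotpotQA evaluation flow: download the fullwiki dev split into cognee's data root, generate an answer per question, then score the answers with the deepeval F1 metric. Below is a minimal standalone sketch of that same flow, assuming the evals.eval_on_hotpot and evals.deepeval_metrics helpers keep the signatures used in the notebook (answer_without_cognee, eval_answers, f1_score_metric); it mirrors the cell code rather than an existing script in the repository.

# Sketch only: mirrors the notebook cells in this diff; helper signatures are assumed, not verified.
import asyncio
import json
import statistics
from pathlib import Path

import wget
from tqdm import tqdm

from cognee.base_config import get_base_config
from evals.eval_on_hotpot import answer_without_cognee, eval_answers
from evals.deepeval_metrics import f1_score_metric


async def main(num_samples: int = 10) -> None:
    # Resolve the data directory from cognee's base config and make sure it exists.
    base_config = get_base_config()
    data_root_dir = Path(base_config.data_root_directory)
    data_root_dir.mkdir(parents=True, exist_ok=True)

    # Download the HotpotQA fullwiki dev split once.
    filepath = data_root_dir / "hotpot_dev_fullwiki_v1.json"
    if not filepath.exists():
        url = "http://curtis.ml.cmu.edu/datasets/hotpot/hotpot_dev_fullwiki_v1.json"
        wget.download(url, out=str(data_root_dir))

    with open(filepath, "r") as file:
        dataset = json.load(file)
    instances = dataset[:num_samples] if num_samples else dataset

    # Answer each instance with the chosen provider (swap in answer_with_cognee for cognee answers).
    answers = []
    for instance in tqdm(instances, desc="Getting answers"):
        answers.append(await answer_without_cognee(instance))

    # Score the answers with the F1 metric and report the mean over all test results.
    f1_metric = f1_score_metric()
    eval_results = await eval_answers(instances, answers, f1_metric)
    avg_f1_score = statistics.mean(
        result.metrics_data[0].score for result in eval_results.test_results
    )
    print("F1 score:", avg_f1_score)


if __name__ == "__main__":
    asyncio.run(main())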