Support 4 different RAG options in eval (#439)
* QA eval dataset as argument, with hotpot and 2wikimultihop as options; JSON schema validation for datasets
* Load dataset file by filename, outsource utilities
* Restructure metric selection
* Add comprehensiveness, diversity and empowerment metrics
* Add promptfoo as an option
* Refactor RAG solution in eval
* LLM-as-a-judge metrics implemented in a uniform way
* Use requests.get instead of wget
* Clean up promptfoo config template
* Minor fixes
* Get promptfoo path instead of hardcoding
* Minor fixes
* Add LLM-as-a-judge prompts
* Support 4 different RAG options in eval
* Minor refactor and logger usage
This commit is contained in:
parent 6653d73556
commit 3494521cae
2 changed files with 70 additions and 34 deletions
@@ -5,39 +5,15 @@ from deepeval.dataset import EvaluationDataset
 from deepeval.test_case import LLMTestCase
 from tqdm import tqdm
 import logging
-import cognee
-from cognee.api.v1.search import SearchType
 from cognee.infrastructure.llm.get_llm_client import get_llm_client
 from cognee.infrastructure.llm.prompts import read_query_prompt, render_prompt
 from evals.qa_dataset_utils import load_qa_dataset
 from evals.qa_metrics_utils import get_metric
+from evals.qa_context_provider_utils import qa_context_providers
 
 logger = logging.getLogger(__name__)
 
 
-async def get_context_with_cognee(instance):
-    await cognee.prune.prune_data()
-    await cognee.prune.prune_system(metadata=True)
-
-    for title, sentences in instance["context"]:
-        await cognee.add("\n".join(sentences), dataset_name="QA")
-    await cognee.cognify("QA")
-
-    search_results = await cognee.search(SearchType.INSIGHTS, query_text=instance["question"])
-    search_results_second = await cognee.search(
-        SearchType.SUMMARIES, query_text=instance["question"]
-    )
-    search_results = search_results + search_results_second
-
-    search_results_str = "\n".join([context_item["text"] for context_item in search_results])
-
-    return search_results_str
-
-
-async def get_context_without_cognee(instance):
-    return instance["context"]
-
-
 async def answer_qa_instance(instance, context_provider):
     context = await context_provider(instance)
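The context providers moved out of this file all consume the same instance dict. Judging from the for title, sentences in instance["context"] loop, instances follow the HotpotQA-style layout; a minimal sketch of one instance, with illustrative values not taken from any dataset:

    # Hypothetical QA instance in the shape the context providers expect:
    # a question string plus a list of [title, sentences] context pairs.
    instance = {
        "question": "Which magazine was started first?",
        "context": [
            ["Arthur's Magazine", ["Arthur's Magazine was an American literary periodical.", "It was first published in 1844."]],
            ["First for Women", ["First for Women is a magazine published by Bauer Media Group."]],
        ],
    }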
@@ -88,10 +64,10 @@ async def deepeval_on_instances(instances, context_provider, eval_metric):
 
 
 async def eval_on_QA_dataset(
-    dataset_name_or_filename: str, context_provider, num_samples, eval_metric_name
+    dataset_name_or_filename: str, context_provider_name, num_samples, eval_metric_name
 ):
     dataset = load_qa_dataset(dataset_name_or_filename)
+    context_provider = qa_context_providers[context_provider_name]
     eval_metric = get_metric(eval_metric_name)
     instances = dataset if not num_samples else dataset[:num_samples]
 
-
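Note that eval_on_QA_dataset now takes the provider's registry key rather than the callable itself and resolves it through qa_context_providers. A minimal programmatic sketch, assuming "hotpot" is a dataset name load_qa_dataset accepts (the exact key is not shown in this diff):

    import asyncio

    # Hypothetical direct call: evaluate 10 samples with the "cognee"
    # provider and the default Correctness metric.
    avg_score = asyncio.run(eval_on_QA_dataset("hotpot", "cognee", 10, "Correctness"))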
@@ -105,18 +81,19 @@ if __name__ == "__main__":
     parser = argparse.ArgumentParser()
 
     parser.add_argument("--dataset", type=str, required=True, help="Which dataset to evaluate on")
-    parser.add_argument("--with_cognee", action="store_true")
+    parser.add_argument(
+        "--rag_option",
+        type=str,
+        choices=qa_context_providers.keys(),
+        required=True,
+        help="RAG option to use for providing context",
+    )
     parser.add_argument("--num_samples", type=int, default=500)
     parser.add_argument("--metric_name", type=str, default="Correctness")
 
     args = parser.parse_args()
 
-    if args.with_cognee:
-        context_provider = get_context_with_cognee
-    else:
-        context_provider = get_context_without_cognee
-
     avg_score = asyncio.run(
-        eval_on_QA_dataset(args.dataset, context_provider, args.num_samples, args.metric_name)
+        eval_on_QA_dataset(args.dataset, args.rag_option, args.num_samples, args.metric_name)
     )
     logger.info(f"Average {args.metric_name}: {avg_score}")
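The boolean --with_cognee flag is replaced by --rag_option, which accepts any key of qa_context_providers: no_rag, cognee, simple_rag, or brute_force. A hypothetical invocation (the script name and dataset value are illustrative, not taken from this diff):

    python evals/eval_on_qa_dataset.py \
        --dataset hotpot \
        --rag_option simple_rag \
        --num_samples 100 \
        --metric_name Correctness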
59 evals/qa_context_provider_utils.py (new file)
@@ -0,0 +1,59 @@
+import cognee
+from cognee.api.v1.search import SearchType
+from cognee.infrastructure.databases.vector import get_vector_engine
+from cognee.modules.retrieval.brute_force_triplet_search import brute_force_triplet_search
+from cognee.tasks.completion.graph_query_completion import retrieved_edges_to_string
+
+
+async def get_raw_context(instance: dict) -> str:
+    return instance["context"]
+
+
+async def cognify_instance(instance: dict):
+    await cognee.prune.prune_data()
+    await cognee.prune.prune_system(metadata=True)
+
+    for title, sentences in instance["context"]:
+        await cognee.add("\n".join(sentences), dataset_name="QA")
+    await cognee.cognify("QA")
+
+
+async def get_context_with_cognee(instance: dict) -> str:
+    await cognify_instance(instance)
+
+    insights = await cognee.search(SearchType.INSIGHTS, query_text=instance["question"])
+    summaries = await cognee.search(SearchType.SUMMARIES, query_text=instance["question"])
+    search_results = insights + summaries
+
+    search_results_str = "\n".join([context_item["text"] for context_item in search_results])
+
+    return search_results_str
+
+
+async def get_context_with_simple_rag(instance: dict) -> str:
+    await cognify_instance(instance)
+
+    vector_engine = get_vector_engine()
+    found_chunks = await vector_engine.search("document_chunk_text", instance["question"], limit=5)
+
+    search_results_str = "\n".join([context_item.payload["text"] for context_item in found_chunks])
+
+    return search_results_str
+
+
+async def get_context_with_brute_force_triplet_search(instance: dict) -> str:
+    await cognify_instance(instance)
+
+    found_triplets = await brute_force_triplet_search(instance["question"], top_k=5)
+
+    search_results_str = retrieved_edges_to_string(found_triplets)
+
+    return search_results_str
+
+
+qa_context_providers = {
+    "no_rag": get_raw_context,
+    "cognee": get_context_with_cognee,
+    "simple_rag": get_context_with_simple_rag,
+    "brute_force": get_context_with_brute_force_triplet_search,
+}
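Because each provider is just an async callable keyed by name, a fifth RAG option only needs another registry entry. A sketch of a hypothetical extension that combines chunk and triplet retrieval, reusing only calls already present in this file (not part of this commit):

    # Hypothetical combined provider: cognify once, then merge vector-chunk
    # context with brute-force triplet context.
    async def get_context_with_combined_search(instance: dict) -> str:
        await cognify_instance(instance)

        vector_engine = get_vector_engine()
        found_chunks = await vector_engine.search("document_chunk_text", instance["question"], limit=5)
        chunk_context = "\n".join([chunk.payload["text"] for chunk in found_chunks])

        found_triplets = await brute_force_triplet_search(instance["question"], top_k=5)
        triplet_context = retrieved_edges_to_string(found_triplets)

        return chunk_context + "\n" + triplet_context

    qa_context_providers["combined"] = get_context_with_combined_search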