From 0fbb50960b683cbee291362c0bbcd759c1c5eb05 Mon Sep 17 00:00:00 2001
From: Rita Aleksziev
Date: Tue, 3 Dec 2024 15:59:03 +0100
Subject: [PATCH] prompt renaming

---
 evals/llm_as_a_judge.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/evals/llm_as_a_judge.py b/evals/llm_as_a_judge.py
index e4bb7998c..c84795642 100644
--- a/evals/llm_as_a_judge.py
+++ b/evals/llm_as_a_judge.py
@@ -24,7 +24,7 @@ async def answer_without_cognee(instance):
         "context": instance["context"],
     }
     user_prompt = render_prompt("context_for_question.txt", args)
-    system_prompt = read_query_prompt("answer_question.txt")
+    system_prompt = read_query_prompt("answer_hotpot_question.txt")
 
     llm_client = get_llm_client()
     answer_prediction = await llm_client.acreate_structured_output(
@@ -50,7 +50,7 @@ async def answer_with_cognee(instance):
         "context": search_results,
     }
     user_prompt = render_prompt("context_for_question.txt", args)
-    system_prompt = read_query_prompt("answer_question_kg.txt")
+    system_prompt = read_query_prompt("answer_hotpot_using_cognee_search.txt")
 
     llm_client = get_llm_client()
     answer_prediction = await llm_client.acreate_structured_output(