graphiti/graphiti_core/prompts/eval.py

"""
Copyright 2024, Zep Software, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
from typing import Any, Protocol, TypedDict

from .models import Message, PromptFunction, PromptVersion


class Prompt(Protocol):
    qa_prompt: PromptVersion
    eval_prompt: PromptVersion


class Versions(TypedDict):
    qa_prompt: PromptFunction
    eval_prompt: PromptFunction

def qa_prompt(context: dict[str, Any]) -> list[Message]:
    sys_prompt = """You are Alice and should respond to all questions from the first person perspective of Alice"""

    user_prompt = f"""
    Your task is to briefly answer the question in the way that you think Alice would answer the question.
    You are given the following entity summaries and facts to help you determine the answer to your question.
    <ENTITY_SUMMARIES>
    {json.dumps(context['entity_summaries'])}
    </ENTITY_SUMMARIES>
    <FACTS>
    {json.dumps(context['facts'])}
    </FACTS>
    <QUESTION>
    {context['query']}
    </QUESTION>
    Respond with a JSON object in the following format:
    {{
        "ANSWER": "how Alice would answer the question"
    }}
    """
    return [
        Message(role='system', content=sys_prompt),
        Message(role='user', content=user_prompt),
    ]

def eval_prompt(context: dict[str, Any]) -> list[Message]:
    sys_prompt = (
        """You are a judge that determines if answers to questions match a gold standard answer"""
    )

    user_prompt = f"""
    Given the QUESTION and the gold standard ANSWER, determine whether the RESPONSE to the question is correct or incorrect.
    Although the RESPONSE may be more verbose, mark it as correct as long as it references the same topic
    as the gold standard ANSWER. Also include your reasoning for the grade.
    <QUESTION>
    {context['query']}
    </QUESTION>
    <ANSWER>
    {context['answer']}
    </ANSWER>
    <RESPONSE>
    {context['response']}
    </RESPONSE>
    Respond with a JSON object in the following format:
    {{
        "is_correct": "boolean if the answer is correct or incorrect",
        "reasoning": "why you determined the response was correct or incorrect"
    }}
    """
    return [
        Message(role='system', content=sys_prompt),
        Message(role='user', content=user_prompt),
    ]

versions: Versions = {'qa_prompt': qa_prompt, 'eval_prompt': eval_prompt}
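

# Example usage (a minimal sketch, not part of the library): the context keys below
# mirror the placeholders referenced in the f-strings above (entity_summaries, facts,
# query for qa_prompt; query, answer, response for eval_prompt). The literal values
# are illustrative only; in practice the calling evaluation code assembles the context.
if __name__ == '__main__':
    qa_messages = versions['qa_prompt'](
        {
            'entity_summaries': [{'name': 'Alice', 'summary': 'Alice enjoys hiking.'}],
            'facts': ['Alice went hiking last weekend.'],
            'query': 'What did you do last weekend?',
        }
    )
    eval_messages = versions['eval_prompt'](
        {
            'query': 'What did you do last weekend?',
            'answer': 'I went hiking.',
            'response': 'I spent the weekend hiking.',
        }
    )
    for message in qa_messages + eval_messages:
        print(message.role, message.content)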