add custom metric implementation

This commit is contained in:
Rita Aleksziev 2024-11-28 16:53:33 +01:00
parent 4aa634d5e1
commit 996b3a658b

14
evals/deepeval_metrics.py Normal file
View file

@@ -0,0 +1,14 @@
from deepeval.metrics import GEval
from deepeval.test_case import LLMTestCaseParams
# Test-case fields the judge model is allowed to inspect when scoring.
_CORRECTNESS_PARAMS = [
    LLMTestCaseParams.ACTUAL_OUTPUT,
    LLMTestCaseParams.EXPECTED_OUTPUT,
]

# Instructions the judge follows to produce its score.
_CORRECTNESS_STEPS = [
    "Determine whether the actual output is factually correct based on the expected output."
]

# LLM-as-judge "Correctness" metric: gpt-4o-mini compares the actual output
# against the expected output and scores factual agreement (deepeval GEval).
correctness_metric = GEval(
    name="Correctness",
    model="gpt-4o-mini",
    evaluation_params=_CORRECTNESS_PARAMS,
    evaluation_steps=_CORRECTNESS_STEPS,
)