add custom metric implementation
parent 4aa634d5e1
commit 996b3a658b
1 changed file with 14 additions and 0 deletions
evals/deepeval_metrics.py (new file, 14 additions)

@@ -0,0 +1,14 @@
+from deepeval.metrics import GEval
+from deepeval.test_case import LLMTestCaseParams
+
+correctness_metric = GEval(
+    name="Correctness",
+    model="gpt-4o-mini",
+    evaluation_params=[
+        LLMTestCaseParams.ACTUAL_OUTPUT,
+        LLMTestCaseParams.EXPECTED_OUTPUT
+    ],
+    evaluation_steps=[
+        "Determine whether the actual output is factually correct based on the expected output."
+    ]
+)
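
Usage note (not part of the commit): a minimal sketch of how this metric could be exercised with deepeval, assuming an OPENAI_API_KEY is available for the gpt-4o-mini judge; the test case strings and import path are illustrative placeholders, not from this commit.

    from deepeval.test_case import LLMTestCase

    from evals.deepeval_metrics import correctness_metric

    # Hypothetical test case; the input/output strings are examples only.
    test_case = LLMTestCase(
        input="What is the boiling point of water at sea level?",
        actual_output="Water boils at 100 degrees Celsius at sea level.",
        expected_output="100 degrees Celsius at standard atmospheric pressure.",
    )

    # measure() runs the GEval judge over evaluation_params using the
    # evaluation_steps defined above, then populates score and reason.
    correctness_metric.measure(test_case)
    print(correctness_metric.score, correctness_metric.reason)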