Update RAG evaluation metrics to use class instances instead of module-level metric objects
• Import metric classes, not instances
• Instantiate metrics with () syntax
parent 7abc687742
commit 4e4b8d7e25
1 changed file with 8 additions and 8 deletions
@@ -52,10 +52,10 @@ try:
     from datasets import Dataset
     from ragas import evaluate
     from ragas.metrics import (
-        answer_relevancy,
-        context_precision,
-        context_recall,
-        faithfulness,
+        AnswerRelevancy,
+        ContextPrecision,
+        ContextRecall,
+        Faithfulness,
     )
     from ragas.llms import LangchainLLMWrapper
     from langchain_openai import ChatOpenAI, OpenAIEmbeddings
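The first hunk replaces ragas' pre-instantiated metric objects with their classes. A minimal sketch of the difference, assuming the installed ragas version exposes both the lowercase instances and the CamelCase classes; the variable name is hypothetical:

# Before the change: the lowercase name is already a shared, module-level metric instance.
# from ragas.metrics import faithfulness
# After the change: the CamelCase name is the metric class and must be called to get an instance.
from ragas.metrics import Faithfulness

faithfulness_metric = Faithfulness()  # fresh instance per evaluator, nothing shared across runs

Creating instances at the call site avoids reusing one module-level object across evaluators and leaves room for per-run configuration later.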
@@ -399,10 +399,10 @@ class RAGEvaluator:
         eval_results = evaluate(
             dataset=eval_dataset,
             metrics=[
-                faithfulness,
-                answer_relevancy,
-                context_recall,
-                context_precision,
+                Faithfulness(),
+                AnswerRelevancy(),
+                ContextRecall(),
+                ContextPrecision(),
             ],
             llm=self.eval_llm,
             embeddings=self.eval_embeddings,
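The second hunk is the call site inside RAGEvaluator. A hedged end-to-end sketch of how that call would be exercised; the dataset column names ("question", "answer", "contexts", "ground_truth"), the gpt-4o-mini model name, and the to_pandas() accessor follow the common ragas workflow and are assumptions, not taken from this repository:

# Hedged sketch, not the repository's code: dataset schema, model name, and
# result handling are assumptions based on the usual ragas workflow.
from datasets import Dataset
from ragas import evaluate
from ragas.metrics import AnswerRelevancy, ContextPrecision, ContextRecall, Faithfulness
from ragas.llms import LangchainLLMWrapper
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

# One evaluation sample; real usage would collect many question/answer/context rows.
eval_dataset = Dataset.from_dict({
    "question": ["What does the retriever index?"],
    "answer": ["It indexes the project documentation."],
    "contexts": [["The retriever indexes the project documentation."]],
    "ground_truth": ["The project documentation."],
})

eval_llm = LangchainLLMWrapper(ChatOpenAI(model="gpt-4o-mini"))  # judge LLM; model name assumed
eval_embeddings = OpenAIEmbeddings()                             # used by AnswerRelevancy

eval_results = evaluate(
    dataset=eval_dataset,
    metrics=[Faithfulness(), AnswerRelevancy(), ContextRecall(), ContextPrecision()],
    llm=eval_llm,
    embeddings=eval_embeddings,
)
print(eval_results.to_pandas())  # one row per sample, one column per metric score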