From 4e4b8d7e25f4ff08fd161dcab7ecd6fbec0672d0 Mon Sep 17 00:00:00 2001
From: yangdx
Date: Tue, 4 Nov 2025 15:56:57 +0800
Subject: [PATCH] Update RAG evaluation metrics to use class instances instead
 of objects
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

• Import metric classes not instances
• Instantiate metrics with () syntax
---
 lightrag/evaluation/eval_rag_quality.py | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/lightrag/evaluation/eval_rag_quality.py b/lightrag/evaluation/eval_rag_quality.py
index ca7f710b..5785c89b 100644
--- a/lightrag/evaluation/eval_rag_quality.py
+++ b/lightrag/evaluation/eval_rag_quality.py
@@ -52,10 +52,10 @@ try:
     from datasets import Dataset
     from ragas import evaluate
     from ragas.metrics import (
-        answer_relevancy,
-        context_precision,
-        context_recall,
-        faithfulness,
+        AnswerRelevancy,
+        ContextPrecision,
+        ContextRecall,
+        Faithfulness,
     )
     from ragas.llms import LangchainLLMWrapper
     from langchain_openai import ChatOpenAI, OpenAIEmbeddings
@@ -399,10 +399,10 @@ class RAGEvaluator:
         eval_results = evaluate(
             dataset=eval_dataset,
             metrics=[
-                faithfulness,
-                answer_relevancy,
-                context_recall,
-                context_precision,
+                Faithfulness(),
+                AnswerRelevancy(),
+                ContextRecall(),
+                ContextPrecision(),
             ],
             llm=self.eval_llm,
             embeddings=self.eval_embeddings,
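
Below is a minimal, self-contained sketch (not part of the patch) of the class-based
metric usage the diff switches to. The sample question/answer row, column names, and
the ChatOpenAI/OpenAIEmbeddings setup are illustrative assumptions, not code taken
from eval_rag_quality.py; the actual module builds its dataset, LLM, and embeddings
elsewhere.

    # Sketch, assuming the class-based ragas metrics API the patch imports.
    from datasets import Dataset
    from ragas import evaluate
    from ragas.metrics import (
        AnswerRelevancy,
        ContextPrecision,
        ContextRecall,
        Faithfulness,
    )
    from ragas.llms import LangchainLLMWrapper
    from langchain_openai import ChatOpenAI, OpenAIEmbeddings

    # Toy single-row dataset; column names follow ragas' question/answer/
    # contexts/ground_truth layout (an assumption, not from the repository).
    eval_dataset = Dataset.from_dict(
        {
            "question": ["What does the evaluator measure?"],
            "answer": ["It scores faithfulness, relevancy, recall and precision."],
            "contexts": [["The evaluator runs four ragas metrics over each sample."]],
            "ground_truth": ["It runs four ragas metrics over each sample."],
        }
    )

    eval_results = evaluate(
        dataset=eval_dataset,
        # Metrics are now instantiated from their classes rather than imported
        # as prebuilt module-level objects.
        metrics=[
            Faithfulness(),
            AnswerRelevancy(),
            ContextRecall(),
            ContextPrecision(),
        ],
        llm=LangchainLLMWrapper(ChatOpenAI(model="gpt-4o-mini")),  # assumed model
        embeddings=OpenAIEmbeddings(),  # assumed embedding backend
    )
    print(eval_results)

One practical consequence of instantiating the classes is that each evaluation run gets
its own metric objects instead of sharing the module-level singletons.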