Use logger in RAG evaluation and optimize reference content joins
commit 0b5e3f9dc4
parent 963ad4c637
2 changed files with 99 additions and 90 deletions
@@ -412,11 +412,8 @@ def create_query_routes(rag, api_key: Optional[str] = None, top_k: int = 60):
             ref_id = chunk.get("reference_id", "")
             content = chunk.get("content", "")
             if ref_id and content:
-                # If multiple chunks have same reference_id, concatenate
-                if ref_id in ref_id_to_content:
-                    ref_id_to_content[ref_id] += "\n\n" + content
-                else:
-                    ref_id_to_content[ref_id] = content
+                # Collect chunk content; join later to avoid quadratic string concatenation
+                ref_id_to_content.setdefault(ref_id, []).append(content)
 
         # Add content to references
         enriched_references = []
@@ -424,7 +421,7 @@ def create_query_routes(rag, api_key: Optional[str] = None, top_k: int = 60):
            ref_copy = ref.copy()
            ref_id = ref.get("reference_id", "")
            if ref_id in ref_id_to_content:
-               ref_copy["content"] = ref_id_to_content[ref_id]
+               ref_copy["content"] = "\n\n".join(ref_id_to_content[ref_id])
            enriched_references.append(ref_copy)
        references = enriched_references
 
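
The first file's change replaces per-chunk string concatenation with a collect-then-join pattern: each reference_id accumulates a list of chunk strings, and the join happens once per reference when the enriched references are built. A minimal, self-contained sketch of the pattern (made-up chunk data, not the actual route code):

from typing import Dict, List

chunks = [
    {"reference_id": "ref-1", "content": "First chunk."},
    {"reference_id": "ref-1", "content": "Second chunk."},
    {"reference_id": "ref-2", "content": "Other document."},
]

ref_id_to_content: Dict[str, List[str]] = {}
for chunk in chunks:
    ref_id = chunk.get("reference_id", "")
    content = chunk.get("content", "")
    if ref_id and content:
        # Appending to a list is O(1); repeated "+=" on a string re-copies the
        # accumulated text on every chunk, which is what the commit avoids.
        ref_id_to_content.setdefault(ref_id, []).append(content)

# One join per reference, linear in the total amount of text.
merged = {ref_id: "\n\n".join(parts) for ref_id, parts in ref_id_to_content.items()}
print(merged["ref-1"])  # prints the two chunks separated by a blank line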
@@ -21,6 +21,7 @@ Results are saved to: lightrag/evaluation/results/
 import asyncio
 import csv
 import json
+import math
 import os
 import sys
 import time
@@ -30,6 +31,7 @@ from typing import Any, Dict, List
 
 import httpx
 from dotenv import load_dotenv
+from lightrag.utils import logger
 
 # Add parent directory to path
 sys.path.insert(0, str(Path(__file__).parent.parent.parent))
@@ -56,11 +58,21 @@ try:
     faithfulness,
 )
 except ImportError as e:
-    print(f"❌ RAGAS import error: {e}")
-    print(" Install with: pip install ragas datasets")
+    logger.error("❌ RAGAS import error: %s", e)
+    logger.error(" Install with: pip install ragas datasets")
     sys.exit(1)
 
 
+CONNECT_TIMEOUT_SECONDS = 180.0
+READ_TIMEOUT_SECONDS = 300.0
+TOTAL_TIMEOUT_SECONDS = 180.0
+
+
+def _is_nan(value: Any) -> bool:
+    """Return True when value is a float NaN."""
+    return isinstance(value, float) and math.isnan(value)
+
+
 class RAGEvaluator:
     """Evaluate RAG system quality using RAGAS metrics"""
 
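
The new module-level constants name the HTTP timeouts used later in run(), and _is_nan centralizes the NaN guard that the summing code previously spelled out inline. A quick standalone check of the helper's behavior (the function body is copied from the diff; the sample calls are illustrative):

import math
from typing import Any


def _is_nan(value: Any) -> bool:
    """Return True when value is a float NaN."""
    return isinstance(value, float) and math.isnan(value)


print(_is_nan(float("nan")))  # True
print(_is_nan(0.83))          # False
print(_is_nan(0))             # False: non-floats short-circuit before math.isnan
print(_is_nan(None))          # False: math.isnan(None) on its own would raise TypeError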
@@ -138,12 +150,14 @@ class RAGEvaluator:
            references = result.get("references", [])
 
            # DEBUG: Inspect the API response
-           print(f" 🔍 References Count: {len(references)}")
+           logger.debug("🔍 References Count: %s", len(references))
            if references:
                first_ref = references[0]
-               print(f" 🔍 First Reference Keys: {list(first_ref.keys())}")
+               logger.debug("🔍 First Reference Keys: %s", list(first_ref.keys()))
                if "content" in first_ref:
-                   print(f" 🔍 Content Preview: {first_ref['content'][:100]}...")
+                   logger.debug(
+                       "🔍 Content Preview: %s...", first_ref["content"][:100]
+                   )
 
            # Extract chunk content from enriched references
            contexts = [
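
These debug lines use %-style logger arguments rather than f-strings, so the message is only formatted when the record actually passes the logger's level check. A small sketch with a stand-in logger (lightrag.utils.logger itself is not shown in this diff):

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("rag_eval_demo")  # stand-in for lightrag.utils.logger

references = [{"reference_id": "ref-1", "content": "chunk text"}]

# At INFO level the DEBUG record is dropped before its message is formatted.
logger.debug("🔍 References Count: %s", len(references))
logger.info("🔍 First Reference Keys: %s", list(references[0].keys()))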
@@ -194,11 +208,13 @@ class RAGEvaluator:
        Returns:
            Evaluation result dictionary
        """
+       total_cases = len(self.test_cases)
+
        async with semaphore:
            question = test_case["question"]
            ground_truth = test_case["ground_truth"]
 
-           print(f"[{idx}/{len(self.test_cases)}] Evaluating: {question[:60]}...")
+           logger.info("[%s/%s] Evaluating: %s...", idx, total_cases, question[:60])
 
            # Generate RAG response by calling actual LightRAG API
            rag_response = await self.generate_rag_response(
@@ -209,11 +225,13 @@ class RAGEvaluator:
            retrieved_contexts = rag_response["contexts"]
 
            # DEBUG: Print what was actually retrieved
-           print(f" 📝 Retrieved {len(retrieved_contexts)} contexts")
+           logger.debug("📝 Retrieved %s contexts", len(retrieved_contexts))
            if retrieved_contexts:
-               print(f" 📄 First context preview: {retrieved_contexts[0][:100]}...")
+               logger.debug(
+                   "📄 First context preview: %s...", retrieved_contexts[0][:100]
+               )
            else:
-               print(" ⚠️ WARNING: No contexts retrieved!")
+               logger.warning("⚠️ No contexts retrieved!")
 
            # Prepare dataset for RAGAS evaluation with CORRECT contexts
            eval_dataset = Dataset.from_dict(
@@ -271,20 +289,16 @@ class RAGEvaluator:
            ragas_score = sum(metrics.values()) / len(metrics) if metrics else 0
            result["ragas_score"] = round(ragas_score, 4)
 
-           # Print metrics
-           print(f" ✅ Faithfulness: {metrics['faithfulness']:.4f}")
-           print(f" ✅ Answer Relevance: {metrics['answer_relevance']:.4f}")
-           print(f" ✅ Context Recall: {metrics['context_recall']:.4f}")
-           print(f" ✅ Context Precision: {metrics['context_precision']:.4f}")
-           print(f" 📊 RAGAS Score: {result['ragas_score']:.4f}\n")
+           logger.info("✅ Faithfulness: %.4f", metrics["faithfulness"])
+           logger.info("✅ Answer Relevance: %.4f", metrics["answer_relevance"])
+           logger.info("✅ Context Recall: %.4f", metrics["context_recall"])
+           logger.info("✅ Context Precision: %.4f", metrics["context_precision"])
+           logger.info("📊 RAGAS Score: %.4f", result["ragas_score"])
 
            return result
 
        except Exception as e:
-           import traceback
-
-           print(f" ❌ Error evaluating: {str(e)}")
-           print(f" 🔍 Full traceback:\n{traceback.format_exc()}\n")
+           logger.exception("❌ Error evaluating: %s", e)
            return {
                "question": question,
                "error": str(e),
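
In the error path, logger.exception replaces the manual traceback printing: it logs at ERROR level and appends the active exception's traceback automatically, which is why the local import traceback and the format_exc() call could be dropped. A minimal sketch with a stand-in logger:

import logging

logging.basicConfig(level=logging.ERROR)
logger = logging.getLogger("rag_eval_demo")  # stand-in for lightrag.utils.logger

try:
    raise ValueError("evaluation failed")
except Exception as e:
    # logger.exception is meant to be called from an except block; it records
    # the message at ERROR level and attaches the full traceback on its own.
    logger.exception("❌ Error evaluating: %s", e)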
@@ -303,17 +317,22 @@ class RAGEvaluator:
        # Get MAX_ASYNC from environment (default to 4 if not set)
        max_async = int(os.getenv("MAX_ASYNC", "4"))
 
-       print("\n" + "=" * 70)
-       print("🚀 Starting RAGAS Evaluation of Portfolio RAG System")
-       print(f"🔧 Parallel evaluations: {max_async}")
-       print("=" * 70 + "\n")
+       logger.info("")
+       logger.info("%s", "=" * 70)
+       logger.info("🚀 Starting RAGAS Evaluation of Portfolio RAG System")
+       logger.info("🔧 Parallel evaluations: %s", max_async)
+       logger.info("%s", "=" * 70)
 
        # Create semaphore to limit concurrent evaluations
        semaphore = asyncio.Semaphore(max_async)
 
        # Create shared HTTP client with connection pooling and proper timeouts
        # Timeout: 3 minutes for connect, 5 minutes for read (LLM can be slow)
-       timeout = httpx.Timeout(180.0, connect=180.0, read=300.0)
+       timeout = httpx.Timeout(
+           TOTAL_TIMEOUT_SECONDS,
+           connect=CONNECT_TIMEOUT_SECONDS,
+           read=READ_TIMEOUT_SECONDS,
+       )
        limits = httpx.Limits(
            max_connections=max_async * 2,  # Allow some buffer
            max_keepalive_connections=max_async,
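
The httpx.Timeout call takes a positional default that applies to every phase of the request, with connect and read overridden from the new constants; httpx.Limits caps the connection pool relative to the concurrency setting. A standalone sketch using the same values (max_async is hard-coded here, whereas the script reads MAX_ASYNC from the environment):

import httpx

CONNECT_TIMEOUT_SECONDS = 180.0
READ_TIMEOUT_SECONDS = 300.0
TOTAL_TIMEOUT_SECONDS = 180.0
max_async = 4  # illustrative stand-in for int(os.getenv("MAX_ASYNC", "4"))

# The positional value covers the remaining phases (write, pool); connect and
# read get their own limits because the LLM-backed endpoint can be slow.
timeout = httpx.Timeout(
    TOTAL_TIMEOUT_SECONDS,
    connect=CONNECT_TIMEOUT_SECONDS,
    read=READ_TIMEOUT_SECONDS,
)
limits = httpx.Limits(
    max_connections=max_async * 2,  # small buffer over the concurrency cap
    max_keepalive_connections=max_async,
)
print(timeout, limits)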
@@ -418,8 +437,6 @@ class RAGEvaluator:
        }
 
        # Calculate averages for each metric (handling NaN values)
-       import math
-
        metrics_sum = {
            "faithfulness": 0.0,
            "answer_relevance": 0.0,
@@ -432,39 +449,23 @@ class RAGEvaluator:
            metrics = result.get("metrics", {})
            # Skip NaN values when summing
            faithfulness = metrics.get("faithfulness", 0)
-           if (
-               not math.isnan(faithfulness)
-               if isinstance(faithfulness, float)
-               else True
-           ):
+           if not _is_nan(faithfulness):
                metrics_sum["faithfulness"] += faithfulness
 
            answer_relevance = metrics.get("answer_relevance", 0)
-           if (
-               not math.isnan(answer_relevance)
-               if isinstance(answer_relevance, float)
-               else True
-           ):
+           if not _is_nan(answer_relevance):
                metrics_sum["answer_relevance"] += answer_relevance
 
            context_recall = metrics.get("context_recall", 0)
-           if (
-               not math.isnan(context_recall)
-               if isinstance(context_recall, float)
-               else True
-           ):
+           if not _is_nan(context_recall):
                metrics_sum["context_recall"] += context_recall
 
            context_precision = metrics.get("context_precision", 0)
-           if (
-               not math.isnan(context_precision)
-               if isinstance(context_precision, float)
-               else True
-           ):
+           if not _is_nan(context_precision):
                metrics_sum["context_precision"] += context_precision
 
            ragas_score = result.get("ragas_score", 0)
-           if not math.isnan(ragas_score) if isinstance(ragas_score, float) else True:
+           if not _is_nan(ragas_score):
                metrics_sum["ragas_score"] += ragas_score
 
        # Calculate averages
@@ -473,13 +474,13 @@ class RAGEvaluator:
        for k, v in metrics_sum.items():
            avg_val = v / n if n > 0 else 0
            # Handle NaN in average
-           avg_metrics[k] = round(avg_val, 4) if not math.isnan(avg_val) else 0.0
+           avg_metrics[k] = round(avg_val, 4) if not _is_nan(avg_val) else 0.0
 
        # Find min and max RAGAS scores (filter out NaN)
        ragas_scores = []
        for r in valid_results:
            score = r.get("ragas_score", 0)
-           if isinstance(score, float) and math.isnan(score):
+           if _is_nan(score):
                continue  # Skip NaN values
            ragas_scores.append(score)
 
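
With _is_nan in place, the averaging stays NaN-safe: NaN entries add nothing to the running sums, and the final division guards against a NaN average before rounding. A toy run of the same arithmetic (illustrative scores; the divisor n is assumed to count every result, as the hunk's v / n suggests):

import math


def _is_nan(value):
    return isinstance(value, float) and math.isnan(value)


scores = [0.91, float("nan"), 0.78, 0.85]

total = sum(s for s in scores if not _is_nan(s))  # NaN entries are skipped
n = len(scores)
avg_val = total / n if n > 0 else 0
avg = round(avg_val, 4) if not _is_nan(avg_val) else 0.0
print(avg)  # 0.635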
@@ -525,43 +526,53 @@ class RAGEvaluator:
        )
        with open(json_path, "w") as f:
            json.dump(summary, f, indent=2)
-       print(f"✅ JSON results saved to: {json_path}")
+       logger.info("✅ JSON results saved to: %s", json_path)
 
        # Export to CSV
        csv_path = self._export_to_csv(results)
-       print(f"✅ CSV results saved to: {csv_path}")
+       logger.info("✅ CSV results saved to: %s", csv_path)
 
        # Print summary
-       print("\n" + "=" * 70)
-       print("📊 EVALUATION COMPLETE")
-       print("=" * 70)
-       print(f"Total Tests: {len(results)}")
-       print(f"Successful: {benchmark_stats['successful_tests']}")
-       print(f"Failed: {benchmark_stats['failed_tests']}")
-       print(f"Success Rate: {benchmark_stats['success_rate']:.2f}%")
-       print(f"Elapsed Time: {elapsed_time:.2f} seconds")
-       print(f"Avg Time/Test: {elapsed_time / len(results):.2f} seconds")
+       logger.info("")
+       logger.info("%s", "=" * 70)
+       logger.info("📊 EVALUATION COMPLETE")
+       logger.info("%s", "=" * 70)
+       logger.info("Total Tests: %s", len(results))
+       logger.info("Successful: %s", benchmark_stats["successful_tests"])
+       logger.info("Failed: %s", benchmark_stats["failed_tests"])
+       logger.info("Success Rate: %.2f%%", benchmark_stats["success_rate"])
+       logger.info("Elapsed Time: %.2f seconds", elapsed_time)
+       logger.info("Avg Time/Test: %.2f seconds", elapsed_time / len(results))
 
        # Print benchmark metrics
-       print("\n" + "=" * 70)
-       print("📈 BENCHMARK RESULTS (Moyennes)")
-       print("=" * 70)
+       logger.info("")
+       logger.info("%s", "=" * 70)
+       logger.info("📈 BENCHMARK RESULTS (Moyennes)")
+       logger.info("%s", "=" * 70)
        avg = benchmark_stats["average_metrics"]
-       print(f"Moyenne Faithfulness: {avg['faithfulness']:.4f}")
-       print(f"Moyenne Answer Relevance: {avg['answer_relevance']:.4f}")
-       print(f"Moyenne Context Recall: {avg['context_recall']:.4f}")
-       print(f"Moyenne Context Precision: {avg['context_precision']:.4f}")
-       print(f"Moyenne RAGAS Score: {avg['ragas_score']:.4f}")
-       print(f"\nMin RAGAS Score: {benchmark_stats['min_ragas_score']:.4f}")
-       print(f"Max RAGAS Score: {benchmark_stats['max_ragas_score']:.4f}")
+       logger.info("Moyenne Faithfulness: %.4f", avg["faithfulness"])
+       logger.info("Moyenne Answer Relevance: %.4f", avg["answer_relevance"])
+       logger.info("Moyenne Context Recall: %.4f", avg["context_recall"])
+       logger.info("Moyenne Context Precision: %.4f", avg["context_precision"])
+       logger.info("Moyenne RAGAS Score: %.4f", avg["ragas_score"])
+       logger.info("")
+       logger.info(
+           "Min RAGAS Score: %.4f",
+           benchmark_stats["min_ragas_score"],
+       )
+       logger.info(
+           "Max RAGAS Score: %.4f",
+           benchmark_stats["max_ragas_score"],
+       )
 
-       print("\n" + "=" * 70)
-       print("📁 GENERATED FILES")
-       print("=" * 70)
-       print(f"Results Dir: {self.results_dir.absolute()}")
-       print(f" • CSV: {csv_path.name}")
-       print(f" • JSON: {json_path.name}")
-       print("=" * 70 + "\n")
+       logger.info("")
+       logger.info("%s", "=" * 70)
+       logger.info("📁 GENERATED FILES")
+       logger.info("%s", "=" * 70)
+       logger.info("Results Dir: %s", self.results_dir.absolute())
+       logger.info(" • CSV: %s", csv_path.name)
+       logger.info(" • JSON: %s", json_path.name)
+       logger.info("%s", "=" * 70)
 
        return summary
 
@@ -581,19 +592,20 @@ async def main():
        if len(sys.argv) > 1:
            rag_api_url = sys.argv[1]
 
-       print("\n" + "=" * 70)
-       print("🔍 RAGAS Evaluation - Using Real LightRAG API")
-       print("=" * 70)
+       logger.info("")
+       logger.info("%s", "=" * 70)
+       logger.info("🔍 RAGAS Evaluation - Using Real LightRAG API")
+       logger.info("%s", "=" * 70)
        if rag_api_url:
-           print(f"📡 RAG API URL: {rag_api_url}")
+           logger.info("📡 RAG API URL: %s", rag_api_url)
        else:
-           print("📡 RAG API URL: http://localhost:9621 (default)")
+           logger.info("📡 RAG API URL: http://localhost:9621 (default)")
-       print("=" * 70 + "\n")
+       logger.info("%s", "=" * 70)
 
        evaluator = RAGEvaluator(rag_api_url=rag_api_url)
        await evaluator.run()
    except Exception as e:
-       print(f"\n❌ Error: {str(e)}\n")
+       logger.exception("❌ Error: %s", e)
        sys.exit(1)
 
 