Add extensive test suites for API routes and utilities:
- Implement test_search_routes.py (406 lines) for search endpoint validation
- Implement test_upload_routes.py (724 lines) for document upload workflows
- Implement test_s3_client.py (618 lines) for S3 storage operations
- Implement test_citation_utils.py (352 lines) for citation extraction
- Implement test_chunking.py (216 lines) for text chunking validation

Add S3 storage client implementation:
- Create lightrag/storage/s3_client.py with S3 operations (illustrative sketch below)
- Add storage module initialization with exports
- Integrate S3 client with document upload handling

Enhance API routes and core functionality:
- Add search_routes.py with full-text and graph search endpoints
- Add upload_routes.py with multipart document upload support
- Update operate.py with bulk operations and health checks
- Enhance postgres_impl.py with bulk upsert and parameterized queries
- Update lightrag_server.py to register new API routes
- Improve utils.py with citation and formatting utilities

Update dependencies and configuration:
- Add S3 and test dependencies to pyproject.toml
- Update docker-compose.test.yml for testing environment
- Sync uv.lock with new dependencies

Apply code quality improvements across all modified files:
- Add type hints to function signatures
- Update imports and router initialization
- Fix logging and error handling
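The new lightrag/storage/s3_client.py itself is not shown in this view. Purely as an illustrative sketch of what a minimal boto3-backed document-storage client could look like (the class name S3Client and the methods upload_document, get_document, and exists are assumptions for illustration, not the actual module's API):

# Hypothetical sketch only -- not the real lightrag/storage/s3_client.py.
import boto3
from botocore.exceptions import ClientError


class S3Client:
    """Minimal S3 wrapper for storing and retrieving uploaded documents (illustrative)."""

    def __init__(self, bucket: str, region: str | None = None):
        self.bucket = bucket
        self._client = boto3.client('s3', region_name=region)

    def upload_document(self, key: str, data: bytes, content_type: str = 'application/octet-stream') -> None:
        # put_object is a standard boto3 call; the wrapper method name is an assumption.
        self._client.put_object(Bucket=self.bucket, Key=key, Body=data, ContentType=content_type)

    def get_document(self, key: str) -> bytes:
        response = self._client.get_object(Bucket=self.bucket, Key=key)
        return response['Body'].read()

    def exists(self, key: str) -> bool:
        try:
            self._client.head_object(Bucket=self.bucket, Key=key)
            return True
        except ClientError:
            return False

A constructor-injected bucket name keeps such a wrapper easy to exercise in isolation, in the spirit of the test_s3_client.py suite referenced above.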
143 lines · 5.1 KiB · Python
import json
import logging
import re
from pathlib import Path

import jsonlines
from openai import OpenAI

logger = logging.getLogger(__name__)


def batch_eval(query_file, result1_file, result2_file, output_file_path, client: OpenAI | None = None):
    """Build and submit an OpenAI Batch API job that compares two sets of answers.

    Queries are read from a plain-text file and the two answer sets from JSON
    result files; the generated requests are written to a JSONL file and then
    submitted as a batch. An existing OpenAI client may be injected; otherwise
    one is created from the environment.
    """
    client = client or OpenAI()

    # Fail fast if any input file is missing.
    for path in (query_file, result1_file, result2_file):
        if not Path(path).is_file():
            raise FileNotFoundError(f'Input file not found: {path}')

    try:
        with open(query_file, encoding='utf-8') as f:
            data = f.read()
    except Exception as exc:
        logger.error(f'Failed to read query file {query_file}: {exc}')
        raise

    # Queries are expected as lines of the form "- Question N: <text>".
    queries = re.findall(r'- Question \d+: (.+)', data)

    try:
        with open(result1_file, encoding='utf-8') as f:
            answers1 = json.load(f)
        with open(result2_file, encoding='utf-8') as f:
            answers2 = json.load(f)
    except Exception as exc:
        logger.error(f'Failed to load result files: {exc}')
        raise

    answers1 = [i['result'] for i in answers1]
    answers2 = [i['result'] for i in answers2]

    requests = []
    for i, (query, answer1, answer2) in enumerate(zip(queries, answers1, answers2, strict=True)):
        sys_prompt = """
---Role---
You are an expert tasked with evaluating two answers to the same question based on three criteria: **Comprehensiveness**, **Diversity**, and **Empowerment**.
"""

        prompt = f"""
You will evaluate two answers to the same question based on three criteria: **Comprehensiveness**, **Diversity**, and **Empowerment**.

- **Comprehensiveness**: How much detail does the answer provide to cover all aspects and details of the question?
- **Diversity**: How varied and rich is the answer in providing different perspectives and insights on the question?
- **Empowerment**: How well does the answer help the reader understand and make informed judgments about the topic?

For each criterion, choose the better answer (either Answer 1 or Answer 2) and explain why. Then, select an overall winner based on these three categories.

Here is the question:
{query}

Here are the two answers:

**Answer 1:**
{answer1}

**Answer 2:**
{answer2}

Evaluate both answers using the three criteria listed above and provide detailed explanations for each criterion.

Output your evaluation in the following JSON format:

{{
    "Comprehensiveness": {{
        "Winner": "[Answer 1 or Answer 2]",
        "Explanation": "[Provide explanation here]"
    }},
    "Diversity": {{
        "Winner": "[Answer 1 or Answer 2]",
        "Explanation": "[Provide explanation here]"
    }},
    "Empowerment": {{
        "Winner": "[Answer 1 or Answer 2]",
        "Explanation": "[Provide explanation here]"
    }},
    "Overall Winner": {{
        "Winner": "[Answer 1 or Answer 2]",
        "Explanation": "[Summarize why this answer is the overall winner based on the three criteria]"
    }}
}}
"""

        # One Batch API request per (query, answer1, answer2) triple.
        request_data = {
            'custom_id': f'request-{i + 1}',
            'method': 'POST',
            'url': '/v1/chat/completions',
            'body': {
                'model': 'gpt-4o-mini',
                'messages': [
                    {'role': 'system', 'content': sys_prompt},
                    {'role': 'user', 'content': prompt},
                ],
            },
        }

        requests.append(request_data)

    # Write all requests to a JSONL file in the format expected by the Batch API.
    output_dir = Path(output_file_path).parent
    output_dir.mkdir(parents=True, exist_ok=True)

    with jsonlines.open(output_file_path, mode='w') as writer:
        for request in requests:
            writer.write(request)

    logger.info(f'Batch API requests written to {output_file_path}')

    # Upload the JSONL file and submit it as a batch job.
    try:
        with open(output_file_path, 'rb') as f:
            batch_input_file = client.files.create(file=f, purpose='batch')
        batch_input_file_id = batch_input_file.id

        batch = client.batches.create(
            input_file_id=batch_input_file_id,
            endpoint='/v1/chat/completions',
            completion_window='24h',
            metadata={'description': 'nightly eval job'},
        )
    except Exception as exc:
        logger.error(f'Error creating batch from {output_file_path}: {exc}')
        raise

    logger.info(f'Batch {batch.id} has been created.')


if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--query_file', type=str, required=True, help='Path to file containing evaluation queries')
    parser.add_argument('--result1_file', type=str, required=True, help='Path to JSON file with first set of answers')
    parser.add_argument('--result2_file', type=str, required=True, help='Path to JSON file with second set of answers')
    parser.add_argument('--output_file_path', type=str, required=True, help='Output path for batch API requests file')
    args = parser.parse_args()

    batch_eval(args.query_file, args.result1_file, args.result2_file, args.output_file_path)
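As a usage note: assuming the module above is saved as batch_eval.py, it can be driven from the command line via the argparse flags defined in __main__, or called programmatically with an injected client (the paths below are placeholders). Once the submitted batch completes, its results are fetched separately through the Batch API, e.g. client.batches.retrieve(batch.id).

# Placeholder paths: the queries file must contain "- Question N: ..." lines and
# each result file must be a JSON list of objects with a "result" key.
#
# CLI (assuming the file is named batch_eval.py):
#   python batch_eval.py --query_file queries.txt --result1_file result1.json \
#       --result2_file result2.json --output_file_path batch_requests.jsonl
#
# Programmatic call with an injected client (batch_eval is the function defined above):
from openai import OpenAI

batch_eval(
    'queries.txt',
    'result1.json',
    'result2.json',
    'batch_requests.jsonl',
    client=OpenAI(),  # reads OPENAI_API_KEY from the environment
)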