Add GitHub CI workflow and test markers for offline/integration tests

- Add GitHub Actions workflow for CI
- Mark integration tests requiring services
- Add offline test markers for isolated tests
- Skip integration tests by default
- Configure pytest markers and collection
yangdx 2025-11-18 11:36:10 +08:00
parent 4fef731f37
commit 4ea2124001
8 changed files with 178 additions and 0 deletions

.github/workflows/tests.yml (new file)

@ -0,0 +1,54 @@
name: Tests

on:
  push:
    branches: [ main, dev ]
  pull_request:
    branches: [ main, dev ]

jobs:
  offline-tests:
    name: Offline Tests
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ['3.10', '3.11', '3.12']

    steps:
      - uses: actions/checkout@v4

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - name: Cache pip packages
        uses: actions/cache@v4
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements*.txt', '**/pyproject.toml') }}
          restore-keys: |
            ${{ runner.os }}-pip-

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -e ".[api]"
          pip install pytest pytest-asyncio

      - name: Run offline tests
        run: |
          # Run only tests marked as 'offline' (no external dependencies)
          # Integration tests requiring databases/APIs are skipped by default
          pytest tests/ -m offline -v --tb=short

      - name: Upload test results
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: test-results-py${{ matrix.python-version }}
          path: |
            .pytest_cache/
            test-results.xml
          retention-days: 7
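For reference, a minimal sketch of the kind of test the `pytest tests/ -m offline` step collects; the test name and body are hypothetical, not part of this commit:

# Hypothetical offline-marked test: pure in-process logic, no database or API
# server, so `pytest tests/ -m offline` picks it up both locally and in CI.
import pytest

@pytest.mark.offline
def test_chunking_is_deterministic():
    text = "abcdefgh"
    chunks = [text[i:i + 3] for i in range(0, len(text), 3)]
    assert chunks == ["abc", "def", "gh"]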


@ -7,6 +7,21 @@ This file provides command-line options and fixtures for test configuration.
import pytest


def pytest_configure(config):
    """Register custom markers for LightRAG tests."""
    config.addinivalue_line(
        "markers", "offline: marks tests as offline (no external dependencies)"
    )
    config.addinivalue_line(
        "markers",
        "integration: marks tests requiring external services (skipped by default)",
    )
    config.addinivalue_line("markers", "requires_db: marks tests requiring database")
    config.addinivalue_line(
        "markers", "requires_api: marks tests requiring LightRAG API server"
    )


def pytest_addoption(parser):
    """Add custom command-line options for LightRAG tests."""
@ -32,6 +47,32 @@ def pytest_addoption(parser):
help="Number of parallel workers for stress tests (default: 3)",
)
parser.addoption(
"--run-integration",
action="store_true",
default=False,
help="Run integration tests that require external services (database, API server, etc.)",
)
def pytest_collection_modifyitems(config, items):
"""Modify test collection to skip integration tests by default.
Integration tests are skipped unless --run-integration flag is provided.
This allows running offline tests quickly without needing external services.
"""
if config.getoption("--run-integration"):
# If --run-integration is specified, run all tests
return
skip_integration = pytest.mark.skip(
reason="Requires external services(DB/API), use --run-integration to run"
)
for item in items:
if "integration" in item.keywords:
item.add_marker(skip_integration)
@pytest.fixture(scope="session")
def keep_test_artifacts(request):
@ -83,3 +124,20 @@ def parallel_workers(request):
    # Fall back to environment variable
    return int(os.getenv("LIGHTRAG_TEST_WORKERS", "3"))


@pytest.fixture(scope="session")
def run_integration_tests(request):
    """
    Fixture to determine whether to run integration tests.

    Priority: CLI option > Environment variable > Default (False)
    """
    import os

    # Check CLI option first
    if request.config.getoption("--run-integration"):
        return True

    # Fall back to environment variable
    return os.getenv("LIGHTRAG_RUN_INTEGRATION", "false").lower() == "true"
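For illustration, a hypothetical test module using the new markers (names and bodies below are not from this commit): the offline test runs by default and under the CI job above, while the integration test is skipped unless `--run-integration` is supplied; tests can also branch on the `run_integration_tests` fixture, which falls back to the `LIGHTRAG_RUN_INTEGRATION` environment variable.

import pytest

@pytest.mark.offline
def test_pure_logic():
    # Collected by default and by `pytest -m offline` in CI.
    assert 1 + 1 == 2

@pytest.mark.integration
@pytest.mark.requires_db
def test_against_real_database():
    # Skipped unless the suite is invoked with `pytest --run-integration`.
    ...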


@ -9,6 +9,7 @@ Updated to handle the new data format where:
- Includes backward compatibility with legacy format
"""
import pytest
import requests
import time
import json
@ -84,6 +85,8 @@ def parse_streaming_response(
return references, response_chunks, errors
@pytest.mark.integration
@pytest.mark.requires_api
def test_query_endpoint_references():
"""Test /query endpoint references functionality"""
@ -187,6 +190,8 @@ def test_query_endpoint_references():
return True
@pytest.mark.integration
@pytest.mark.requires_api
def test_query_stream_endpoint_references():
"""Test /query/stream endpoint references functionality"""
@ -322,6 +327,8 @@ def test_query_stream_endpoint_references():
return True
@pytest.mark.integration
@pytest.mark.requires_api
def test_references_consistency():
"""Test references consistency across all endpoints"""
@ -472,6 +479,8 @@ def test_references_consistency():
return consistency_passed
@pytest.mark.integration
@pytest.mark.requires_api
def test_aquery_data_endpoint():
"""Test the /query/data endpoint"""
@ -654,6 +663,8 @@ def print_query_results(data: Dict[str, Any]):
print("=" * 60)
@pytest.mark.integration
@pytest.mark.requires_api
def compare_with_regular_query():
"""Compare results between regular query and data query"""
@ -690,6 +701,8 @@ def compare_with_regular_query():
print(f" Regular query error: {str(e)}")
@pytest.mark.integration
@pytest.mark.requires_api
def run_all_reference_tests():
"""Run all reference-related tests"""


@ -18,6 +18,7 @@ import os
import sys
import importlib
import numpy as np
import pytest
from dotenv import load_dotenv
from ascii_colors import ASCIIColors
@ -129,6 +130,8 @@ async def initialize_graph_storage():
return None
@pytest.mark.integration
@pytest.mark.requires_db
async def test_graph_basic(storage):
"""
Test basic graph database operations:
@ -254,6 +257,8 @@ async def test_graph_basic(storage):
return False
@pytest.mark.integration
@pytest.mark.requires_db
async def test_graph_advanced(storage):
"""
Test advanced graph database operations:
@ -474,6 +479,8 @@ async def test_graph_advanced(storage):
return False
@pytest.mark.integration
@pytest.mark.requires_db
async def test_graph_batch_operations(storage):
"""
Test batch operations of the graph database:
@ -827,6 +834,8 @@ async def test_graph_batch_operations(storage):
return False
@pytest.mark.integration
@pytest.mark.requires_db
async def test_graph_special_characters(storage):
"""
Test the graph database's handling of special characters:
@ -981,6 +990,8 @@ async def test_graph_special_characters(storage):
return False
@pytest.mark.integration
@pytest.mark.requires_db
async def test_graph_undirected_property(storage):
"""
Specifically test the undirected graph property of the storage:


@ -9,6 +9,7 @@ This script tests the LightRAG's Ollama compatibility interface, including:
All responses use the JSON Lines format, complying with the Ollama API specification.
"""
import pytest
import requests
import json
import argparse
@ -293,6 +294,8 @@ def run_test(func: Callable, name: str) -> None:
raise
@pytest.mark.integration
@pytest.mark.requires_api
def test_non_stream_chat() -> None:
"""Test non-streaming call to /api/chat endpoint"""
url = get_base_url()
@ -317,6 +320,8 @@ def test_non_stream_chat() -> None:
)
@pytest.mark.integration
@pytest.mark.requires_api
def test_stream_chat() -> None:
"""Test streaming call to /api/chat endpoint
@ -377,6 +382,8 @@ def test_stream_chat() -> None:
print()
@pytest.mark.integration
@pytest.mark.requires_api
def test_query_modes() -> None:
"""Test different query mode prefixes
@ -436,6 +443,8 @@ def create_error_test_data(error_type: str) -> Dict[str, Any]:
return error_data.get(error_type, error_data["empty_messages"])
@pytest.mark.integration
@pytest.mark.requires_api
def test_stream_error_handling() -> None:
"""Test error handling for streaming responses
@ -482,6 +491,8 @@ def test_stream_error_handling() -> None:
response.close()
@pytest.mark.integration
@pytest.mark.requires_api
def test_error_handling() -> None:
"""Test error handling for non-streaming responses
@ -529,6 +540,8 @@ def test_error_handling() -> None:
print_json_response(response.json(), "Error message")
@pytest.mark.integration
@pytest.mark.requires_api
def test_non_stream_generate() -> None:
"""Test non-streaming call to /api/generate endpoint"""
url = get_base_url("generate")
@ -548,6 +561,8 @@ def test_non_stream_generate() -> None:
print(json.dumps(response_json, ensure_ascii=False, indent=2))
@pytest.mark.integration
@pytest.mark.requires_api
def test_stream_generate() -> None:
"""Test streaming call to /api/generate endpoint"""
url = get_base_url("generate")
@ -588,6 +603,8 @@ def test_stream_generate() -> None:
print()
@pytest.mark.integration
@pytest.mark.requires_api
def test_generate_with_system() -> None:
"""Test generate with system prompt"""
url = get_base_url("generate")
@ -616,6 +633,8 @@ def test_generate_with_system() -> None:
)
@pytest.mark.integration
@pytest.mark.requires_api
def test_generate_error_handling() -> None:
"""Test error handling for generate endpoint"""
url = get_base_url("generate")
@ -641,6 +660,8 @@ def test_generate_error_handling() -> None:
print_json_response(response.json(), "Error message")
@pytest.mark.integration
@pytest.mark.requires_api
def test_generate_concurrent() -> None:
"""Test concurrent generate requests"""
import asyncio


@ -24,6 +24,8 @@ asyncpg = pytest.importorskip("asyncpg")
load_dotenv(dotenv_path=".env", override=False)
@pytest.mark.integration
@pytest.mark.requires_db
class TestPostgresRetryIntegration:
"""Integration tests for PostgreSQL retry mechanism with real database."""


@ -148,6 +148,7 @@ def _assert_no_timeline_overlap(timeline: List[Tuple[str, str]]) -> None:
# =============================================================================
@pytest.mark.offline
@pytest.mark.asyncio
async def test_pipeline_status_isolation():
"""
@ -202,6 +203,7 @@ async def test_pipeline_status_isolation():
# =============================================================================
@pytest.mark.offline
@pytest.mark.asyncio
async def test_lock_mechanism(stress_test_mode, parallel_workers):
"""
@ -271,6 +273,7 @@ async def test_lock_mechanism(stress_test_mode, parallel_workers):
# =============================================================================
@pytest.mark.offline
@pytest.mark.asyncio
async def test_backward_compatibility():
"""
@ -344,6 +347,7 @@ async def test_backward_compatibility():
# =============================================================================
@pytest.mark.offline
@pytest.mark.asyncio
async def test_multi_workspace_concurrency():
"""
@ -427,6 +431,7 @@ async def test_multi_workspace_concurrency():
# =============================================================================
@pytest.mark.offline
@pytest.mark.asyncio
async def test_namespace_lock_reentrance():
"""
@ -500,6 +505,7 @@ async def test_namespace_lock_reentrance():
# =============================================================================
@pytest.mark.offline
@pytest.mark.asyncio
async def test_different_namespace_lock_isolation():
"""
@ -539,6 +545,7 @@ async def test_different_namespace_lock_isolation():
# =============================================================================
@pytest.mark.offline
@pytest.mark.asyncio
async def test_error_handling():
"""
@ -589,6 +596,7 @@ async def test_error_handling():
# =============================================================================
@pytest.mark.offline
@pytest.mark.asyncio
async def test_update_flags_workspace_isolation():
"""
@ -718,6 +726,7 @@ async def test_update_flags_workspace_isolation():
# =============================================================================
@pytest.mark.offline
@pytest.mark.asyncio
async def test_empty_workspace_standardization():
"""
@ -771,6 +780,7 @@ async def test_empty_workspace_standardization():
# =============================================================================
@pytest.mark.offline
@pytest.mark.asyncio
async def test_json_kv_storage_workspace_isolation(keep_test_artifacts):
"""
@ -852,6 +862,9 @@ async def test_json_kv_storage_workspace_isolation(keep_test_artifacts):
}
)
print(" Written to storage1: entity1, entity2")
# Persist data to disk
await storage1.index_done_callback()
print(" Persisted storage1 data to disk")
# Write to storage2
await storage2.upsert(
@ -867,6 +880,9 @@ async def test_json_kv_storage_workspace_isolation(keep_test_artifacts):
}
)
print(" Written to storage2: entity1, entity2")
# Persist data to disk
await storage2.index_done_callback()
print(" Persisted storage2 data to disk")
# Test 10.3: Read data from each storage and verify isolation
print("\nTest 10.3: Read data and verify isolation")
@ -944,6 +960,7 @@ async def test_json_kv_storage_workspace_isolation(keep_test_artifacts):
# =============================================================================
@pytest.mark.offline
@pytest.mark.asyncio
async def test_lightrag_end_to_end_workspace_isolation(keep_test_artifacts):
"""


@ -11,9 +11,11 @@ This test verifies:
import os
import json
import tempfile
import pytest
from lightrag.utils import write_json, load_json, SanitizingJSONEncoder
@pytest.mark.offline
class TestWriteJsonOptimization:
"""Test write_json optimization with two-stage approach"""