From d74b21a3469535eb1a598246b2ad53a83fd331de Mon Sep 17 00:00:00 2001
From: phact
Date: Tue, 14 Oct 2025 01:39:15 -0400
Subject: [PATCH] onboarding in conftest

---
 tests/conftest.py                       | 49 +++++++++++++++++++++++++
 tests/integration/test_api_endpoints.py | 49 +++++--------------------
 2 files changed, 59 insertions(+), 39 deletions(-)

diff --git a/tests/conftest.py b/tests/conftest.py
index 7c2ffc1d..9498d6e9 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -20,6 +20,55 @@
 from src.session_manager import SessionManager
 from src.main import generate_jwt_keys
 
+@pytest_asyncio.fixture(scope="session")
+async def onboard_system():
+    """Perform initial onboarding once for all tests in the session.
+
+    This ensures the OpenRAG config is marked as edited and properly initialized
+    so that tests can use the /settings endpoint.
+    """
+    from pathlib import Path
+
+    # Delete any existing config to ensure clean onboarding
+    config_file = Path("config/config.yaml")
+    if config_file.exists():
+        config_file.unlink()
+
+    # Initialize clients
+    await clients.initialize()
+
+    # Create app and perform onboarding via API
+    from src.main import create_app, startup_tasks
+    import httpx
+
+    app = await create_app()
+    await startup_tasks(app.state.services)
+
+    transport = httpx.ASGITransport(app=app)
+    async with httpx.AsyncClient(transport=transport, base_url="http://testserver") as client:
+        onboarding_payload = {
+            "model_provider": "openai",
+            "embedding_model": "text-embedding-3-small",
+            "llm_model": "gpt-4o-mini",
+            "endpoint": "https://api.openai.com/v1",
+            "sample_data": False,
+        }
+        resp = await client.post("/onboarding", json=onboarding_payload)
+        if resp.status_code not in (200, 204):
+            # If it fails, it might already be onboarded, which is fine
+            print(f"[DEBUG] Onboarding returned {resp.status_code}: {resp.text}")
+        else:
+            print(f"[DEBUG] Session onboarding completed successfully")
+
+    yield
+
+    # Cleanup after all tests
+    try:
+        await clients.close()
+    except Exception:
+        pass
+
+
 @pytest.fixture(scope="session")
 def event_loop():
     """Create an instance of the default event loop for the test session."""
diff --git a/tests/integration/test_api_endpoints.py b/tests/integration/test_api_endpoints.py
index d49cf224..0e7942a9 100644
--- a/tests/integration/test_api_endpoints.py
+++ b/tests/integration/test_api_endpoints.py
@@ -38,45 +38,6 @@ def dump_docker_logs(container_name_pattern: str = "langflow", tail: int = 100):
         print(f"[DEBUG] Failed to fetch docker logs for {container_name_pattern}: {e}")
 
 
-async def perform_onboarding(client: httpx.AsyncClient, embedding_model: str = "text-embedding-3-small", llm_model: str = "gpt-4o-mini"):
-    """Perform onboarding configuration to properly initialize the system.
-
-    This should be called once at the beginning of test session, or per-test if config is reset.
-    It configures the embedding model, LLM model, and initializes the OpenSearch index.
-
-    If onboarding has already been performed (config.edited=True), this will use the /settings
-    endpoint instead to update the configuration.
- """ - onboarding_payload = { - "model_provider": "openai", - "embedding_model": embedding_model, - "llm_model": llm_model, - "endpoint": "https://api.openai.com/v1", - "sample_data": False, - } - onboarding_resp = await client.post("/onboarding", json=onboarding_payload) - - # If onboarding fails because config is already edited, use /settings endpoint instead - if onboarding_resp.status_code == 403: - print(f"[DEBUG] Config already onboarded, using /settings endpoint instead") - settings_payload = { - "embedding_model": embedding_model, - "llm_model": llm_model, - } - settings_resp = await client.post("/settings", json=settings_payload) - if settings_resp.status_code not in (200, 204): - raise AssertionError( - f"Settings update failed: {settings_resp.status_code} {settings_resp.text}" - ) - print(f"[DEBUG] Settings updated: embedding_model={embedding_model}, llm_model={llm_model}") - elif onboarding_resp.status_code not in (200, 204): - raise AssertionError( - f"Onboarding failed: {onboarding_resp.status_code} {onboarding_resp.text}" - ) - else: - print(f"[DEBUG] Onboarding completed: embedding_model={embedding_model}, llm_model={llm_model}") - - async def wait_for_service_ready(client: httpx.AsyncClient, timeout_s: float = 30.0): """Poll existing endpoints until the app and OpenSearch are ready. @@ -433,6 +394,16 @@ async def test_langflow_chat_and_nudges_endpoints(): async with httpx.AsyncClient(transport=transport, base_url="http://testserver") as client: await wait_for_service_ready(client) + # Ensure embedding model is configured via settings + resp = await client.post( + "/settings", + json={ + "embedding_model": "text-embedding-3-small", + "llm_model": "gpt-4o-mini", + }, + ) + assert resp.status_code == 200, resp.text + warmup_file = Path("./nudges_seed.md") warmup_file.write_text( "The user may care about different fruits including apples, hardy kiwi, and bananas"