onboarding in conftest
commit d74b21a346
parent 182d4df5ab

2 changed files with 59 additions and 39 deletions
@@ -20,6 +20,55 @@ from src.session_manager import SessionManager
from src.main import generate_jwt_keys


@pytest_asyncio.fixture(scope="session")
async def onboard_system():
    """Perform initial onboarding once for all tests in the session.

    This ensures the OpenRAG config is marked as edited and properly initialized
    so that tests can use the /settings endpoint.
    """
    from pathlib import Path

    # Delete any existing config to ensure clean onboarding
    config_file = Path("config/config.yaml")
    if config_file.exists():
        config_file.unlink()

    # Initialize clients
    await clients.initialize()

    # Create app and perform onboarding via API
    from src.main import create_app, startup_tasks
    import httpx

    app = await create_app()
    await startup_tasks(app.state.services)

    transport = httpx.ASGITransport(app=app)
    async with httpx.AsyncClient(transport=transport, base_url="http://testserver") as client:
        onboarding_payload = {
            "model_provider": "openai",
            "embedding_model": "text-embedding-3-small",
            "llm_model": "gpt-4o-mini",
            "endpoint": "https://api.openai.com/v1",
            "sample_data": False,
        }
        resp = await client.post("/onboarding", json=onboarding_payload)
        if resp.status_code not in (200, 204):
            # If it fails, it might already be onboarded, which is fine
            print(f"[DEBUG] Onboarding returned {resp.status_code}: {resp.text}")
        else:
            print("[DEBUG] Session onboarding completed successfully")

    yield

    # Cleanup after all tests
    try:
        await clients.close()
    except Exception:
        pass


@pytest.fixture(scope="session")
def event_loop():
    """Create an instance of the default event loop for the test session."""
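Because onboard_system is session-scoped, a test only needs to declare it as a parameter (or the suite can mark it autouse) for onboarding to run once up front. A minimal sketch of such a consumer follows; the test name and body are illustrative assumptions that mirror the fixture above, not code from this commit.

import httpx
import pytest

@pytest.mark.asyncio
async def test_settings_available_after_onboarding(onboard_system):  # hypothetical test
    # Build an in-process ASGI client the same way the fixture does.
    from src.main import create_app, startup_tasks

    app = await create_app()
    await startup_tasks(app.state.services)
    transport = httpx.ASGITransport(app=app)
    async with httpx.AsyncClient(transport=transport, base_url="http://testserver") as client:
        resp = await client.post(
            "/settings",
            json={"embedding_model": "text-embedding-3-small", "llm_model": "gpt-4o-mini"},
        )
        # Onboarding already ran for the session, so /settings should not return 403.
        assert resp.status_code in (200, 204), resp.text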
@@ -38,45 +38,6 @@ def dump_docker_logs(container_name_pattern: str = "langflow", tail: int = 100):
        print(f"[DEBUG] Failed to fetch docker logs for {container_name_pattern}: {e}")


async def perform_onboarding(client: httpx.AsyncClient, embedding_model: str = "text-embedding-3-small", llm_model: str = "gpt-4o-mini"):
    """Perform onboarding configuration to properly initialize the system.

    This should be called once at the beginning of the test session, or per-test if the config is reset.
    It configures the embedding and LLM models and initializes the OpenSearch index.

    If onboarding has already been performed (config.edited=True), this will use the /settings
    endpoint instead to update the configuration.
    """
    onboarding_payload = {
        "model_provider": "openai",
        "embedding_model": embedding_model,
        "llm_model": llm_model,
        "endpoint": "https://api.openai.com/v1",
        "sample_data": False,
    }
    onboarding_resp = await client.post("/onboarding", json=onboarding_payload)

    # If onboarding fails because the config is already edited, use the /settings endpoint instead
    if onboarding_resp.status_code == 403:
        print("[DEBUG] Config already onboarded, using /settings endpoint instead")
        settings_payload = {
            "embedding_model": embedding_model,
            "llm_model": llm_model,
        }
        settings_resp = await client.post("/settings", json=settings_payload)
        if settings_resp.status_code not in (200, 204):
            raise AssertionError(
                f"Settings update failed: {settings_resp.status_code} {settings_resp.text}"
            )
        print(f"[DEBUG] Settings updated: embedding_model={embedding_model}, llm_model={llm_model}")
    elif onboarding_resp.status_code not in (200, 204):
        raise AssertionError(
            f"Onboarding failed: {onboarding_resp.status_code} {onboarding_resp.text}"
        )
    else:
        print(f"[DEBUG] Onboarding completed: embedding_model={embedding_model}, llm_model={llm_model}")


async def wait_for_service_ready(client: httpx.AsyncClient, timeout_s: float = 30.0):
    """Poll existing endpoints until the app and OpenSearch are ready.
@@ -433,6 +394,16 @@ async def test_langflow_chat_and_nudges_endpoints():
    async with httpx.AsyncClient(transport=transport, base_url="http://testserver") as client:
        await wait_for_service_ready(client)

        # Ensure the embedding model is configured via settings
        resp = await client.post(
            "/settings",
            json={
                "embedding_model": "text-embedding-3-small",
                "llm_model": "gpt-4o-mini",
            },
        )
        assert resp.status_code == 200, resp.text

        warmup_file = Path("./nudges_seed.md")
        warmup_file.write_text(
            "The user may care about different fruits including apples, hardy kiwi, and bananas"