From 3ba06478a88d9bbd78532727d5b050c0f4a7563d Mon Sep 17 00:00:00 2001
From: yangdx
Date: Fri, 26 Sep 2025 19:27:44 +0800
Subject: [PATCH] Fix HTTP log message order for streaming responses

- Move the aquery_llm call outside the generator
- Execute the query before the stream starts
---
 lightrag/api/routers/query_routes.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/lightrag/api/routers/query_routes.py b/lightrag/api/routers/query_routes.py
index 7f7dedd3..567659f8 100644
--- a/lightrag/api/routers/query_routes.py
+++ b/lightrag/api/routers/query_routes.py
@@ -218,9 +218,10 @@ def create_query_routes(rag, api_key: Optional[str] = None, top_k: int = 60):

         from fastapi.responses import StreamingResponse

+        # Unified approach: always use aquery_llm for all cases
+        result = await rag.aquery_llm(request.query, param=param)
+
         async def stream_generator():
-            # Unified approach: always use aquery_llm for all cases
-            result = await rag.aquery_llm(request.query, param=param)
             # Extract references and LLM response from unified result
             references = result.get("data", {}).get("references", [])
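
For context, here is a minimal, self-contained sketch of the pattern the patch adopts: awaiting the query in the endpoint body, before the StreamingResponse generator is created, so any log output produced by the query appears before the response starts streaming. Only `aquery_llm`, `param`, and the `references` lookup come from the diff; the FastAPI app, `QueryRequest`, `FakeRAG`, and the result layout are hypothetical stand-ins, not the actual LightRAG code.

```python
import asyncio
import logging

from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from pydantic import BaseModel

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("query_routes_sketch")

app = FastAPI()


class QueryRequest(BaseModel):
    query: str


class FakeRAG:
    """Stand-in for the real RAG object; logs like the real query path would."""

    async def aquery_llm(self, query: str, param=None) -> dict:
        logger.info("Processing query: %s", query)  # emitted while the query runs
        await asyncio.sleep(0)  # pretend retrieval / LLM work happens here
        return {
            "data": {
                "references": [],
                # Simplified: the real result may carry a streamed LLM response
                "chunks": ["hello ", "world"],
            }
        }


rag = FakeRAG()


@app.post("/query/stream")
async def query_text_stream(request: QueryRequest):
    # Awaiting here, outside the generator, means the query -- and its log
    # messages -- run before the HTTP response begins. If this call lived
    # inside stream_generator(), it would only execute after FastAPI had
    # already started sending the response, so its logs would show up after
    # the access-log line for the request.
    result = await rag.aquery_llm(request.query, param=None)
    references = result.get("data", {}).get("references", [])
    logger.info("References resolved: %d", len(references))

    async def stream_generator():
        # Only consumes the already-computed result; no query work happens here.
        for chunk in result["data"]["chunks"]:
            yield chunk

    return StreamingResponse(stream_generator(), media_type="text/plain")
```

The design point is simply ordering: work done before the generator is handed to StreamingResponse happens before the first byte of the response, while work inside the generator happens after streaming has begun.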