From e254c3dd812fc2482df4257477c20937f24e5339 Mon Sep 17 00:00:00 2001
From: zrguo <49157727+LarFii@users.noreply.github.com>
Date: Tue, 15 Jul 2025 17:30:30 +0800
Subject: [PATCH] Update openai.py

---
 lightrag/llm/openai.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/lightrag/llm/openai.py b/lightrag/llm/openai.py
index eb74c2f1..ff10d2f5 100644
--- a/lightrag/llm/openai.py
+++ b/lightrag/llm/openai.py
@@ -176,6 +176,8 @@ async def openai_complete_if_cache(
         verbose_debug(f"Query: {prompt}")

     logger.debug("===== Sending Query to LLM =====")
+    messages = kwargs.pop("messages", messages)
+
     try:
         # Don't use async with context manager, use client directly
         if "response_format" in kwargs:
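
Notes:

The added line lets a caller override the message list that openai_complete_if_cache
builds from prompt/system_prompt: if "messages" is present in kwargs it replaces the
default list, and pop() removes it from kwargs so it is not forwarded to the OpenAI
client a second time. Below is a minimal standalone sketch of that pattern. The
function complete_if_cache_sketch is a hypothetical stand-in, not the real
openai_complete_if_cache signature, and it returns the messages instead of calling
the API so the override is observable.

from __future__ import annotations

import asyncio
from typing import Any


async def complete_if_cache_sketch(
    prompt: str,
    system_prompt: str | None = None,
    **kwargs: Any,
) -> list[dict[str, str]]:
    # Default behavior: assemble the message list from prompt/system_prompt,
    # as the surrounding function does.
    messages: list[dict[str, str]] = []
    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})
    messages.append({"role": "user", "content": prompt})

    # The patched line: a caller-supplied "messages" kwarg replaces the
    # default list, and pop() strips it from kwargs so it is not passed
    # along to the client as an unexpected keyword argument.
    messages = kwargs.pop("messages", messages)

    # (The real function would now send ``messages`` to the API.)
    return messages


if __name__ == "__main__":
    history = [
        {"role": "system", "content": "You are terse."},
        {"role": "user", "content": "Earlier question"},
        {"role": "assistant", "content": "Earlier answer"},
        {"role": "user", "content": "Follow-up"},
    ]
    # The prompt argument is ignored because the explicit history wins.
    print(asyncio.run(complete_if_cache_sketch("unused prompt", messages=history)))

One design consequence worth noting: because the override is read with pop() before
the request is built, callers can pass a full multi-turn conversation history while
leaving every other keyword argument (model, temperature, response_format, ...)
untouched.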