diff --git a/lightrag/llm/openai.py b/lightrag/llm/openai.py
index eb74c2f1..ff10d2f5 100644
--- a/lightrag/llm/openai.py
+++ b/lightrag/llm/openai.py
@@ -176,6 +176,8 @@ async def openai_complete_if_cache(
     verbose_debug(f"Query: {prompt}")
     logger.debug("===== Sending Query to LLM =====")
 
+    messages = kwargs.pop("messages", messages)
+
     try:
         # Don't use async with context manager, use client directly
         if "response_format" in kwargs:
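
With this change, a caller can pass a prebuilt OpenAI-style `messages` list through `**kwargs`, and it replaces the list the function would otherwise assemble from `system_prompt`, `history_messages`, and `prompt`. A minimal usage sketch, assuming the existing `openai_complete_if_cache(model, prompt, ...)` signature; the model name and message contents are illustrative, not from the patch:

```python
from lightrag.llm.openai import openai_complete_if_cache

async def demo() -> None:
    # Because of the new `kwargs.pop("messages", messages)`, this prebuilt
    # list supersedes whatever the function would assemble internally
    # (including the positional `prompt`).
    response = await openai_complete_if_cache(
        "gpt-4o-mini",       # illustrative model name
        "unused prompt",     # still required positionally, but overridden by `messages`
        messages=[
            {"role": "system", "content": "You are a concise assistant."},
            {"role": "user", "content": "Summarize LightRAG in one sentence."},
        ],
    )
    print(response)
```

Using `kwargs.pop` rather than `kwargs.get` also removes `messages` from the kwargs that are later forwarded to the OpenAI client call, avoiding a duplicate-keyword error.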