diff --git a/lightrag/llm/azure_openai.py b/lightrag/llm/azure_openai.py
index c67bae10..cb8d68df 100644
--- a/lightrag/llm/azure_openai.py
+++ b/lightrag/llm/azure_openai.py
@@ -90,7 +90,7 @@ async def azure_openai_complete_if_cache(
messages.append({"role": "user", "content": prompt})
if "response_format" in kwargs:
- response = await openai_async_client.beta.chat.completions.parse(
+ response = await openai_async_client.chat.completions.parse(
model=model, messages=messages, **kwargs
)
else:
@@ -114,7 +114,7 @@ async def azure_openai_complete_if_cache(
return inner()
else:
message = response.choices[0].message
-
+
# Handle parsed responses (structured output via response_format)
-        # When using beta.chat.completions.parse(), the response is in message.parsed
+        # When using chat.completions.parse(), the response is in message.parsed
if hasattr(message, "parsed") and message.parsed is not None:
@@ -126,7 +126,7 @@ async def azure_openai_complete_if_cache(
content = message.content
if content and r"\u" in content:
content = safe_unicode_decode(content.encode("utf-8"))
-
+
return content
diff --git a/lightrag/llm/openai.py b/lightrag/llm/openai.py
index cea85b04..6da79c2c 100644
--- a/lightrag/llm/openai.py
+++ b/lightrag/llm/openai.py
@@ -241,7 +241,7 @@ async def openai_complete_if_cache(
try:
# Don't use async with context manager, use client directly
if "response_format" in kwargs:
- response = await openai_async_client.beta.chat.completions.parse(
+ response = await openai_async_client.chat.completions.parse(
model=model, messages=messages, **kwargs
)
else:
@@ -453,7 +453,7 @@ async def openai_complete_if_cache(
raise InvalidResponseError("Invalid response from OpenAI API")
message = response.choices[0].message
-
+
# Handle parsed responses (structured output via response_format)
-        # When using beta.chat.completions.parse(), the response is in message.parsed
+        # When using chat.completions.parse(), the response is in message.parsed
if hasattr(message, "parsed") and message.parsed is not None:
@@ -492,7 +492,9 @@ async def openai_complete_if_cache(
reasoning_content = safe_unicode_decode(
reasoning_content.encode("utf-8")
)
- final_content = f"{reasoning_content}{final_content}"
+ final_content = (
+ f"{reasoning_content}{final_content}"
+ )
else:
# COT disabled, only use regular content
final_content = content or ""
diff --git a/pyproject.toml b/pyproject.toml
index e40452e0..1b966dae 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -58,7 +58,7 @@ api = [
"nano-vectordb",
"networkx",
"numpy>=1.24.0,<2.0.0",
- "openai>=1.0.0,<3.0.0",
+ "openai>=2.0.0,<3.0.0",
"pandas>=2.0.0,<2.4.0",
"pipmaster",
"pydantic",
@@ -115,7 +115,7 @@ offline-storage = [
offline-llm = [
# LLM provider dependencies
- "openai>=1.0.0,<3.0.0",
+ "openai>=2.0.0,<3.0.0",
"anthropic>=0.18.0,<1.0.0",
"ollama>=0.1.0,<1.0.0",
"zhipuai>=2.0.0,<3.0.0",
diff --git a/requirements-offline-llm.txt b/requirements-offline-llm.txt
index 1539552a..bcfb1451 100644
--- a/requirements-offline-llm.txt
+++ b/requirements-offline-llm.txt
@@ -14,6 +14,6 @@ google-api-core>=2.0.0,<3.0.0
google-genai>=1.0.0,<2.0.0
llama-index>=0.9.0,<1.0.0
ollama>=0.1.0,<1.0.0
-openai>=1.0.0,<3.0.0
+openai>=2.0.0,<3.0.0
voyageai>=0.2.0,<1.0.0
zhipuai>=2.0.0,<3.0.0
diff --git a/requirements-offline.txt b/requirements-offline.txt
index 50848093..87ca7a6a 100644
--- a/requirements-offline.txt
+++ b/requirements-offline.txt
@@ -19,7 +19,7 @@ google-genai>=1.0.0,<2.0.0
llama-index>=0.9.0,<1.0.0
neo4j>=5.0.0,<7.0.0
ollama>=0.1.0,<1.0.0
-openai>=1.0.0,<3.0.0
+openai>=2.0.0,<3.0.0
openpyxl>=3.0.0,<4.0.0
pycryptodome>=3.0.0,<4.0.0
pymilvus>=2.6.2,<3.0.0
diff --git a/uv.lock b/uv.lock
index 97703af0..a4f17ab4 100644
--- a/uv.lock
+++ b/uv.lock
@@ -2735,7 +2735,6 @@ requires-dist = [
{ name = "json-repair", marker = "extra == 'api'" },
{ name = "langfuse", marker = "extra == 'observability'", specifier = ">=3.8.1" },
{ name = "lightrag-hku", extras = ["api", "offline-llm", "offline-storage"], marker = "extra == 'offline'" },
- { name = "lightrag-hku", extras = ["pytest"], marker = "extra == 'evaluation'" },
{ name = "llama-index", marker = "extra == 'offline-llm'", specifier = ">=0.9.0,<1.0.0" },
{ name = "nano-vectordb" },
{ name = "nano-vectordb", marker = "extra == 'api'" },
@@ -2745,14 +2744,15 @@ requires-dist = [
{ name = "numpy", specifier = ">=1.24.0,<2.0.0" },
{ name = "numpy", marker = "extra == 'api'", specifier = ">=1.24.0,<2.0.0" },
{ name = "ollama", marker = "extra == 'offline-llm'", specifier = ">=0.1.0,<1.0.0" },
- { name = "openai", marker = "extra == 'api'", specifier = ">=1.0.0,<3.0.0" },
- { name = "openai", marker = "extra == 'offline-llm'", specifier = ">=1.0.0,<3.0.0" },
+ { name = "openai", marker = "extra == 'api'", specifier = ">=2.0.0,<3.0.0" },
+ { name = "openai", marker = "extra == 'offline-llm'", specifier = ">=2.0.0,<3.0.0" },
{ name = "openpyxl", marker = "extra == 'api'", specifier = ">=3.0.0,<4.0.0" },
{ name = "pandas", specifier = ">=2.0.0,<2.4.0" },
{ name = "pandas", marker = "extra == 'api'", specifier = ">=2.0.0,<2.4.0" },
{ name = "passlib", extras = ["bcrypt"], marker = "extra == 'api'" },
{ name = "pipmaster" },
{ name = "pipmaster", marker = "extra == 'api'" },
+ { name = "pre-commit", marker = "extra == 'evaluation'" },
{ name = "pre-commit", marker = "extra == 'pytest'" },
{ name = "psutil", marker = "extra == 'api'" },
{ name = "pycryptodome", marker = "extra == 'api'", specifier = ">=3.0.0,<4.0.0" },
@@ -2764,7 +2764,9 @@ requires-dist = [
{ name = "pypdf", marker = "extra == 'api'", specifier = ">=6.1.0" },
{ name = "pypinyin" },
{ name = "pypinyin", marker = "extra == 'api'" },
+ { name = "pytest", marker = "extra == 'evaluation'", specifier = ">=8.4.2" },
{ name = "pytest", marker = "extra == 'pytest'", specifier = ">=8.4.2" },
+ { name = "pytest-asyncio", marker = "extra == 'evaluation'", specifier = ">=1.2.0" },
{ name = "pytest-asyncio", marker = "extra == 'pytest'", specifier = ">=1.2.0" },
{ name = "python-docx", marker = "extra == 'api'", specifier = ">=0.8.11,<2.0.0" },
{ name = "python-dotenv" },