From 1b0413ee7425de72b849acdfe05ae8984b94fb63 Mon Sep 17 00:00:00 2001
From: palanisd <162479981+netbrah@users.noreply.github.com>
Date: Sat, 22 Nov 2025 15:29:05 -0500
Subject: [PATCH 1/9] Create copilot-setup-steps.yml
---
.github/workflows/copilot-setup-steps.yml | 66 +++++++++++++++++++++++
1 file changed, 66 insertions(+)
create mode 100644 .github/workflows/copilot-setup-steps.yml
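
Reviewer note: the steps above can be sanity-checked locally before pushing. A minimal sketch, assuming the distribution name lightrag-hku (as referenced later in this series) and execution from the repository root; the script itself is hypothetical and not part of the repo:

import importlib.metadata
from pathlib import Path

# Confirm the editable install registered the distribution
version = importlib.metadata.version("lightrag-hku")  # assumed distribution name
print(f"lightrag-hku {version} is installed")

# Confirm the frontend stub written by the workflow exists
stub = Path("lightrag/api/webui/index.html")
assert stub.exists(), "frontend stub missing: rerun the stub step"
print("WebUI stub found:", stub.read_text()[:60])
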
diff --git a/.github/workflows/copilot-setup-steps.yml b/.github/workflows/copilot-setup-steps.yml
new file mode 100644
index 00000000..5ebf5e43
--- /dev/null
+++ b/.github/workflows/copilot-setup-steps.yml
@@ -0,0 +1,66 @@
+name: "Copilot Setup Steps"
+
+# Automatically run the setup steps when they are changed to allow for easy validation, and
+# allow manual testing through the repository's "Actions" tab
+on:
+ workflow_dispatch:
+ push:
+ paths:
+ - .github/workflows/copilot-setup-steps.yml
+ pull_request:
+ paths:
+ - .github/workflows/copilot-setup-steps.yml
+
+jobs:
+ # The job MUST be called `copilot-setup-steps` or it will not be picked up by Copilot.
+ copilot-setup-steps:
+ runs-on: ubuntu-latest
+
+ # Set the permissions to the lowest permissions possible needed for your steps.
+ # Copilot will be given its own token for its operations.
+ permissions:
+ # If you want to clone the repository as part of your setup steps, for example to install dependencies,
+ # you'll need the `contents: read` permission. If you don't clone the repository in your setup steps,
+ # Copilot will do this for you automatically after the steps complete.
+ contents: read
+
+ # Timeout after 30 minutes (maximum is 59)
+ timeout-minutes: 30
+
+ # You can define any steps you want, and they will run before the agent starts.
+ # If you do not check out your code, Copilot will do this for you.
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v5
+
+ - name: Set up Python 3.11
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.11'
+
+ - name: Cache pip packages
+ uses: actions/cache@v4
+ with:
+ path: ~/.cache/pip
+ key: ${{ runner.os }}-pip-copilot-${{ hashFiles('**/pyproject.toml') }}
+ restore-keys: |
+ ${{ runner.os }}-pip-copilot-
+ ${{ runner.os }}-pip-
+
+ - name: Install Python dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install -e ".[api]"
+ pip install pytest pytest-asyncio httpx
+
+ - name: Create minimal frontend stub for Copilot agent
+ run: |
+ mkdir -p lightrag/api/webui
+        echo '<!DOCTYPE html><html><head><title>LightRAG - Copilot Agent</title></head><body><h1>Copilot Agent Mode</h1></body></html>' > lightrag/api/webui/index.html
+ echo "Created minimal frontend stub for Copilot agent environment"
+
+ - name: Verify installation
+ run: |
+ python --version
+ pip list | grep lightrag
+ lightrag-server --help || echo "Note: Server requires .env configuration to run"
From c233da6318383d92f21cf6710b7ed75022f1b1af Mon Sep 17 00:00:00 2001
From: palanisd <162479981+netbrah@users.noreply.github.com>
Date: Sun, 23 Nov 2025 17:42:04 -0500
Subject: [PATCH 2/9] Update copilot-setup-steps.yml
---
.github/workflows/copilot-setup-steps.yml | 8 --------
1 file changed, 8 deletions(-)
diff --git a/.github/workflows/copilot-setup-steps.yml b/.github/workflows/copilot-setup-steps.yml
index 5ebf5e43..6b946ed1 100644
--- a/.github/workflows/copilot-setup-steps.yml
+++ b/.github/workflows/copilot-setup-steps.yml
@@ -16,14 +16,6 @@ jobs:
copilot-setup-steps:
runs-on: ubuntu-latest
- # Set the permissions to the lowest permissions possible needed for your steps.
- # Copilot will be given its own token for its operations.
- permissions:
- # If you want to clone the repository as part of your setup steps, for example to install dependencies,
- # you'll need the `contents: read` permission. If you don't clone the repository in your setup steps,
- # Copilot will do this for you automatically after the steps complete.
- contents: read
-
# Timeout after 30 minutes (maximum is 59)
timeout-minutes: 30
From 7aaa51cda9a9b2bb4810e89e9b03cd42cb7eda85 Mon Sep 17 00:00:00 2001
From: yangdx
Date: Mon, 24 Nov 2025 22:28:15 +0800
Subject: [PATCH 3/9] Add retry decorators to Neo4j read operations for
resilience
---
lightrag/kg/neo4j_impl.py | 140 ++++++++++++++++++++++++++++++++++++++
1 file changed, 140 insertions(+)
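
Reviewer note: the decorator comes from tenacity. A self-contained sketch of the same pattern using the diff's stop/wait parameters (so the demo backs off for a few seconds between attempts); the FlakyReader class is illustrative only:

import asyncio
from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_exponential

class FlakyReader:
    """Stand-in for a Neo4j read path that fails transiently."""
    def __init__(self) -> None:
        self.calls = 0

    @retry(
        stop=stop_after_attempt(3),
        wait=wait_exponential(multiplier=1, min=4, max=10),
        retry=retry_if_exception_type((ConnectionResetError, OSError)),
    )
    async def has_node(self, node_id: str) -> bool:
        self.calls += 1
        if self.calls < 3:
            raise ConnectionResetError("transient network drop")  # retried with backoff
        return True

async def main() -> None:
    reader = FlakyReader()
    print(await reader.has_node("n1"), "after", reader.calls, "attempts")

asyncio.run(main())
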
diff --git a/lightrag/kg/neo4j_impl.py b/lightrag/kg/neo4j_impl.py
index 256656d8..d3d6c4eb 100644
--- a/lightrag/kg/neo4j_impl.py
+++ b/lightrag/kg/neo4j_impl.py
@@ -352,6 +352,20 @@ class Neo4JStorage(BaseGraphStorage):
# Neo4J handles persistence automatically
pass
+ @retry(
+ stop=stop_after_attempt(3),
+ wait=wait_exponential(multiplier=1, min=4, max=10),
+ retry=retry_if_exception_type(
+ (
+ neo4jExceptions.ServiceUnavailable,
+ neo4jExceptions.TransientError,
+ neo4jExceptions.SessionExpired,
+ ConnectionResetError,
+ OSError,
+ AttributeError,
+ )
+ ),
+ )
async def has_node(self, node_id: str) -> bool:
"""
Check if a node with the given label exists in the database
@@ -385,6 +399,20 @@ class Neo4JStorage(BaseGraphStorage):
await result.consume() # Ensure results are consumed even on error
raise
+ @retry(
+ stop=stop_after_attempt(3),
+ wait=wait_exponential(multiplier=1, min=4, max=10),
+ retry=retry_if_exception_type(
+ (
+ neo4jExceptions.ServiceUnavailable,
+ neo4jExceptions.TransientError,
+ neo4jExceptions.SessionExpired,
+ ConnectionResetError,
+ OSError,
+ AttributeError,
+ )
+ ),
+ )
async def has_edge(self, source_node_id: str, target_node_id: str) -> bool:
"""
Check if an edge exists between two nodes
@@ -426,6 +454,20 @@ class Neo4JStorage(BaseGraphStorage):
await result.consume() # Ensure results are consumed even on error
raise
+ @retry(
+ stop=stop_after_attempt(3),
+ wait=wait_exponential(multiplier=1, min=4, max=10),
+ retry=retry_if_exception_type(
+ (
+ neo4jExceptions.ServiceUnavailable,
+ neo4jExceptions.TransientError,
+ neo4jExceptions.SessionExpired,
+ ConnectionResetError,
+ OSError,
+ AttributeError,
+ )
+ ),
+ )
async def get_node(self, node_id: str) -> dict[str, str] | None:
"""Get node by its label identifier, return only node properties
@@ -479,6 +521,20 @@ class Neo4JStorage(BaseGraphStorage):
)
raise
+ @retry(
+ stop=stop_after_attempt(3),
+ wait=wait_exponential(multiplier=1, min=4, max=10),
+ retry=retry_if_exception_type(
+ (
+ neo4jExceptions.ServiceUnavailable,
+ neo4jExceptions.TransientError,
+ neo4jExceptions.SessionExpired,
+ ConnectionResetError,
+ OSError,
+ AttributeError,
+ )
+ ),
+ )
async def get_nodes_batch(self, node_ids: list[str]) -> dict[str, dict]:
"""
Retrieve multiple nodes in one query using UNWIND.
@@ -515,6 +571,20 @@ class Neo4JStorage(BaseGraphStorage):
await result.consume() # Make sure to consume the result fully
return nodes
+ @retry(
+ stop=stop_after_attempt(3),
+ wait=wait_exponential(multiplier=1, min=4, max=10),
+ retry=retry_if_exception_type(
+ (
+ neo4jExceptions.ServiceUnavailable,
+ neo4jExceptions.TransientError,
+ neo4jExceptions.SessionExpired,
+ ConnectionResetError,
+ OSError,
+ AttributeError,
+ )
+ ),
+ )
async def node_degree(self, node_id: str) -> int:
"""Get the degree (number of relationships) of a node with the given label.
If multiple nodes have the same label, returns the degree of the first node.
@@ -563,6 +633,20 @@ class Neo4JStorage(BaseGraphStorage):
)
raise
+ @retry(
+ stop=stop_after_attempt(3),
+ wait=wait_exponential(multiplier=1, min=4, max=10),
+ retry=retry_if_exception_type(
+ (
+ neo4jExceptions.ServiceUnavailable,
+ neo4jExceptions.TransientError,
+ neo4jExceptions.SessionExpired,
+ ConnectionResetError,
+ OSError,
+ AttributeError,
+ )
+ ),
+ )
async def node_degrees_batch(self, node_ids: list[str]) -> dict[str, int]:
"""
Retrieve the degree for multiple nodes in a single query using UNWIND.
@@ -647,6 +731,20 @@ class Neo4JStorage(BaseGraphStorage):
edge_degrees[(src, tgt)] = degrees.get(src, 0) + degrees.get(tgt, 0)
return edge_degrees
+ @retry(
+ stop=stop_after_attempt(3),
+ wait=wait_exponential(multiplier=1, min=4, max=10),
+ retry=retry_if_exception_type(
+ (
+ neo4jExceptions.ServiceUnavailable,
+ neo4jExceptions.TransientError,
+ neo4jExceptions.SessionExpired,
+ ConnectionResetError,
+ OSError,
+ AttributeError,
+ )
+ ),
+ )
async def get_edge(
self, source_node_id: str, target_node_id: str
) -> dict[str, str] | None:
@@ -734,6 +832,20 @@ class Neo4JStorage(BaseGraphStorage):
)
raise
+ @retry(
+ stop=stop_after_attempt(3),
+ wait=wait_exponential(multiplier=1, min=4, max=10),
+ retry=retry_if_exception_type(
+ (
+ neo4jExceptions.ServiceUnavailable,
+ neo4jExceptions.TransientError,
+ neo4jExceptions.SessionExpired,
+ ConnectionResetError,
+ OSError,
+ AttributeError,
+ )
+ ),
+ )
async def get_edges_batch(
self, pairs: list[dict[str, str]]
) -> dict[tuple[str, str], dict]:
@@ -784,6 +896,20 @@ class Neo4JStorage(BaseGraphStorage):
await result.consume()
return edges_dict
+ @retry(
+ stop=stop_after_attempt(3),
+ wait=wait_exponential(multiplier=1, min=4, max=10),
+ retry=retry_if_exception_type(
+ (
+ neo4jExceptions.ServiceUnavailable,
+ neo4jExceptions.TransientError,
+ neo4jExceptions.SessionExpired,
+ ConnectionResetError,
+ OSError,
+ AttributeError,
+ )
+ ),
+ )
async def get_node_edges(self, source_node_id: str) -> list[tuple[str, str]] | None:
"""Retrieves all edges (relationships) for a particular node identified by its label.
@@ -851,6 +977,20 @@ class Neo4JStorage(BaseGraphStorage):
)
raise
+ @retry(
+ stop=stop_after_attempt(3),
+ wait=wait_exponential(multiplier=1, min=4, max=10),
+ retry=retry_if_exception_type(
+ (
+ neo4jExceptions.ServiceUnavailable,
+ neo4jExceptions.TransientError,
+ neo4jExceptions.SessionExpired,
+ ConnectionResetError,
+ OSError,
+ AttributeError,
+ )
+ ),
+ )
async def get_nodes_edges_batch(
self, node_ids: list[str]
) -> dict[str, list[tuple[str, str]]]:
From 8c4d7a00ad28b0b7b3cdebda3a4405c30cf59e2c Mon Sep 17 00:00:00 2001
From: yangdx
Date: Tue, 25 Nov 2025 01:35:21 +0800
Subject: [PATCH 4/9] Refactor: Extract retry decorator to reduce code
duplication in Neo4J storage
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
• Define READ_RETRY_EXCEPTIONS constant
• Create reusable READ_RETRY decorator
• Replace 11 duplicate retry decorators
• Improve code maintainability
• Add missing retry to edge_degrees_batch
---
lightrag/kg/neo4j_impl.py | 168 +++++++-------------------------------
1 file changed, 28 insertions(+), 140 deletions(-)
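
Reviewer note: besides de-duplication, READ_RETRY sets reraise=True, which the inline decorators it replaces did not. A short sketch of the difference this makes to callers (stop lowered to 2 attempts to keep the demo fast):

from tenacity import retry, retry_if_exception_type, stop_after_attempt

READ_RETRY = retry(
    stop=stop_after_attempt(2),
    retry=retry_if_exception_type(OSError),
    reraise=True,  # surface the original exception instead of tenacity.RetryError
)

@READ_RETRY
def always_down() -> None:
    raise OSError("database unreachable")

try:
    always_down()
except OSError as exc:  # existing `except OSError` handlers keep working
    print("caller sees the original error:", exc)
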
diff --git a/lightrag/kg/neo4j_impl.py b/lightrag/kg/neo4j_impl.py
index d3d6c4eb..38320643 100644
--- a/lightrag/kg/neo4j_impl.py
+++ b/lightrag/kg/neo4j_impl.py
@@ -44,6 +44,23 @@ config.read("config.ini", "utf-8")
logging.getLogger("neo4j").setLevel(logging.ERROR)
+READ_RETRY_EXCEPTIONS = (
+ neo4jExceptions.ServiceUnavailable,
+ neo4jExceptions.TransientError,
+ neo4jExceptions.SessionExpired,
+ ConnectionResetError,
+ OSError,
+ AttributeError,
+)
+
+READ_RETRY = retry(
+ stop=stop_after_attempt(3),
+ wait=wait_exponential(multiplier=1, min=4, max=10),
+ retry=retry_if_exception_type(READ_RETRY_EXCEPTIONS),
+ reraise=True,
+)
+
+
@final
@dataclass
class Neo4JStorage(BaseGraphStorage):
@@ -352,20 +369,7 @@ class Neo4JStorage(BaseGraphStorage):
# Neo4J handles persistence automatically
pass
- @retry(
- stop=stop_after_attempt(3),
- wait=wait_exponential(multiplier=1, min=4, max=10),
- retry=retry_if_exception_type(
- (
- neo4jExceptions.ServiceUnavailable,
- neo4jExceptions.TransientError,
- neo4jExceptions.SessionExpired,
- ConnectionResetError,
- OSError,
- AttributeError,
- )
- ),
- )
+ @READ_RETRY
async def has_node(self, node_id: str) -> bool:
"""
Check if a node with the given label exists in the database
@@ -399,20 +403,7 @@ class Neo4JStorage(BaseGraphStorage):
await result.consume() # Ensure results are consumed even on error
raise
- @retry(
- stop=stop_after_attempt(3),
- wait=wait_exponential(multiplier=1, min=4, max=10),
- retry=retry_if_exception_type(
- (
- neo4jExceptions.ServiceUnavailable,
- neo4jExceptions.TransientError,
- neo4jExceptions.SessionExpired,
- ConnectionResetError,
- OSError,
- AttributeError,
- )
- ),
- )
+ @READ_RETRY
async def has_edge(self, source_node_id: str, target_node_id: str) -> bool:
"""
Check if an edge exists between two nodes
@@ -454,20 +445,7 @@ class Neo4JStorage(BaseGraphStorage):
await result.consume() # Ensure results are consumed even on error
raise
- @retry(
- stop=stop_after_attempt(3),
- wait=wait_exponential(multiplier=1, min=4, max=10),
- retry=retry_if_exception_type(
- (
- neo4jExceptions.ServiceUnavailable,
- neo4jExceptions.TransientError,
- neo4jExceptions.SessionExpired,
- ConnectionResetError,
- OSError,
- AttributeError,
- )
- ),
- )
+ @READ_RETRY
async def get_node(self, node_id: str) -> dict[str, str] | None:
"""Get node by its label identifier, return only node properties
@@ -521,20 +499,7 @@ class Neo4JStorage(BaseGraphStorage):
)
raise
- @retry(
- stop=stop_after_attempt(3),
- wait=wait_exponential(multiplier=1, min=4, max=10),
- retry=retry_if_exception_type(
- (
- neo4jExceptions.ServiceUnavailable,
- neo4jExceptions.TransientError,
- neo4jExceptions.SessionExpired,
- ConnectionResetError,
- OSError,
- AttributeError,
- )
- ),
- )
+ @READ_RETRY
async def get_nodes_batch(self, node_ids: list[str]) -> dict[str, dict]:
"""
Retrieve multiple nodes in one query using UNWIND.
@@ -571,20 +536,7 @@ class Neo4JStorage(BaseGraphStorage):
await result.consume() # Make sure to consume the result fully
return nodes
- @retry(
- stop=stop_after_attempt(3),
- wait=wait_exponential(multiplier=1, min=4, max=10),
- retry=retry_if_exception_type(
- (
- neo4jExceptions.ServiceUnavailable,
- neo4jExceptions.TransientError,
- neo4jExceptions.SessionExpired,
- ConnectionResetError,
- OSError,
- AttributeError,
- )
- ),
- )
+ @READ_RETRY
async def node_degree(self, node_id: str) -> int:
"""Get the degree (number of relationships) of a node with the given label.
If multiple nodes have the same label, returns the degree of the first node.
@@ -633,20 +585,7 @@ class Neo4JStorage(BaseGraphStorage):
)
raise
- @retry(
- stop=stop_after_attempt(3),
- wait=wait_exponential(multiplier=1, min=4, max=10),
- retry=retry_if_exception_type(
- (
- neo4jExceptions.ServiceUnavailable,
- neo4jExceptions.TransientError,
- neo4jExceptions.SessionExpired,
- ConnectionResetError,
- OSError,
- AttributeError,
- )
- ),
- )
+ @READ_RETRY
async def node_degrees_batch(self, node_ids: list[str]) -> dict[str, int]:
"""
Retrieve the degree for multiple nodes in a single query using UNWIND.
@@ -705,6 +644,7 @@ class Neo4JStorage(BaseGraphStorage):
degrees = int(src_degree) + int(trg_degree)
return degrees
+ @READ_RETRY
async def edge_degrees_batch(
self, edge_pairs: list[tuple[str, str]]
) -> dict[tuple[str, str], int]:
@@ -731,20 +671,7 @@ class Neo4JStorage(BaseGraphStorage):
edge_degrees[(src, tgt)] = degrees.get(src, 0) + degrees.get(tgt, 0)
return edge_degrees
- @retry(
- stop=stop_after_attempt(3),
- wait=wait_exponential(multiplier=1, min=4, max=10),
- retry=retry_if_exception_type(
- (
- neo4jExceptions.ServiceUnavailable,
- neo4jExceptions.TransientError,
- neo4jExceptions.SessionExpired,
- ConnectionResetError,
- OSError,
- AttributeError,
- )
- ),
- )
+ @READ_RETRY
async def get_edge(
self, source_node_id: str, target_node_id: str
) -> dict[str, str] | None:
@@ -832,20 +759,7 @@ class Neo4JStorage(BaseGraphStorage):
)
raise
- @retry(
- stop=stop_after_attempt(3),
- wait=wait_exponential(multiplier=1, min=4, max=10),
- retry=retry_if_exception_type(
- (
- neo4jExceptions.ServiceUnavailable,
- neo4jExceptions.TransientError,
- neo4jExceptions.SessionExpired,
- ConnectionResetError,
- OSError,
- AttributeError,
- )
- ),
- )
+ @READ_RETRY
async def get_edges_batch(
self, pairs: list[dict[str, str]]
) -> dict[tuple[str, str], dict]:
@@ -896,20 +810,7 @@ class Neo4JStorage(BaseGraphStorage):
await result.consume()
return edges_dict
- @retry(
- stop=stop_after_attempt(3),
- wait=wait_exponential(multiplier=1, min=4, max=10),
- retry=retry_if_exception_type(
- (
- neo4jExceptions.ServiceUnavailable,
- neo4jExceptions.TransientError,
- neo4jExceptions.SessionExpired,
- ConnectionResetError,
- OSError,
- AttributeError,
- )
- ),
- )
+ @READ_RETRY
async def get_node_edges(self, source_node_id: str) -> list[tuple[str, str]] | None:
"""Retrieves all edges (relationships) for a particular node identified by its label.
@@ -977,20 +878,7 @@ class Neo4JStorage(BaseGraphStorage):
)
raise
- @retry(
- stop=stop_after_attempt(3),
- wait=wait_exponential(multiplier=1, min=4, max=10),
- retry=retry_if_exception_type(
- (
- neo4jExceptions.ServiceUnavailable,
- neo4jExceptions.TransientError,
- neo4jExceptions.SessionExpired,
- ConnectionResetError,
- OSError,
- AttributeError,
- )
- ),
- )
+ @READ_RETRY
async def get_nodes_edges_batch(
self, node_ids: list[str]
) -> dict[str, list[tuple[str, str]]]:
From 5f91063c7a8bb77673a1803637ad57cc8e04d3ce Mon Sep 17 00:00:00 2001
From: yangdx
Date: Tue, 25 Nov 2025 02:03:28 +0800
Subject: [PATCH 5/9] Add ruff as dependency to pytest and evaluation extras
---
pyproject.toml | 4 +++-
uv.lock | 30 ++++++++++++++++++++++++++++++
2 files changed, 33 insertions(+), 1 deletion(-)
diff --git a/pyproject.toml b/pyproject.toml
index 8d48b5df..31667ab9 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -47,6 +47,7 @@ pytest = [
"pytest>=8.4.2",
"pytest-asyncio>=1.2.0",
"pre-commit",
+ "ruff",
]
api = [
@@ -132,10 +133,11 @@ offline = [
]
evaluation = [
- # Test framework dependencies (for evaluation)
+ # Test framework dependencies
"pytest>=8.4.2",
"pytest-asyncio>=1.2.0",
"pre-commit",
+ "ruff",
# RAG evaluation dependencies (RAGAS framework)
"ragas>=0.3.7",
"datasets>=4.3.0",
diff --git a/uv.lock b/uv.lock
index a4f17ab4..fb483760 100644
--- a/uv.lock
+++ b/uv.lock
@@ -2615,6 +2615,7 @@ evaluation = [
{ name = "pytest" },
{ name = "pytest-asyncio" },
{ name = "ragas" },
+ { name = "ruff" },
]
observability = [
{ name = "langfuse" },
@@ -2700,6 +2701,7 @@ pytest = [
{ name = "pre-commit" },
{ name = "pytest" },
{ name = "pytest-asyncio" },
+ { name = "ruff" },
]
[package.metadata]
@@ -2778,6 +2780,8 @@ requires-dist = [
{ name = "qdrant-client", marker = "extra == 'offline-storage'", specifier = ">=1.11.0,<2.0.0" },
{ name = "ragas", marker = "extra == 'evaluation'", specifier = ">=0.3.7" },
{ name = "redis", marker = "extra == 'offline-storage'", specifier = ">=5.0.0,<8.0.0" },
+ { name = "ruff", marker = "extra == 'evaluation'" },
+ { name = "ruff", marker = "extra == 'pytest'" },
{ name = "setuptools" },
{ name = "setuptools", marker = "extra == 'api'" },
{ name = "tenacity" },
@@ -5637,6 +5641,32 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/3f/50/0a9e7e7afe7339bd5e36911f0ceb15fed51945836ed803ae5afd661057fd/rtree-1.4.1-py3-none-win_arm64.whl", hash = "sha256:3d46f55729b28138e897ffef32f7ce93ac335cb67f9120125ad3742a220800f0", size = 355253, upload-time = "2025-08-13T19:32:00.296Z" },
]
+[[package]]
+name = "ruff"
+version = "0.14.6"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/52/f0/62b5a1a723fe183650109407fa56abb433b00aa1c0b9ba555f9c4efec2c6/ruff-0.14.6.tar.gz", hash = "sha256:6f0c742ca6a7783a736b867a263b9a7a80a45ce9bee391eeda296895f1b4e1cc", size = 5669501, upload-time = "2025-11-21T14:26:17.903Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/67/d2/7dd544116d107fffb24a0064d41a5d2ed1c9d6372d142f9ba108c8e39207/ruff-0.14.6-py3-none-linux_armv6l.whl", hash = "sha256:d724ac2f1c240dbd01a2ae98db5d1d9a5e1d9e96eba999d1c48e30062df578a3", size = 13326119, upload-time = "2025-11-21T14:25:24.2Z" },
+ { url = "https://files.pythonhosted.org/packages/36/6a/ad66d0a3315d6327ed6b01f759d83df3c4d5f86c30462121024361137b6a/ruff-0.14.6-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:9f7539ea257aa4d07b7ce87aed580e485c40143f2473ff2f2b75aee003186004", size = 13526007, upload-time = "2025-11-21T14:25:26.906Z" },
+ { url = "https://files.pythonhosted.org/packages/a3/9d/dae6db96df28e0a15dea8e986ee393af70fc97fd57669808728080529c37/ruff-0.14.6-py3-none-macosx_11_0_arm64.whl", hash = "sha256:7f6007e55b90a2a7e93083ba48a9f23c3158c433591c33ee2e99a49b889c6332", size = 12676572, upload-time = "2025-11-21T14:25:29.826Z" },
+ { url = "https://files.pythonhosted.org/packages/76/a4/f319e87759949062cfee1b26245048e92e2acce900ad3a909285f9db1859/ruff-0.14.6-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a8e7b9d73d8728b68f632aa8e824ef041d068d231d8dbc7808532d3629a6bef", size = 13140745, upload-time = "2025-11-21T14:25:32.788Z" },
+ { url = "https://files.pythonhosted.org/packages/95/d3/248c1efc71a0a8ed4e8e10b4b2266845d7dfc7a0ab64354afe049eaa1310/ruff-0.14.6-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d50d45d4553a3ebcbd33e7c5e0fe6ca4aafd9a9122492de357205c2c48f00775", size = 13076486, upload-time = "2025-11-21T14:25:35.601Z" },
+ { url = "https://files.pythonhosted.org/packages/a5/19/b68d4563fe50eba4b8c92aa842149bb56dd24d198389c0ed12e7faff4f7d/ruff-0.14.6-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:118548dd121f8a21bfa8ab2c5b80e5b4aed67ead4b7567790962554f38e598ce", size = 13727563, upload-time = "2025-11-21T14:25:38.514Z" },
+ { url = "https://files.pythonhosted.org/packages/47/ac/943169436832d4b0e867235abbdb57ce3a82367b47e0280fa7b4eabb7593/ruff-0.14.6-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:57256efafbfefcb8748df9d1d766062f62b20150691021f8ab79e2d919f7c11f", size = 15199755, upload-time = "2025-11-21T14:25:41.516Z" },
+ { url = "https://files.pythonhosted.org/packages/c9/b9/288bb2399860a36d4bb0541cb66cce3c0f4156aaff009dc8499be0c24bf2/ruff-0.14.6-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ff18134841e5c68f8e5df1999a64429a02d5549036b394fafbe410f886e1989d", size = 14850608, upload-time = "2025-11-21T14:25:44.428Z" },
+ { url = "https://files.pythonhosted.org/packages/ee/b1/a0d549dd4364e240f37e7d2907e97ee80587480d98c7799d2d8dc7a2f605/ruff-0.14.6-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:29c4b7ec1e66a105d5c27bd57fa93203637d66a26d10ca9809dc7fc18ec58440", size = 14118754, upload-time = "2025-11-21T14:25:47.214Z" },
+ { url = "https://files.pythonhosted.org/packages/13/ac/9b9fe63716af8bdfddfacd0882bc1586f29985d3b988b3c62ddce2e202c3/ruff-0.14.6-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:167843a6f78680746d7e226f255d920aeed5e4ad9c03258094a2d49d3028b105", size = 13949214, upload-time = "2025-11-21T14:25:50.002Z" },
+ { url = "https://files.pythonhosted.org/packages/12/27/4dad6c6a77fede9560b7df6802b1b697e97e49ceabe1f12baf3ea20862e9/ruff-0.14.6-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:16a33af621c9c523b1ae006b1b99b159bf5ac7e4b1f20b85b2572455018e0821", size = 14106112, upload-time = "2025-11-21T14:25:52.841Z" },
+ { url = "https://files.pythonhosted.org/packages/6a/db/23e322d7177873eaedea59a7932ca5084ec5b7e20cb30f341ab594130a71/ruff-0.14.6-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:1432ab6e1ae2dc565a7eea707d3b03a0c234ef401482a6f1621bc1f427c2ff55", size = 13035010, upload-time = "2025-11-21T14:25:55.536Z" },
+ { url = "https://files.pythonhosted.org/packages/a8/9c/20e21d4d69dbb35e6a1df7691e02f363423658a20a2afacf2a2c011800dc/ruff-0.14.6-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:4c55cfbbe7abb61eb914bfd20683d14cdfb38a6d56c6c66efa55ec6570ee4e71", size = 13054082, upload-time = "2025-11-21T14:25:58.625Z" },
+ { url = "https://files.pythonhosted.org/packages/66/25/906ee6a0464c3125c8d673c589771a974965c2be1a1e28b5c3b96cb6ef88/ruff-0.14.6-py3-none-musllinux_1_2_i686.whl", hash = "sha256:efea3c0f21901a685fff4befda6d61a1bf4cb43de16da87e8226a281d614350b", size = 13303354, upload-time = "2025-11-21T14:26:01.816Z" },
+ { url = "https://files.pythonhosted.org/packages/4c/58/60577569e198d56922b7ead07b465f559002b7b11d53f40937e95067ca1c/ruff-0.14.6-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:344d97172576d75dc6afc0e9243376dbe1668559c72de1864439c4fc95f78185", size = 14054487, upload-time = "2025-11-21T14:26:05.058Z" },
+ { url = "https://files.pythonhosted.org/packages/67/0b/8e4e0639e4cc12547f41cb771b0b44ec8225b6b6a93393176d75fe6f7d40/ruff-0.14.6-py3-none-win32.whl", hash = "sha256:00169c0c8b85396516fdd9ce3446c7ca20c2a8f90a77aa945ba6b8f2bfe99e85", size = 13013361, upload-time = "2025-11-21T14:26:08.152Z" },
+ { url = "https://files.pythonhosted.org/packages/fb/02/82240553b77fd1341f80ebb3eaae43ba011c7a91b4224a9f317d8e6591af/ruff-0.14.6-py3-none-win_amd64.whl", hash = "sha256:390e6480c5e3659f8a4c8d6a0373027820419ac14fa0d2713bd8e6c3e125b8b9", size = 14432087, upload-time = "2025-11-21T14:26:10.891Z" },
+ { url = "https://files.pythonhosted.org/packages/a5/1f/93f9b0fad9470e4c829a5bb678da4012f0c710d09331b860ee555216f4ea/ruff-0.14.6-py3-none-win_arm64.whl", hash = "sha256:d43c81fbeae52cfa8728d8766bbf46ee4298c888072105815b392da70ca836b2", size = 13520930, upload-time = "2025-11-21T14:26:13.951Z" },
+]
+
[[package]]
name = "s3transfer"
version = "0.14.0"
From 48b67d3077b7cf54b17f92384eb77abec0066e65 Mon Sep 17 00:00:00 2001
From: yangdx
Date: Tue, 25 Nov 2025 02:51:55 +0800
Subject: [PATCH 6/9] Handle missing WebUI assets gracefully without blocking
server startup
- Change build check from error to warning
- Redirect to /docs when WebUI unavailable
- Add webui_available to health endpoint
- Only mount /webui if assets exist
- Return status tuple from build check
---
lightrag/api/lightrag_server.py | 106 +++++++++++++++++++++++---------
1 file changed, 78 insertions(+), 28 deletions(-)
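
Reviewer note: reduced to a standalone app, the conditional-mount pattern looks roughly like the sketch below. It is not the server's actual wiring; SmartStaticFiles is replaced by Starlette's stock StaticFiles:

from pathlib import Path
from fastapi import FastAPI
from fastapi.responses import RedirectResponse
from fastapi.staticfiles import StaticFiles

app = FastAPI()
webui_dir = Path(__file__).parent / "webui"
webui_assets_exist = (webui_dir / "index.html").exists()

@app.get("/")
async def root():
    # Fall back to the interactive API docs when the WebUI was never built
    return RedirectResponse(url="/webui" if webui_assets_exist else "/docs")

if webui_assets_exist:
    app.mount("/webui", StaticFiles(directory=webui_dir, html=True), name="webui")
else:
    @app.get("/webui")
    async def webui_fallback():
        return RedirectResponse(url="/docs")
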
diff --git a/lightrag/api/lightrag_server.py b/lightrag/api/lightrag_server.py
index b29e39b2..a8a14c66 100644
--- a/lightrag/api/lightrag_server.py
+++ b/lightrag/api/lightrag_server.py
@@ -159,19 +159,22 @@ def check_frontend_build():
"""Check if frontend is built and optionally check if source is up-to-date
Returns:
- bool: True if frontend is outdated, False if up-to-date or production environment
+ tuple: (assets_exist: bool, is_outdated: bool)
+ - assets_exist: True if WebUI build files exist
+ - is_outdated: True if source is newer than build (only in dev environment)
"""
webui_dir = Path(__file__).parent / "webui"
index_html = webui_dir / "index.html"
- # 1. Check if build files exist (required)
+ # 1. Check if build files exist
if not index_html.exists():
- ASCIIColors.red("\n" + "=" * 80)
- ASCIIColors.red("ERROR: Frontend Not Built")
- ASCIIColors.red("=" * 80)
+ ASCIIColors.yellow("\n" + "=" * 80)
+ ASCIIColors.yellow("WARNING: Frontend Not Built")
+ ASCIIColors.yellow("=" * 80)
ASCIIColors.yellow("The WebUI frontend has not been built yet.")
+ ASCIIColors.yellow("The API server will start without the WebUI interface.")
ASCIIColors.yellow(
- "Please build the frontend code first using the following commands:\n"
+ "\nTo enable WebUI, build the frontend using these commands:\n"
)
ASCIIColors.cyan(" cd lightrag_webui")
ASCIIColors.cyan(" bun install --frozen-lockfile")
@@ -181,8 +184,8 @@ def check_frontend_build():
ASCIIColors.cyan(
"Note: Make sure you have Bun installed. Visit https://bun.sh for installation."
)
- ASCIIColors.red("=" * 80 + "\n")
- sys.exit(1) # Exit immediately
+ ASCIIColors.yellow("=" * 80 + "\n")
+ return (False, False) # Assets don't exist, not outdated
# 2. Check if this is a development environment (source directory exists)
try:
@@ -195,7 +198,7 @@ def check_frontend_build():
logger.debug(
"Production environment detected, skipping source freshness check"
)
- return False
+ return (True, False) # Assets exist, not outdated (prod environment)
# Development environment, perform source code timestamp check
logger.debug("Development environment detected, checking source freshness")
@@ -270,20 +273,20 @@ def check_frontend_build():
ASCIIColors.cyan(" cd ..")
ASCIIColors.yellow("\nThe server will continue with the current build.")
ASCIIColors.yellow("=" * 80 + "\n")
- return True # Frontend is outdated
+ return (True, True) # Assets exist, outdated
else:
logger.info("Frontend build is up-to-date")
- return False # Frontend is up-to-date
+ return (True, False) # Assets exist, up-to-date
except Exception as e:
# If check fails, log warning but don't affect startup
logger.warning(f"Failed to check frontend source freshness: {e}")
- return False # Assume up-to-date on error
+ return (True, False) # Assume assets exist and up-to-date on error
def create_app(args):
- # Check frontend build first and get outdated status
- is_frontend_outdated = check_frontend_build()
+ # Check frontend build first and get status
+ webui_assets_exist, is_frontend_outdated = check_frontend_build()
# Create unified API version display with warning symbol if frontend is outdated
api_version_display = (
@@ -1067,8 +1070,11 @@ def create_app(args):
@app.get("/")
async def redirect_to_webui():
- """Redirect root path to /webui"""
- return RedirectResponse(url="/webui")
+ """Redirect root path based on WebUI availability"""
+ if webui_assets_exist:
+ return RedirectResponse(url="/webui")
+ else:
+ return RedirectResponse(url="/docs")
@app.get("/auth-status")
async def get_auth_status():
@@ -1135,9 +1141,41 @@ def create_app(args):
"webui_description": webui_description,
}
- @app.get("/health", dependencies=[Depends(combined_auth)])
+ @app.get(
+ "/health",
+ dependencies=[Depends(combined_auth)],
+ summary="Get system health and configuration status",
+ description="Returns comprehensive system status including WebUI availability, configuration, and operational metrics",
+ response_description="System health status with configuration details",
+ responses={
+ 200: {
+ "description": "Successful response with system status",
+ "content": {
+ "application/json": {
+ "example": {
+ "status": "healthy",
+ "webui_available": True,
+ "working_directory": "/path/to/working/dir",
+ "input_directory": "/path/to/input/dir",
+ "configuration": {
+ "llm_binding": "openai",
+ "llm_model": "gpt-4",
+ "embedding_binding": "openai",
+ "embedding_model": "text-embedding-ada-002",
+ "workspace": "default",
+ },
+ "auth_mode": "enabled",
+ "pipeline_busy": False,
+ "core_version": "0.0.1",
+ "api_version": "0.0.1",
+ }
+ }
+ },
+ }
+ },
+ )
async def get_status(request: Request):
- """Get current system status"""
+ """Get current system status including WebUI availability"""
try:
workspace = get_workspace_from_request(request)
default_workspace = get_default_workspace()
@@ -1157,6 +1195,7 @@ def create_app(args):
return {
"status": "healthy",
+ "webui_available": webui_assets_exist,
"working_directory": str(args.working_dir),
"input_directory": str(args.input_dir),
"configuration": {
@@ -1246,16 +1285,27 @@ def create_app(args):
name="swagger-ui-static",
)
- # Webui mount webui/index.html
- static_dir = Path(__file__).parent / "webui"
- static_dir.mkdir(exist_ok=True)
- app.mount(
- "/webui",
- SmartStaticFiles(
- directory=static_dir, html=True, check_dir=True
- ), # Use SmartStaticFiles
- name="webui",
- )
+ # Conditionally mount WebUI only if assets exist
+ if webui_assets_exist:
+ static_dir = Path(__file__).parent / "webui"
+ static_dir.mkdir(exist_ok=True)
+ app.mount(
+ "/webui",
+ SmartStaticFiles(
+ directory=static_dir, html=True, check_dir=True
+ ), # Use SmartStaticFiles
+ name="webui",
+ )
+ logger.info("WebUI assets mounted at /webui")
+ else:
+ logger.info("WebUI assets not available, /webui route not mounted")
+
+        # Fallback so /webui still resolves when assets are missing
+        @app.get("/webui")
+        @app.get("/webui/")
+        async def webui_redirect_to_docs():
+            """Redirect /webui to /docs when WebUI is not available"""
+            return RedirectResponse(url="/docs")
return app
From 8994c70f2f6590e5048eb68e2ff89ae7de0e8c9b Mon Sep 17 00:00:00 2001
From: EightyOliveira
Date: Tue, 25 Nov 2025 16:36:41 +0800
Subject: [PATCH 7/9] fix: catch APITimeoutError before APIConnectionError
---
lightrag/llm/openai.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
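
Reviewer note: the order matters because in the openai SDK APITimeoutError subclasses APIConnectionError, so with the old order the timeout handler was unreachable. The same pitfall in miniature, with stand-in exception classes:

class ConnectionErr(Exception): ...
class TimeoutErr(ConnectionErr): ...  # subclass, like APITimeoutError(APIConnectionError)

def classify(exc: Exception) -> str:
    try:
        raise exc
    except TimeoutErr:      # most specific handler must come first
        return "timeout"
    except ConnectionErr:
        return "connection"

print(classify(TimeoutErr()))  # "timeout"; with the handlers swapped it would be "connection"
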
diff --git a/lightrag/llm/openai.py b/lightrag/llm/openai.py
index 11a5f9d7..5ab2ac87 100644
--- a/lightrag/llm/openai.py
+++ b/lightrag/llm/openai.py
@@ -309,6 +309,10 @@ async def openai_complete_if_cache(
response = await openai_async_client.chat.completions.create(
model=api_model, messages=messages, **kwargs
)
+ except APITimeoutError as e:
+ logger.error(f"OpenAI API Timeout Error: {e}")
+ await openai_async_client.close() # Ensure client is closed
+ raise
except APIConnectionError as e:
logger.error(f"OpenAI API Connection Error: {e}")
await openai_async_client.close() # Ensure client is closed
@@ -317,10 +321,6 @@ async def openai_complete_if_cache(
logger.error(f"OpenAI API Rate Limit Error: {e}")
await openai_async_client.close() # Ensure client is closed
raise
- except APITimeoutError as e:
- logger.error(f"OpenAI API Timeout Error: {e}")
- await openai_async_client.close() # Ensure client is closed
- raise
except Exception as e:
logger.error(
f"OpenAI API Call Failed,\nModel: {model},\nParams: {kwargs}, Got: {e}"
From 777c91794b7b2ad5cdfabf770b542ee5acd1f60f Mon Sep 17 00:00:00 2001
From: yangdx
Date: Tue, 25 Nov 2025 17:16:55 +0800
Subject: [PATCH 8/9] Add Langfuse observability configuration to env.example
- Add Langfuse environment variables
- Include setup instructions
- Support OpenAI compatible APIs
- Enable tracing configuration
- Add cloud/self-host options
---
env.example | 11 +++++++++++
1 file changed, 11 insertions(+)
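
Reviewer note: a sketch of how an application might gate tracing on these variables. The names match env.example; the gating function itself is illustrative, not LightRAG's actual startup code:

import os

def langfuse_configured() -> bool:
    # Tracing needs both keys plus the explicit enable flag
    return (
        os.getenv("LANGFUSE_ENABLE_TRACE", "false").lower() == "true"
        and bool(os.getenv("LANGFUSE_SECRET_KEY"))
        and bool(os.getenv("LANGFUSE_PUBLIC_KEY"))
    )

if langfuse_configured():
    host = os.getenv("LANGFUSE_HOST", "https://cloud.langfuse.com")
    print(f"Langfuse tracing enabled against {host}")
else:
    print("Langfuse tracing disabled")
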
diff --git a/env.example b/env.example
index fea99953..d30a03cb 100644
--- a/env.example
+++ b/env.example
@@ -447,6 +447,17 @@ MEMGRAPH_DATABASE=memgraph
### DB specific workspace should not be set, keep for compatible only
### MEMGRAPH_WORKSPACE=forced_workspace_name
+###########################################################
+### Langfuse Observability Configuration
+### Only works with LLMs served through an OpenAI-compatible API
+### Install with: pip install lightrag-hku[observability]
+### Sign up at: https://cloud.langfuse.com or self-host
+###########################################################
+# LANGFUSE_SECRET_KEY=""
+# LANGFUSE_PUBLIC_KEY=""
+# LANGFUSE_HOST="https://cloud.langfuse.com"  # or your self-hosted instance URL
+# LANGFUSE_ENABLE_TRACE=true
+
############################
### Evaluation Configuration
############################
From 93d445dfdd4c51e4e56a516bae0b81036593434f Mon Sep 17 00:00:00 2001
From: yangdx
Date: Tue, 25 Nov 2025 18:24:39 +0800
Subject: [PATCH 9/9] Add pipeline status lock function for legacy
compatibility
- Add get_pipeline_status_lock function
- Return NamespaceLock for consistency
- Support workspace parameter
- Enable logging option
- Legacy code compatibility
---
lightrag/kg/shared_storage.py | 14 ++++++++++++++
1 file changed, 14 insertions(+)
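
Reviewer note: expected call pattern for the shim. This sketch assumes NamespaceLock is usable as an async context manager, which the diff implies but does not show:

from lightrag.kg.shared_storage import get_pipeline_status_lock

async def mark_pipeline_busy() -> None:
    lock = get_pipeline_status_lock(enable_logging=True)  # defaults to the default workspace
    async with lock:  # assumption: NamespaceLock supports `async with`
        ...  # read or mutate shared pipeline status under the lock
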
diff --git a/lightrag/kg/shared_storage.py b/lightrag/kg/shared_storage.py
index 834cdc8f..ef0f61e2 100644
--- a/lightrag/kg/shared_storage.py
+++ b/lightrag/kg/shared_storage.py
@@ -1683,3 +1683,17 @@ def get_default_workspace() -> str:
"""
global _default_workspace
return _default_workspace
+
+
+def get_pipeline_status_lock(
+    enable_logging: bool = False, workspace: str | None = None
+) -> NamespaceLock:
+ """Return unified storage lock for pipeline status data consistency.
+
+ This function is for compatibility with legacy code only.
+ """
+ global _default_workspace
+ actual_workspace = workspace if workspace else _default_workspace
+ return get_namespace_lock(
+ "pipeline_status", workspace=actual_workspace, enable_logging=enable_logging
+ )