Merge branch 'main' into rerank

This commit is contained in:
zrguo 2025-07-14 16:24:29 +08:00
commit c9cbd2d3e0
74 changed files with 1672 additions and 1012 deletions

View file

@ -250,7 +250,7 @@ if __name__ == "__main__":
| **embedding_func_max_async** | `int` | 最大并发异步嵌入进程数 | `16` |
| **llm_model_func** | `callable` | LLM生成的函数 | `gpt_4o_mini_complete` |
| **llm_model_name** | `str` | 用于生成的LLM模型名称 | `meta-llama/Llama-3.2-1B-Instruct` |
| **llm_model_max_token_size** | `int` | LLM生成的最大令牌大小影响实体关系摘要 | `32768`默认值由环境变量MAX_TOKENS更改 |
| **llm_model_max_token_size** | `int` | 生成实体关系摘要时送给LLM的最大令牌数 | `32000`默认值由环境变量MAX_TOKENS更改 |
| **llm_model_max_async** | `int` | 最大并发异步LLM进程数 | `4`默认值由环境变量MAX_ASYNC更改 |
| **llm_model_kwargs** | `dict` | LLM生成的附加参数 | |
| **vector_db_storage_cls_kwargs** | `dict` | 向量数据库的附加参数,如设置节点和关系检索的阈值 | cosine_better_than_threshold: 0.2默认值由环境变量COSINE_THRESHOLD更改 |

View file

@ -257,7 +257,7 @@ A full list of LightRAG init parameters:
| **embedding_func_max_async** | `int` | Maximum number of concurrent asynchronous embedding processes | `16` |
| **llm_model_func** | `callable` | Function for LLM generation | `gpt_4o_mini_complete` |
| **llm_model_name** | `str` | LLM model name for generation | `meta-llama/Llama-3.2-1B-Instruct` |
| **llm_model_max_token_size** | `int` | Maximum token size for LLM generation (affects entity relation summaries) | `32768`default value changed by env var MAX_TOKENS) |
| **llm_model_max_token_size** | `int` | Maximum tokens sent to the LLM to generate entity relation summaries | `32000`default value changed by env var MAX_TOKENS) |
| **llm_model_max_async** | `int` | Maximum number of concurrent asynchronous LLM processes | `4`default value changed by env var MAX_ASYNC) |
| **llm_model_kwargs** | `dict` | Additional parameters for LLM generation | |
| **vector_db_storage_cls_kwargs** | `dict` | Additional parameters for vector database, like setting the threshold for nodes and relations retrieval | cosine_better_than_threshold: 0.2default value changed by env var COSINE_THRESHOLD) |

View file

@ -1,281 +1,114 @@
# LightRAG Multi-Document Processing: Concurrent Control Strategy Analysis
## LightRAG Multi-Document Processing: Concurrent Control Strategy
LightRAG employs a multi-layered concurrent control strategy when processing multiple documents. This article provides an in-depth analysis of the concurrent control mechanisms at document level, chunk level, and LLM request level, helping you understand why specific concurrent behaviors occur.
## Overview
LightRAG's concurrent control is divided into three layers:
1. **Document-level concurrency**: Controls the number of documents processed simultaneously
2. **Chunk-level concurrency**: Controls the number of chunks processed simultaneously within a single document
3. **LLM request-level concurrency**: Controls the global concurrent number of LLM requests
## 1. Document-Level Concurrent Control
### 1. Document-Level Concurrent Control
**Control Parameter**: `max_parallel_insert`
Document-level concurrency is controlled by the `max_parallel_insert` parameter, with a default value of 2.
This parameter controls the number of documents processed simultaneously. The purpose is to prevent excessive parallelism from overwhelming system resources, which could lead to extended processing times for individual files. Document-level concurrency is governed by the `max_parallel_insert` attribute within LightRAG, which defaults to 2 and is configurable via the `MAX_PARALLEL_INSERT` environment variable.
```python
# lightrag/lightrag.py
max_parallel_insert: int = field(default=int(os.getenv("MAX_PARALLEL_INSERT", 2)))
```
### Implementation Mechanism
In the `apipeline_process_enqueue_documents` method, a semaphore is used to control document concurrency:
```python
# lightrag/lightrag.py - apipeline_process_enqueue_documents method
async def process_document(
doc_id: str,
status_doc: DocProcessingStatus,
split_by_character: str | None,
split_by_character_only: bool,
pipeline_status: dict,
pipeline_status_lock: asyncio.Lock,
semaphore: asyncio.Semaphore, # Document-level semaphore
) -> None:
"""Process single document"""
async with semaphore: # 🔥 Document-level concurrent control
# ... Process all chunks of a single document
# Create document-level semaphore
semaphore = asyncio.Semaphore(self.max_parallel_insert) # Default 2
# Create processing tasks for each document
doc_tasks = []
for doc_id, status_doc in to_process_docs.items():
doc_tasks.append(
process_document(
doc_id, status_doc, split_by_character, split_by_character_only,
pipeline_status, pipeline_status_lock, semaphore
)
)
# Wait for all documents to complete processing
await asyncio.gather(*doc_tasks)
```
## 2. Chunk-Level Concurrent Control
### 2. Chunk-Level Concurrent Control
**Control Parameter**: `llm_model_max_async`
**Key Point**: Each document independently creates its own chunk semaphore!
```python
# lightrag/lightrag.py
llm_model_max_async: int = field(default=int(os.getenv("MAX_ASYNC", 4)))
```
### Implementation Mechanism
In the `extract_entities` function, **each document independently creates** its own chunk semaphore:
```python
# lightrag/operate.py - extract_entities function
async def extract_entities(chunks: dict[str, TextChunkSchema], global_config: dict[str, str], ...):
# 🔥 Key: Each document independently creates this semaphore!
llm_model_max_async = global_config.get("llm_model_max_async", 4)
semaphore = asyncio.Semaphore(llm_model_max_async) # Chunk semaphore for each document
async def _process_with_semaphore(chunk):
async with semaphore: # 🔥 Chunk concurrent control within document
return await _process_single_content(chunk)
# Create tasks for each chunk
tasks = []
for c in ordered_chunks:
task = asyncio.create_task(_process_with_semaphore(c))
tasks.append(task)
# Wait for all chunks to complete processing
done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_EXCEPTION)
chunk_results = [task.result() for task in tasks]
return chunk_results
```
### Important Inference: System Overall Chunk Concurrency
Since each document independently creates chunk semaphores, the theoretical chunk concurrency of the system is:
**Theoretical Chunk Concurrency = max_parallel_insert × llm_model_max_async**
This parameter controls the number of chunks processed simultaneously in the extraction stage within a document. The purpose is to prevent a high volume of concurrent requests from monopolizing LLM processing resources, which would impede the efficient parallel processing of multiple files. Chunk-Level Concurrent Control is governed by the `llm_model_max_async` attribute within LightRAG, which defaults to 4 and is configurable via the `MAX_ASYNC` environment variable. The purpose of this parameter is to fully leverage the LLM's concurrency capabilities when processing individual documents.
In the `extract_entities` function, **each document independently creates** its own chunk semaphore. Since each document independently creates chunk semaphores, the theoretical chunk concurrency of the system is:
$$
\text{Chunk Concurrency} = \text{max\_parallel\_insert} \times \text{llm\_model\_max\_async}
$$
For example:
- `max_parallel_insert = 2` (process 2 documents simultaneously)
- `llm_model_max_async = 4` (maximum 4 chunk concurrency per document)
- **Theoretical result**: Maximum 2 × 4 = 8 chunks simultaneously in "processing" state
- Theoretical chunk-level concurrent: 2 × 4 = 8
## 3. LLM Request-Level Concurrent Control (The Real Bottleneck)
### 3. Graph-Level Concurrent Control
**Control Parameter**: `llm_model_max_async` (globally shared)
**Control Parameter**: `llm_model_max_async * 2`
**Key**: Although there might be 8 chunks "in processing", all LLM requests share the same global priority queue!
This parameter controls the number of entities and relations processed simultaneously in the merging stage within a document. The purpose is to prevent a high volume of concurrent requests from monopolizing LLM processing resources, which would impede the efficient parallel processing of multiple files. Graph-level concurrency is governed by the `llm_model_max_async` attribute within LightRAG, which defaults to 4 and is configurable via the `MAX_ASYNC` environment variable. Graph-level parallelism control parameters are equally applicable to managing parallelism during the entity relationship reconstruction phase after document deletion.
Given that the entity relationship merging phase doesn't necessitate LLM interaction for every operation, its parallelism is set at double the LLM's parallelism. This optimizes machine utilization while preventing excessive queuing and resource contention for the LLM.
### 4. LLM-Level Concurrent Control
**Control Parameter**: `llm_model_max_async`
This parameter governs the **concurrent volume** of LLM requests dispatched by the entire LightRAG system, encompassing the document extraction stage, merging stage, and user query handling.
LLM request prioritization is managed via a global priority queue, which **systematically prioritizes user queries** over merging-related requests, and merging-related requests over extraction-related requests. This strategic prioritization **minimizes user query latency**.
LLM-level concurrency is governed by the `llm_model_max_async` attribute within LightRAG, which defaults to 4 and is configurable via the `MAX_ASYNC` environment variable.
### 5. Complete Concurrent Hierarchy Diagram
```mermaid
graph TD
classDef doc fill:#e6f3ff,stroke:#5b9bd5,stroke-width:2px;
classDef chunk fill:#fbe5d6,stroke:#ed7d31,stroke-width:1px;
classDef merge fill:#e2f0d9,stroke:#70ad47,stroke-width:2px;
A["Multiple Documents<br>max_parallel_insert = 2"] --> A1
A --> B1
A1[DocA: split to n chunks] --> A_chunk;
B1[DocB: split to m chunks] --> B_chunk;
subgraph A_chunk[Extraction Stage]
A_chunk_title[Entity Relation Extraction<br>llm_model_max_async = 4];
A_chunk_title --> A_chunk1[Chunk A1]:::chunk;
A_chunk_title --> A_chunk2[Chunk A2]:::chunk;
A_chunk_title --> A_chunk3[Chunk A3]:::chunk;
A_chunk_title --> A_chunk4[Chunk A4]:::chunk;
A_chunk1 & A_chunk2 & A_chunk3 & A_chunk4 --> A_chunk_done([Extraction Complete]);
end
subgraph B_chunk[Extraction Stage]
B_chunk_title[Entity Relation Extraction<br>llm_model_max_async = 4];
B_chunk_title --> B_chunk1[Chunk B1]:::chunk;
B_chunk_title --> B_chunk2[Chunk B2]:::chunk;
B_chunk_title --> B_chunk3[Chunk B3]:::chunk;
B_chunk_title --> B_chunk4[Chunk B4]:::chunk;
B_chunk1 & B_chunk2 & B_chunk3 & B_chunk4 --> B_chunk_done([Extraction Complete]);
end
A_chunk -.->|LLM Request| LLM_Queue;
A_chunk --> A_merge;
B_chunk --> B_merge;
subgraph A_merge[Merge Stage]
A_merge_title[Entity Relation Merging<br>llm_model_max_async * 2 = 8];
A_merge_title --> A1_entity[Ent a1]:::merge;
A_merge_title --> A2_entity[Ent a2]:::merge;
A_merge_title --> A3_entity[Rel a3]:::merge;
A_merge_title --> A4_entity[Rel a4]:::merge;
A1_entity & A2_entity & A3_entity & A4_entity --> A_done([Merge Complete])
end
subgraph B_merge[Merge Stage]
B_merge_title[Entity Relation Merging<br>llm_model_max_async * 2 = 8];
B_merge_title --> B1_entity[Ent b1]:::merge;
B_merge_title --> B2_entity[Ent b2]:::merge;
B_merge_title --> B3_entity[Rel b3]:::merge;
B_merge_title --> B4_entity[Rel b4]:::merge;
B1_entity & B2_entity & B3_entity & B4_entity --> B_done([Merge Complete])
end
A_merge -.->|LLM Request| LLM_Queue["LLM Request Prioritized Queue<br>llm_model_max_async = 4"];
B_merge -.->|LLM Request| LLM_Queue;
B_chunk -.->|LLM Request| LLM_Queue;
```python
# lightrag/lightrag.py - __post_init__ method
self.llm_model_func = priority_limit_async_func_call(self.llm_model_max_async)(
partial(
self.llm_model_func,
hashing_kv=hashing_kv,
**self.llm_model_kwargs,
)
)
# 🔥 Global LLM queue size = llm_model_max_async = 4
```
### Priority Queue Implementation
> The extraction and merge stages share a global prioritized LLM queue, regulated by `llm_model_max_async`. While numerous entity and relation extraction and merging operations may be "actively processing", **only a limited number will concurrently execute LLM requests**; the remainder will be queued, awaiting their turn.
```python
# lightrag/utils.py - priority_limit_async_func_call function
def priority_limit_async_func_call(max_size: int, max_queue_size: int = 1000):
def final_decro(func):
queue = asyncio.PriorityQueue(maxsize=max_queue_size)
tasks = set()
### 6. Performance Optimization Recommendations
async def worker():
"""Worker that processes tasks in the priority queue"""
while not shutdown_event.is_set():
try:
priority, count, future, args, kwargs = await asyncio.wait_for(queue.get(), timeout=1.0)
result = await func(*args, **kwargs) # 🔥 Actual LLM call
if not future.done():
future.set_result(result)
except Exception as e:
# Error handling...
finally:
queue.task_done()
* **Increase LLM Concurrent Setting based on the capabilities of your LLM server or API provider**
# 🔥 Create fixed number of workers (max_size), this is the real concurrency limit
for _ in range(max_size):
task = asyncio.create_task(worker())
tasks.add(task)
```
During the file processing phase, the performance and concurrency capabilities of the LLM are critical bottlenecks. When deploying LLMs locally, the service's concurrency capacity must adequately account for the context length requirements of LightRAG. LightRAG recommends that LLMs support a minimum context length of 32KB; therefore, server concurrency should be calculated based on this benchmark. For API providers, LightRAG will retry requests up to three times if the client's request is rejected due to concurrent request limits. Backend logs can be used to determine if LLM retries are occurring, thereby indicating whether `MAX_ASYNC` has exceeded the API provider's limits.
## 4. Chunk Internal Processing Mechanism (Serial)
* **Align Parallel Document Insertion Settings with LLM Concurrency Configurations**
### Why Serial?
Internal processing of each chunk strictly follows this serial execution order:
```python
# lightrag/operate.py - _process_single_content function
async def _process_single_content(chunk_key_dp: tuple[str, TextChunkSchema]):
# Step 1: Initial entity extraction
hint_prompt = entity_extract_prompt.format(**{**context_base, "input_text": content})
final_result = await use_llm_func_with_cache(hint_prompt, use_llm_func, ...)
# Process initial extraction results
maybe_nodes, maybe_edges = await _process_extraction_result(final_result, chunk_key, file_path)
# Step 2: Gleaning phase
for now_glean_index in range(entity_extract_max_gleaning):
# 🔥 Serial wait for gleaning results
glean_result = await use_llm_func_with_cache(
continue_prompt, use_llm_func,
llm_response_cache=llm_response_cache,
history_messages=history, cache_type="extract"
)
# Process gleaning results
glean_nodes, glean_edges = await _process_extraction_result(glean_result, chunk_key, file_path)
# Merge results...
# Step 3: Determine whether to continue loop
if now_glean_index == entity_extract_max_gleaning - 1:
break
# 🔥 Serial wait for loop decision results
if_loop_result = await use_llm_func_with_cache(
if_loop_prompt, use_llm_func,
llm_response_cache=llm_response_cache,
history_messages=history, cache_type="extract"
)
if if_loop_result.strip().strip('"').strip("'").lower() != "yes":
break
return maybe_nodes, maybe_edges
```
## 5. Complete Concurrent Hierarchy Diagram
![lightrag_indexing.png](assets%2Flightrag_indexing.png)
### Chunk Internal Processing (Serial)
```
Initial Extraction → Gleaning → Loop Decision → Complete
```
## 6. Real-World Scenario Analysis
### Scenario 1: Single Document with Multiple Chunks
Assume 1 document with 6 chunks:
- **Document level**: Only 1 document, not limited by `max_parallel_insert`
- **Chunk level**: Maximum 4 chunks processed simultaneously (limited by `llm_model_max_async=4`)
- **LLM level**: Global maximum 4 LLM requests concurrent
**Expected behavior**: 4 chunks process concurrently, remaining 2 chunks wait.
### Scenario 2: Multiple Documents with Multiple Chunks
Assume 3 documents, each with 10 chunks:
- **Document level**: Maximum 2 documents processed simultaneously
- **Chunk level**: Maximum 4 chunks per document processed simultaneously
- **Theoretical Chunk concurrency**: 2 × 4 = 8 chunks processed simultaneously
- **Actual LLM concurrency**: Only 4 LLM requests actually execute
**Actual state distribution**:
```
# Possible system state:
Document 1: 4 chunks "processing" (2 executing LLM, 2 waiting for LLM response)
Document 2: 4 chunks "processing" (2 executing LLM, 2 waiting for LLM response)
Document 3: Waiting for document-level semaphore
Total:
- 8 chunks in "processing" state
- 4 LLM requests actually executing
- 4 chunks waiting for LLM response
```
## 7. Performance Optimization Recommendations
### Understanding the Bottleneck
The real bottleneck is the global LLM queue, not the chunk semaphores!
### Adjustment Strategies
**Strategy 1: Increase LLM Concurrent Capacity**
```bash
# Environment variable configuration
export MAX_PARALLEL_INSERT=2 # Keep document concurrency
export MAX_ASYNC=8 # 🔥 Increase LLM request concurrency
```
**Strategy 2: Balance Document and LLM Concurrency**
```python
rag = LightRAG(
max_parallel_insert=3, # Moderately increase document concurrency
llm_model_max_async=12, # Significantly increase LLM concurrency
entity_extract_max_gleaning=0, # Reduce serial steps within chunks
)
```
## 8. Summary
Key characteristics of LightRAG's multi-document concurrent processing mechanism:
### Concurrent Layers
1. **Inter-document competition**: Controlled by `max_parallel_insert`, default 2 documents concurrent
2. **Theoretical Chunk concurrency**: Each document independently creates semaphores, total = max_parallel_insert × llm_model_max_async
3. **Actual LLM concurrency**: All chunks share global LLM queue, controlled by `llm_model_max_async`
4. **Intra-chunk serial**: Multiple LLM requests within each chunk execute strictly serially
### Key Insights
- **Theoretical vs Actual**: System may have many chunks "in processing", but only few are actually executing LLM requests
- **Real Bottleneck**: Global LLM request queue is the performance bottleneck, not chunk semaphores
- **Optimization Focus**: Increasing `llm_model_max_async` is more effective than increasing `max_parallel_insert`
The recommended number of parallel document processing tasks is 1/4 of the LLM's concurrency, with a minimum of 2 and a maximum of 10. Setting a higher number of parallel document processing tasks typically does not accelerate overall document processing speed, as even a small number of concurrently processed documents can fully utilize the LLM's parallel processing capabilities. Excessive parallel document processing can significantly increase the processing time for each individual document. Since LightRAG commits processing results on a file-by-file basis, a large number of concurrent files would necessitate caching a substantial amount of data. In the event of a system error, all documents still in an intermediate processing state would require reprocessing, thereby increasing error-handling costs. For instance, setting `MAX_PARALLEL_INSERT` to 3 is appropriate when `MAX_ASYNC` is configured to 12.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 183 KiB

View file

@ -1,277 +0,0 @@
# LightRAG 多文档并发控制机制详解
LightRAG 在处理多个文档时采用了多层次的并发控制策略。本文将深入分析文档级别、chunk级别和LLM请求级别的并发控制机制帮助您理解为什么会出现特定的并发行为。
## 概述
LightRAG 的并发控制分为三个层次:
1. 文档级别并发:控制同时处理的文档数量
2. Chunk级别并发控制单个文档内同时处理的chunk数量
3. LLM请求级别并发控制全局LLM请求的并发数量
## 1. 文档级别并发控制
**控制参数**`max_parallel_insert`
文档级别的并发由 `max_parallel_insert` 参数控制默认值为2。
```python
# lightrag/lightrag.py
max_parallel_insert: int = field(default=int(os.getenv("MAX_PARALLEL_INSERT", 2)))
```
### 实现机制
`apipeline_process_enqueue_documents` 方法中,使用信号量控制文档并发:
```python
# lightrag/lightrag.py - apipeline_process_enqueue_documents方法
async def process_document(
doc_id: str,
status_doc: DocProcessingStatus,
split_by_character: str | None,
split_by_character_only: bool,
pipeline_status: dict,
pipeline_status_lock: asyncio.Lock,
semaphore: asyncio.Semaphore, # 文档级别信号量
) -> None:
"""Process single document"""
async with semaphore: # 🔥 文档级别并发控制
# ... 处理单个文档的所有chunks
# 创建文档级别信号量
semaphore = asyncio.Semaphore(self.max_parallel_insert) # 默认2
# 为每个文档创建处理任务
doc_tasks = []
for doc_id, status_doc in to_process_docs.items():
doc_tasks.append(
process_document(
doc_id, status_doc, split_by_character, split_by_character_only,
pipeline_status, pipeline_status_lock, semaphore
)
)
# 等待所有文档处理完成
await asyncio.gather(*doc_tasks)
```
## 2. Chunk级别并发控制
**控制参数**`llm_model_max_async`
**关键点**每个文档都会独立创建自己的chunk信号量
```python
# lightrag/lightrag.py
llm_model_max_async: int = field(default=int(os.getenv("MAX_ASYNC", 4)))
```
### 实现机制
`extract_entities` 函数中,**每个文档独立创建**自己的chunk信号量
```python
# lightrag/operate.py - extract_entities函数
async def extract_entities(chunks: dict[str, TextChunkSchema], global_config: dict[str, str], ...):
# 🔥 关键:每个文档都会独立创建这个信号量!
llm_model_max_async = global_config.get("llm_model_max_async", 4)
semaphore = asyncio.Semaphore(llm_model_max_async) # 每个文档的chunk信号量
async def _process_with_semaphore(chunk):
async with semaphore: # 🔥 文档内部的chunk并发控制
return await _process_single_content(chunk)
# 为每个chunk创建任务
tasks = []
for c in ordered_chunks:
task = asyncio.create_task(_process_with_semaphore(c))
tasks.append(task)
# 等待所有chunk处理完成
done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_EXCEPTION)
chunk_results = [task.result() for task in tasks]
return chunk_results
```
### 重要推论系统整体Chunk并发数
由于每个文档独立创建chunk信号量系统理论上的chunk并发数是
**理论Chunk并发数 = max_parallel_insert × llm_model_max_async**
例如:
- `max_parallel_insert = 2`同时处理2个文档
- `llm_model_max_async = 4`每个文档最多4个chunk并发
- 理论结果:最多 2 × 4 = 8个chunk同时处于"处理中"状态
## 3. LLM请求级别并发控制真正的瓶颈
**控制参数**`llm_model_max_async`(全局共享)
**关键**尽管可能有8个chunk在"处理中"但所有LLM请求共享同一个全局优先级队列
```python
# lightrag/lightrag.py - __post_init__方法
self.llm_model_func = priority_limit_async_func_call(self.llm_model_max_async)(
partial(
self.llm_model_func,
hashing_kv=hashing_kv,
**self.llm_model_kwargs,
)
)
# 🔥 全局LLM队列大小 = llm_model_max_async = 4
```
### 优先级队列实现
```python
# lightrag/utils.py - priority_limit_async_func_call函数
def priority_limit_async_func_call(max_size: int, max_queue_size: int = 1000):
def final_decro(func):
queue = asyncio.PriorityQueue(maxsize=max_queue_size)
tasks = set()
async def worker():
"""Worker that processes tasks in the priority queue"""
while not shutdown_event.is_set():
try:
priority, count, future, args, kwargs = await asyncio.wait_for(queue.get(), timeout=1.0)
result = await func(*args, **kwargs) # 🔥 实际LLM调用
if not future.done():
future.set_result(result)
except Exception as e:
# 错误处理...
finally:
queue.task_done()
# 🔥 创建固定数量的workermax_size个这是真正的并发限制
for _ in range(max_size):
task = asyncio.create_task(worker())
tasks.add(task)
```
## 4. Chunk内部处理机制串行
### 为什么是串行?
每个chunk内部的处理严格按照以下顺序串行执行
```python
# lightrag/operate.py - _process_single_content函数
async def _process_single_content(chunk_key_dp: tuple[str, TextChunkSchema]):
# 步骤1初始实体提取
hint_prompt = entity_extract_prompt.format(**{**context_base, "input_text": content})
final_result = await use_llm_func_with_cache(hint_prompt, use_llm_func, ...)
# 处理初始提取结果
maybe_nodes, maybe_edges = await _process_extraction_result(final_result, chunk_key, file_path)
# 步骤2Gleaning深挖阶段
for now_glean_index in range(entity_extract_max_gleaning):
# 🔥 串行等待gleaning结果
glean_result = await use_llm_func_with_cache(
continue_prompt, use_llm_func,
llm_response_cache=llm_response_cache,
history_messages=history, cache_type="extract"
)
# 处理gleaning结果
glean_nodes, glean_edges = await _process_extraction_result(glean_result, chunk_key, file_path)
# 合并结果...
# 步骤3判断是否继续循环
if now_glean_index == entity_extract_max_gleaning - 1:
break
# 🔥 串行等待循环判断结果
if_loop_result = await use_llm_func_with_cache(
if_loop_prompt, use_llm_func,
llm_response_cache=llm_response_cache,
history_messages=history, cache_type="extract"
)
if if_loop_result.strip().strip('"').strip("'").lower() != "yes":
break
return maybe_nodes, maybe_edges
```
## 5. 完整的并发层次图
![lightrag_indexing.png](..%2Fassets%2Flightrag_indexing.png)
## 6. 实际运行场景分析
### 场景1单文档多Chunk
假设有1个文档包含6个chunks
- 文档级别只有1个文档不受 `max_parallel_insert` 限制
- Chunk级别最多4个chunks同时处理`llm_model_max_async=4` 限制)
- LLM级别全局最多4个LLM请求并发
**预期行为**4个chunks并发处理剩余2个chunks等待。
### 场景2多文档多Chunk
假设有3个文档每个文档包含10个chunks
- 文档级别最多2个文档同时处理
- Chunk级别每个文档最多4个chunks同时处理
- 理论Chunk并发2 × 4 = 8个chunks同时处理
- 实际LLM并发只有4个LLM请求真正执行
**实际状态分布**
```
# 可能的系统状态:
文档1: 4个chunks"处理中"其中2个在执行LLM2个在等待LLM响应
文档2: 4个chunks"处理中"其中2个在执行LLM2个在等待LLM响应
文档3: 等待文档级别信号量
总计:
- 8个chunks处于"处理中"状态
- 4个LLM请求真正执行
- 4个chunks等待LLM响应
```
## 7. 性能优化建议
### 理解瓶颈
**真正的瓶颈是全局LLM队列而不是chunk信号量**
### 调整策略
**策略1提高LLM并发能力**
```bash
# 环境变量配置
export MAX_PARALLEL_INSERT=2 # 保持文档并发
export MAX_ASYNC=8 # 🔥 增加LLM请求并发数
```
**策略2平衡文档和LLM并发**
```python
rag = LightRAG(
max_parallel_insert=3, # 适度增加文档并发
llm_model_max_async=12, # 大幅增加LLM并发
entity_extract_max_gleaning=0, # 减少chunk内串行步骤
)
```
## 8. 总结
LightRAG的多文档并发处理机制的关键特点
### 并发层次
1. **文档间争抢**:受 `max_parallel_insert` 控制默认2个文档并发
2. **理论Chunk并发**:每个文档独立创建信号量,总数 = `max_parallel_insert × llm_model_max_async`
3. **实际LLM并发**所有chunk共享全局LLM队列`llm_model_max_async` 控制
4. **单Chunk内串行**每个chunk内的多个LLM请求严格串行执行
### 关键洞察
- **理论vs实际**系统可能有很多chunk在"处理中"但只有少数在真正执行LLM请求
- **真正瓶颈**全局LLM请求队列是性能瓶颈而不是chunk信号量
- **优化重点**:提高 `llm_model_max_async` 比增加 `max_parallel_insert` 更有效

View file

@ -46,7 +46,6 @@ OLLAMA_EMULATING_MODEL_TAG=latest
### Chunk size for document splitting, 500~1500 is recommended
# CHUNK_SIZE=1200
# CHUNK_OVERLAP_SIZE=100
# MAX_TOKEN_SUMMARY=500
### RAG Query Configuration
# HISTORY_TURNS=3
@ -94,8 +93,7 @@ TEMPERATURE=0
### Max concurrency requests of LLM
MAX_ASYNC=4
### MAX_TOKENS: max tokens send to LLM for entity relation summaries (less than context size of the model)
### MAX_TOKENS: set as num_ctx option for Ollama by API Server
MAX_TOKENS=32768
MAX_TOKENS=32000
### LLM Binding type: openai, ollama, lollms, azure_openai
LLM_BINDING=openai
LLM_MODEL=gpt-4o
@ -104,6 +102,8 @@ LLM_BINDING_API_KEY=your_api_key
### Optional for Azure
# AZURE_OPENAI_API_VERSION=2024-08-01-preview
# AZURE_OPENAI_DEPLOYMENT=gpt-4o
### set as num_ctx option for Ollama LLM
# OLLAMA_NUM_CTX=32768
### Embedding Configuration
### Embedding Binding type: openai, ollama, lollms, azure_openai
@ -116,7 +116,7 @@ EMBEDDING_BINDING_HOST=http://localhost:11434
### Num of chunks send to Embedding in single request
# EMBEDDING_BATCH_NUM=10
### Max concurrency requests for Embedding
# EMBEDDING_FUNC_MAX_ASYNC=16
# EMBEDDING_FUNC_MAX_ASYNC=8
### Maximum tokens sent to Embedding for each chunk (no longer in use?)
# MAX_EMBED_TOKENS=8192
### Optional for Azure

View file

@ -54,8 +54,8 @@ LLM_BINDING=openai
LLM_MODEL=gpt-4o
LLM_BINDING_HOST=https://api.openai.com/v1
LLM_BINDING_API_KEY=your_api_key
### 发送给 LLM 的最大 token 数(小于模型上下文大小)
MAX_TOKENS=32768
### 发送给 LLM 进行实体关系摘要的最大 token 数(小于模型上下文大小)
MAX_TOKENS=32000
EMBEDDING_BINDING=ollama
EMBEDDING_BINDING_HOST=http://localhost:11434
@ -71,8 +71,10 @@ LLM_BINDING=ollama
LLM_MODEL=mistral-nemo:latest
LLM_BINDING_HOST=http://localhost:11434
# LLM_BINDING_API_KEY=your_api_key
### 发送给 LLM 的最大 token 数(基于您的 Ollama 服务器容量)
MAX_TOKENS=8192
### 发送给 LLM 进行实体关系摘要的最大 token 数(小于模型上下文大小)
MAX_TOKENS=7500
### Ollama 服务器上下文 token 数(基于您的 Ollama 服务器容量)
OLLAMA_NUM_CTX=8192
EMBEDDING_BINDING=ollama
EMBEDDING_BINDING_HOST=http://localhost:11434

View file

@ -71,8 +71,10 @@ LLM_BINDING=ollama
LLM_MODEL=mistral-nemo:latest
LLM_BINDING_HOST=http://localhost:11434
# LLM_BINDING_API_KEY=your_api_key
### Max tokens sent to LLM (based on your Ollama Server capacity)
MAX_TOKENS=8192
### Max tokens sent to LLM for entity relation description summarization (Less than LLM context length)
MAX_TOKENS=7500
### Ollama Server context length
OLLAMA_NUM_CTX=8192
EMBEDDING_BINDING=ollama
EMBEDDING_BINDING_HOST=http://localhost:11434

View file

@ -1 +1 @@
__api_version__ = "0180"
__api_version__ = "0182"

View file

@ -108,7 +108,7 @@ def parse_args() -> argparse.Namespace:
parser.add_argument(
"--max-tokens",
type=int,
default=get_env_value("MAX_TOKENS", 32768, int),
default=get_env_value("MAX_TOKENS", 32000, int),
help="Maximum token size (default: from env or 32000)",
)
@ -270,6 +270,9 @@ def parse_args() -> argparse.Namespace:
args.llm_binding = "openai"
args.embedding_binding = "ollama"
# Ollama ctx_num
args.ollama_num_ctx = get_env_value("OLLAMA_NUM_CTX", 32768, int)
args.llm_binding_host = get_env_value(
"LLM_BINDING_HOST", get_default_host(args.llm_binding)
)

View file

@ -52,6 +52,7 @@ from lightrag.kg.shared_storage import (
get_namespace_data,
get_pipeline_status_lock,
initialize_pipeline_status,
cleanup_keyed_lock,
)
from fastapi.security import OAuth2PasswordRequestForm
from lightrag.api.auth import auth_handler
@ -335,7 +336,7 @@ def create_app(args):
llm_model_kwargs={
"host": args.llm_binding_host,
"timeout": args.timeout,
"options": {"num_ctx": args.max_tokens},
"options": {"num_ctx": args.ollama_num_ctx},
"api_key": args.llm_binding_api_key,
}
if args.llm_binding == "lollms" or args.llm_binding == "ollama"
@ -486,6 +487,9 @@ def create_app(args):
else:
auth_mode = "enabled"
# Cleanup expired keyed locks and get status
keyed_lock_info = cleanup_keyed_lock()
return {
"status": "healthy",
"working_directory": str(args.working_dir),
@ -517,6 +521,7 @@ def create_app(args):
},
"auth_mode": auth_mode,
"pipeline_busy": pipeline_status.get("busy", False),
"keyed_locks": keyed_lock_info,
"core_version": core_version,
"api_version": __api_version__,
"webui_title": webui_title,

View file

@ -1 +1 @@
import{e as v,c as b,g as m,k as O,h as P,j as p,l as w,m as c,n as x,t as A,o as N}from"./_baseUniq-CoKY6BVy.js";import{aU as g,aq as _,aV as $,aW as E,aX as F,aY as I,aZ as M,a_ as y,a$ as B,b0 as T}from"./mermaid-vendor-BVBgFwCv.js";var S=/\s/;function q(n){for(var r=n.length;r--&&S.test(n.charAt(r)););return r}var G=/^\s+/;function H(n){return n&&n.slice(0,q(n)+1).replace(G,"")}var o=NaN,L=/^[-+]0x[0-9a-f]+$/i,R=/^0b[01]+$/i,W=/^0o[0-7]+$/i,X=parseInt;function Y(n){if(typeof n=="number")return n;if(v(n))return o;if(g(n)){var r=typeof n.valueOf=="function"?n.valueOf():n;n=g(r)?r+"":r}if(typeof n!="string")return n===0?n:+n;n=H(n);var t=R.test(n);return t||W.test(n)?X(n.slice(2),t?2:8):L.test(n)?o:+n}var z=1/0,C=17976931348623157e292;function K(n){if(!n)return n===0?n:0;if(n=Y(n),n===z||n===-1/0){var r=n<0?-1:1;return r*C}return n===n?n:0}function U(n){var r=K(n),t=r%1;return r===r?t?r-t:r:0}function fn(n){var r=n==null?0:n.length;return r?b(n):[]}var l=Object.prototype,Z=l.hasOwnProperty,dn=_(function(n,r){n=Object(n);var t=-1,e=r.length,a=e>2?r[2]:void 0;for(a&&$(r[0],r[1],a)&&(e=1);++t<e;)for(var f=r[t],i=E(f),s=-1,d=i.length;++s<d;){var u=i[s],h=n[u];(h===void 0||F(h,l[u])&&!Z.call(n,u))&&(n[u]=f[u])}return n});function un(n){var r=n==null?0:n.length;return r?n[r-1]:void 0}function D(n){return function(r,t,e){var a=Object(r);if(!I(r)){var f=m(t);r=O(r),t=function(s){return f(a[s],s,a)}}var i=n(r,t,e);return i>-1?a[f?r[i]:i]:void 0}}var J=Math.max;function Q(n,r,t){var e=n==null?0:n.length;if(!e)return-1;var a=t==null?0:U(t);return a<0&&(a=J(e+a,0)),P(n,m(r),a)}var hn=D(Q);function V(n,r){var t=-1,e=I(n)?Array(n.length):[];return p(n,function(a,f,i){e[++t]=r(a,f,i)}),e}function gn(n,r){var t=M(n)?w:V;return t(n,m(r))}var j=Object.prototype,k=j.hasOwnProperty;function nn(n,r){return n!=null&&k.call(n,r)}function mn(n,r){return n!=null&&c(n,r,nn)}function rn(n,r){return n<r}function tn(n,r,t){for(var e=-1,a=n.length;++e<a;){var 
f=n[e],i=r(f);if(i!=null&&(s===void 0?i===i&&!v(i):t(i,s)))var s=i,d=f}return d}function on(n){return n&&n.length?tn(n,y,rn):void 0}function an(n,r,t,e){if(!g(n))return n;r=x(r,n);for(var a=-1,f=r.length,i=f-1,s=n;s!=null&&++a<f;){var d=A(r[a]),u=t;if(d==="__proto__"||d==="constructor"||d==="prototype")return n;if(a!=i){var h=s[d];u=void 0,u===void 0&&(u=g(h)?h:B(r[a+1])?[]:{})}T(s,d,u),s=s[d]}return n}function vn(n,r,t){for(var e=-1,a=r.length,f={};++e<a;){var i=r[e],s=N(n,i);t(s,i)&&an(f,x(i,n),s)}return f}export{rn as a,tn as b,V as c,vn as d,on as e,fn as f,hn as g,mn as h,dn as i,U as j,un as l,gn as m,K as t};
import{e as v,c as b,g as m,k as O,h as P,j as p,l as w,m as c,n as x,t as A,o as N}from"./_baseUniq-CtAZZJ8e.js";import{aU as g,aq as _,aV as $,aW as E,aX as F,aY as I,aZ as M,a_ as y,a$ as B,b0 as T}from"./mermaid-vendor-D0f_SE0h.js";var S=/\s/;function q(n){for(var r=n.length;r--&&S.test(n.charAt(r)););return r}var G=/^\s+/;function H(n){return n&&n.slice(0,q(n)+1).replace(G,"")}var o=NaN,L=/^[-+]0x[0-9a-f]+$/i,R=/^0b[01]+$/i,W=/^0o[0-7]+$/i,X=parseInt;function Y(n){if(typeof n=="number")return n;if(v(n))return o;if(g(n)){var r=typeof n.valueOf=="function"?n.valueOf():n;n=g(r)?r+"":r}if(typeof n!="string")return n===0?n:+n;n=H(n);var t=R.test(n);return t||W.test(n)?X(n.slice(2),t?2:8):L.test(n)?o:+n}var z=1/0,C=17976931348623157e292;function K(n){if(!n)return n===0?n:0;if(n=Y(n),n===z||n===-1/0){var r=n<0?-1:1;return r*C}return n===n?n:0}function U(n){var r=K(n),t=r%1;return r===r?t?r-t:r:0}function fn(n){var r=n==null?0:n.length;return r?b(n):[]}var l=Object.prototype,Z=l.hasOwnProperty,dn=_(function(n,r){n=Object(n);var t=-1,e=r.length,a=e>2?r[2]:void 0;for(a&&$(r[0],r[1],a)&&(e=1);++t<e;)for(var f=r[t],i=E(f),s=-1,d=i.length;++s<d;){var u=i[s],h=n[u];(h===void 0||F(h,l[u])&&!Z.call(n,u))&&(n[u]=f[u])}return n});function un(n){var r=n==null?0:n.length;return r?n[r-1]:void 0}function D(n){return function(r,t,e){var a=Object(r);if(!I(r)){var f=m(t);r=O(r),t=function(s){return f(a[s],s,a)}}var i=n(r,t,e);return i>-1?a[f?r[i]:i]:void 0}}var J=Math.max;function Q(n,r,t){var e=n==null?0:n.length;if(!e)return-1;var a=t==null?0:U(t);return a<0&&(a=J(e+a,0)),P(n,m(r),a)}var hn=D(Q);function V(n,r){var t=-1,e=I(n)?Array(n.length):[];return p(n,function(a,f,i){e[++t]=r(a,f,i)}),e}function gn(n,r){var t=M(n)?w:V;return t(n,m(r))}var j=Object.prototype,k=j.hasOwnProperty;function nn(n,r){return n!=null&&k.call(n,r)}function mn(n,r){return n!=null&&c(n,r,nn)}function rn(n,r){return n<r}function tn(n,r,t){for(var e=-1,a=n.length;++e<a;){var 
f=n[e],i=r(f);if(i!=null&&(s===void 0?i===i&&!v(i):t(i,s)))var s=i,d=f}return d}function on(n){return n&&n.length?tn(n,y,rn):void 0}function an(n,r,t,e){if(!g(n))return n;r=x(r,n);for(var a=-1,f=r.length,i=f-1,s=n;s!=null&&++a<f;){var d=A(r[a]),u=t;if(d==="__proto__"||d==="constructor"||d==="prototype")return n;if(a!=i){var h=s[d];u=void 0,u===void 0&&(u=g(h)?h:B(r[a+1])?[]:{})}T(s,d,u),s=s[d]}return n}function vn(n,r,t){for(var e=-1,a=r.length,f={};++e<a;){var i=r[e],s=N(n,i);t(s,i)&&an(f,x(i,n),s)}return f}export{rn as a,tn as b,V as c,vn as d,on as e,fn as f,hn as g,mn as h,dn as i,U as j,un as l,gn as m,K as t};

File diff suppressed because one or more lines are too long

View file

@ -1 +1 @@
import{_ as l}from"./mermaid-vendor-BVBgFwCv.js";function m(e,c){var i,t,o;e.accDescr&&((i=c.setAccDescription)==null||i.call(c,e.accDescr)),e.accTitle&&((t=c.setAccTitle)==null||t.call(c,e.accTitle)),e.title&&((o=c.setDiagramTitle)==null||o.call(c,e.title))}l(m,"populateCommonDb");export{m as p};
import{_ as l}from"./mermaid-vendor-D0f_SE0h.js";function m(e,c){var i,t,o;e.accDescr&&((i=c.setAccDescription)==null||i.call(c,e.accDescr)),e.accTitle&&((t=c.setAccTitle)==null||t.call(c,e.accTitle)),e.title&&((o=c.setDiagramTitle)==null||o.call(c,e.title))}l(m,"populateCommonDb");export{m as p};

View file

@ -1 +1 @@
import{_ as n,a1 as x,j as l}from"./mermaid-vendor-BVBgFwCv.js";var c=n((a,t)=>{const e=a.append("rect");if(e.attr("x",t.x),e.attr("y",t.y),e.attr("fill",t.fill),e.attr("stroke",t.stroke),e.attr("width",t.width),e.attr("height",t.height),t.name&&e.attr("name",t.name),t.rx&&e.attr("rx",t.rx),t.ry&&e.attr("ry",t.ry),t.attrs!==void 0)for(const r in t.attrs)e.attr(r,t.attrs[r]);return t.class&&e.attr("class",t.class),e},"drawRect"),d=n((a,t)=>{const e={x:t.startx,y:t.starty,width:t.stopx-t.startx,height:t.stopy-t.starty,fill:t.fill,stroke:t.stroke,class:"rect"};c(a,e).lower()},"drawBackgroundRect"),g=n((a,t)=>{const e=t.text.replace(x," "),r=a.append("text");r.attr("x",t.x),r.attr("y",t.y),r.attr("class","legend"),r.style("text-anchor",t.anchor),t.class&&r.attr("class",t.class);const s=r.append("tspan");return s.attr("x",t.x+t.textMargin*2),s.text(e),r},"drawText"),h=n((a,t,e,r)=>{const s=a.append("image");s.attr("x",t),s.attr("y",e);const i=l.sanitizeUrl(r);s.attr("xlink:href",i)},"drawImage"),m=n((a,t,e,r)=>{const s=a.append("use");s.attr("x",t),s.attr("y",e);const i=l.sanitizeUrl(r);s.attr("xlink:href",`#${i}`)},"drawEmbeddedImage"),y=n(()=>({x:0,y:0,width:100,height:100,fill:"#EDF2AE",stroke:"#666",anchor:"start",rx:0,ry:0}),"getNoteRect"),p=n(()=>({x:0,y:0,width:100,height:100,"text-anchor":"start",style:"#666",textMargin:0,rx:0,ry:0,tspan:!0}),"getTextObj");export{d as a,p as b,m as c,c as d,h as e,g as f,y as g};
import{_ as n,a1 as x,j as l}from"./mermaid-vendor-D0f_SE0h.js";var c=n((a,t)=>{const e=a.append("rect");if(e.attr("x",t.x),e.attr("y",t.y),e.attr("fill",t.fill),e.attr("stroke",t.stroke),e.attr("width",t.width),e.attr("height",t.height),t.name&&e.attr("name",t.name),t.rx&&e.attr("rx",t.rx),t.ry&&e.attr("ry",t.ry),t.attrs!==void 0)for(const r in t.attrs)e.attr(r,t.attrs[r]);return t.class&&e.attr("class",t.class),e},"drawRect"),d=n((a,t)=>{const e={x:t.startx,y:t.starty,width:t.stopx-t.startx,height:t.stopy-t.starty,fill:t.fill,stroke:t.stroke,class:"rect"};c(a,e).lower()},"drawBackgroundRect"),g=n((a,t)=>{const e=t.text.replace(x," "),r=a.append("text");r.attr("x",t.x),r.attr("y",t.y),r.attr("class","legend"),r.style("text-anchor",t.anchor),t.class&&r.attr("class",t.class);const s=r.append("tspan");return s.attr("x",t.x+t.textMargin*2),s.text(e),r},"drawText"),h=n((a,t,e,r)=>{const s=a.append("image");s.attr("x",t),s.attr("y",e);const i=l.sanitizeUrl(r);s.attr("xlink:href",i)},"drawImage"),m=n((a,t,e,r)=>{const s=a.append("use");s.attr("x",t),s.attr("y",e);const i=l.sanitizeUrl(r);s.attr("xlink:href",`#${i}`)},"drawEmbeddedImage"),y=n(()=>({x:0,y:0,width:100,height:100,fill:"#EDF2AE",stroke:"#666",anchor:"start",rx:0,ry:0}),"getNoteRect"),p=n(()=>({x:0,y:0,width:100,height:100,"text-anchor":"start",style:"#666",textMargin:0,rx:0,ry:0,tspan:!0}),"getTextObj");export{d as a,p as b,m as c,c as d,h as e,g as f,y as g};

View file

@ -1 +1 @@
import{_ as n,d as r,e as d,l as g}from"./mermaid-vendor-BVBgFwCv.js";var u=n((e,t)=>{let o;return t==="sandbox"&&(o=r("#i"+e)),(t==="sandbox"?r(o.nodes()[0].contentDocument.body):r("body")).select(`[id="${e}"]`)},"getDiagramElement"),b=n((e,t,o,i)=>{e.attr("class",o);const{width:a,height:s,x:h,y:x}=l(e,t);d(e,s,a,i);const c=w(h,x,a,s,t);e.attr("viewBox",c),g.debug(`viewBox configured: ${c} with padding: ${t}`)},"setupViewPortForSVG"),l=n((e,t)=>{var i;const o=((i=e.node())==null?void 0:i.getBBox())||{width:0,height:0,x:0,y:0};return{width:o.width+t*2,height:o.height+t*2,x:o.x,y:o.y}},"calculateDimensionsWithPadding"),w=n((e,t,o,i,a)=>`${e-a} ${t-a} ${o} ${i}`,"createViewBox");export{u as g,b as s};
import{_ as n,d as r,e as d,l as g}from"./mermaid-vendor-D0f_SE0h.js";var u=n((e,t)=>{let o;return t==="sandbox"&&(o=r("#i"+e)),(t==="sandbox"?r(o.nodes()[0].contentDocument.body):r("body")).select(`[id="${e}"]`)},"getDiagramElement"),b=n((e,t,o,i)=>{e.attr("class",o);const{width:a,height:s,x:h,y:x}=l(e,t);d(e,s,a,i);const c=w(h,x,a,s,t);e.attr("viewBox",c),g.debug(`viewBox configured: ${c} with padding: ${t}`)},"setupViewPortForSVG"),l=n((e,t)=>{var i;const o=((i=e.node())==null?void 0:i.getBBox())||{width:0,height:0,x:0,y:0};return{width:o.width+t*2,height:o.height+t*2,x:o.x,y:o.y}},"calculateDimensionsWithPadding"),w=n((e,t,o,i,a)=>`${e-a} ${t-a} ${o} ${i}`,"createViewBox");export{u as g,b as s};

View file

@ -1 +1 @@
import{_ as s}from"./mermaid-vendor-BVBgFwCv.js";var t,e=(t=class{constructor(i){this.init=i,this.records=this.init()}reset(){this.records=this.init()}},s(t,"ImperativeState"),t);export{e as I};
import{_ as s}from"./mermaid-vendor-D0f_SE0h.js";var t,e=(t=class{constructor(i){this.init=i,this.records=this.init()}reset(){this.records=this.init()}},s(t,"ImperativeState"),t);export{e as I};

View file

@ -1 +1 @@
import{s as a,c as s,a as e,C as t}from"./chunk-A2AXSNBT-79sAOFxS.js";import{_ as i}from"./mermaid-vendor-BVBgFwCv.js";import"./chunk-RZ5BOZE2-PbmQbmec.js";import"./feature-graph-D-mwOi0p.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";var f={parser:e,get db(){return new t},renderer:s,styles:a,init:i(r=>{r.class||(r.class={}),r.class.arrowMarkerAbsolute=r.arrowMarkerAbsolute},"init")};export{f as diagram};
import{s as a,c as s,a as e,C as t}from"./chunk-A2AXSNBT-B91iiasA.js";import{_ as i}from"./mermaid-vendor-D0f_SE0h.js";import"./chunk-RZ5BOZE2-B615FLH4.js";import"./feature-graph-NODQb6qW.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";var f={parser:e,get db(){return new t},renderer:s,styles:a,init:i(r=>{r.class||(r.class={}),r.class.arrowMarkerAbsolute=r.arrowMarkerAbsolute},"init")};export{f as diagram};

View file

@ -1 +1 @@
import{s as a,c as s,a as e,C as t}from"./chunk-A2AXSNBT-79sAOFxS.js";import{_ as i}from"./mermaid-vendor-BVBgFwCv.js";import"./chunk-RZ5BOZE2-PbmQbmec.js";import"./feature-graph-D-mwOi0p.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";var f={parser:e,get db(){return new t},renderer:s,styles:a,init:i(r=>{r.class||(r.class={}),r.class.arrowMarkerAbsolute=r.arrowMarkerAbsolute},"init")};export{f as diagram};
import{s as a,c as s,a as e,C as t}from"./chunk-A2AXSNBT-B91iiasA.js";import{_ as i}from"./mermaid-vendor-D0f_SE0h.js";import"./chunk-RZ5BOZE2-B615FLH4.js";import"./feature-graph-NODQb6qW.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";var f={parser:e,get db(){return new t},renderer:s,styles:a,init:i(r=>{r.class||(r.class={}),r.class.arrowMarkerAbsolute=r.arrowMarkerAbsolute},"init")};export{f as diagram};

View file

@ -0,0 +1 @@
import{b as r}from"./_baseUniq-CtAZZJ8e.js";var e=4;function a(o){return r(o,e)}export{a as c};

View file

@ -1 +0,0 @@
import{b as r}from"./_baseUniq-CoKY6BVy.js";var e=4;function a(o){return r(o,e)}export{a as c};

View file

@ -1,4 +1,4 @@
import{p as k}from"./chunk-4BMEZGHF-BqribV_z.js";import{_ as l,s as R,g as F,t as I,q as _,a as E,b as D,K as G,z,F as y,G as C,H as P,l as H,Q as V}from"./mermaid-vendor-BVBgFwCv.js";import{p as W}from"./radar-MK3ICKWK-B0N6XiM2.js";import"./feature-graph-D-mwOi0p.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";import"./_baseUniq-CoKY6BVy.js";import"./_basePickBy-BVZSZRdU.js";import"./clone-UTZGcTvC.js";var h={showLegend:!0,ticks:5,max:null,min:0,graticule:"circle"},w={axes:[],curves:[],options:h},g=structuredClone(w),B=P.radar,j=l(()=>y({...B,...C().radar}),"getConfig"),b=l(()=>g.axes,"getAxes"),q=l(()=>g.curves,"getCurves"),K=l(()=>g.options,"getOptions"),N=l(a=>{g.axes=a.map(t=>({name:t.name,label:t.label??t.name}))},"setAxes"),Q=l(a=>{g.curves=a.map(t=>({name:t.name,label:t.label??t.name,entries:U(t.entries)}))},"setCurves"),U=l(a=>{if(a[0].axis==null)return a.map(e=>e.value);const t=b();if(t.length===0)throw new Error("Axes must be populated before curves for reference entries");return t.map(e=>{const r=a.find(s=>{var o;return((o=s.axis)==null?void 0:o.$refText)===e.name});if(r===void 0)throw new Error("Missing entry for axis "+e.label);return r.value})},"computeCurveEntries"),X=l(a=>{var e,r,s,o,i;const t=a.reduce((n,c)=>(n[c.name]=c,n),{});g.options={showLegend:((e=t.showLegend)==null?void 0:e.value)??h.showLegend,ticks:((r=t.ticks)==null?void 0:r.value)??h.ticks,max:((s=t.max)==null?void 0:s.value)??h.max,min:((o=t.min)==null?void 0:o.value)??h.min,graticule:((i=t.graticule)==null?void 
0:i.value)??h.graticule}},"setOptions"),Y=l(()=>{z(),g=structuredClone(w)},"clear"),$={getAxes:b,getCurves:q,getOptions:K,setAxes:N,setCurves:Q,setOptions:X,getConfig:j,clear:Y,setAccTitle:D,getAccTitle:E,setDiagramTitle:_,getDiagramTitle:I,getAccDescription:F,setAccDescription:R},Z=l(a=>{k(a,$);const{axes:t,curves:e,options:r}=a;$.setAxes(t),$.setCurves(e),$.setOptions(r)},"populate"),J={parse:l(async a=>{const t=await W("radar",a);H.debug(t),Z(t)},"parse")},tt=l((a,t,e,r)=>{const s=r.db,o=s.getAxes(),i=s.getCurves(),n=s.getOptions(),c=s.getConfig(),d=s.getDiagramTitle(),u=G(t),p=et(u,c),m=n.max??Math.max(...i.map(f=>Math.max(...f.entries))),x=n.min,v=Math.min(c.width,c.height)/2;at(p,o,v,n.ticks,n.graticule),rt(p,o,v,c),M(p,o,i,x,m,n.graticule,c),T(p,i,n.showLegend,c),p.append("text").attr("class","radarTitle").text(d).attr("x",0).attr("y",-c.height/2-c.marginTop)},"draw"),et=l((a,t)=>{const e=t.width+t.marginLeft+t.marginRight,r=t.height+t.marginTop+t.marginBottom,s={x:t.marginLeft+t.width/2,y:t.marginTop+t.height/2};return a.attr("viewbox",`0 0 ${e} ${r}`).attr("width",e).attr("height",r),a.append("g").attr("transform",`translate(${s.x}, ${s.y})`)},"drawFrame"),at=l((a,t,e,r,s)=>{if(s==="circle")for(let o=0;o<r;o++){const i=e*(o+1)/r;a.append("circle").attr("r",i).attr("class","radarGraticule")}else if(s==="polygon"){const o=t.length;for(let i=0;i<r;i++){const n=e*(i+1)/r,c=t.map((d,u)=>{const p=2*u*Math.PI/o-Math.PI/2,m=n*Math.cos(p),x=n*Math.sin(p);return`${m},${x}`}).join(" ");a.append("polygon").attr("points",c).attr("class","radarGraticule")}}},"drawGraticule"),rt=l((a,t,e,r)=>{const s=t.length;for(let o=0;o<s;o++){const 
i=t[o].label,n=2*o*Math.PI/s-Math.PI/2;a.append("line").attr("x1",0).attr("y1",0).attr("x2",e*r.axisScaleFactor*Math.cos(n)).attr("y2",e*r.axisScaleFactor*Math.sin(n)).attr("class","radarAxisLine"),a.append("text").text(i).attr("x",e*r.axisLabelFactor*Math.cos(n)).attr("y",e*r.axisLabelFactor*Math.sin(n)).attr("class","radarAxisLabel")}},"drawAxes");function M(a,t,e,r,s,o,i){const n=t.length,c=Math.min(i.width,i.height)/2;e.forEach((d,u)=>{if(d.entries.length!==n)return;const p=d.entries.map((m,x)=>{const v=2*Math.PI*x/n-Math.PI/2,f=A(m,r,s,c),O=f*Math.cos(v),S=f*Math.sin(v);return{x:O,y:S}});o==="circle"?a.append("path").attr("d",L(p,i.curveTension)).attr("class",`radarCurve-${u}`):o==="polygon"&&a.append("polygon").attr("points",p.map(m=>`${m.x},${m.y}`).join(" ")).attr("class",`radarCurve-${u}`)})}l(M,"drawCurves");function A(a,t,e,r){const s=Math.min(Math.max(a,t),e);return r*(s-t)/(e-t)}l(A,"relativeRadius");function L(a,t){const e=a.length;let r=`M${a[0].x},${a[0].y}`;for(let s=0;s<e;s++){const o=a[(s-1+e)%e],i=a[s],n=a[(s+1)%e],c=a[(s+2)%e],d={x:i.x+(n.x-o.x)*t,y:i.y+(n.y-o.y)*t},u={x:n.x-(c.x-i.x)*t,y:n.y-(c.y-i.y)*t};r+=` C${d.x},${d.y} ${u.x},${u.y} ${n.x},${n.y}`}return`${r} Z`}l(L,"closedRoundCurve");function T(a,t,e,r){if(!e)return;const s=(r.width/2+r.marginRight)*3/4,o=-(r.height/2+r.marginTop)*3/4,i=20;t.forEach((n,c)=>{const d=a.append("g").attr("transform",`translate(${s}, ${o+c*i})`);d.append("rect").attr("width",12).attr("height",12).attr("class",`radarLegendBox-${c}`),d.append("text").attr("x",16).attr("y",0).attr("class","radarLegendText").text(n.label)})}l(T,"drawLegend");var st={draw:tt},nt=l((a,t)=>{let e="";for(let r=0;r<a.THEME_COLOR_LIMIT;r++){const s=a[`cScale${r}`];e+=`
import{p as k}from"./chunk-4BMEZGHF-CAhtCpmT.js";import{_ as l,s as R,g as F,t as I,q as _,a as E,b as D,K as G,z,F as y,G as C,H as P,l as H,Q as V}from"./mermaid-vendor-D0f_SE0h.js";import{p as W}from"./radar-MK3ICKWK-DOAXm8cx.js";import"./feature-graph-NODQb6qW.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";import"./_baseUniq-CtAZZJ8e.js";import"./_basePickBy-D3PHsJjq.js";import"./clone-Dm5jEAXQ.js";var h={showLegend:!0,ticks:5,max:null,min:0,graticule:"circle"},w={axes:[],curves:[],options:h},g=structuredClone(w),B=P.radar,j=l(()=>y({...B,...C().radar}),"getConfig"),b=l(()=>g.axes,"getAxes"),q=l(()=>g.curves,"getCurves"),K=l(()=>g.options,"getOptions"),N=l(a=>{g.axes=a.map(t=>({name:t.name,label:t.label??t.name}))},"setAxes"),Q=l(a=>{g.curves=a.map(t=>({name:t.name,label:t.label??t.name,entries:U(t.entries)}))},"setCurves"),U=l(a=>{if(a[0].axis==null)return a.map(e=>e.value);const t=b();if(t.length===0)throw new Error("Axes must be populated before curves for reference entries");return t.map(e=>{const r=a.find(s=>{var o;return((o=s.axis)==null?void 0:o.$refText)===e.name});if(r===void 0)throw new Error("Missing entry for axis "+e.label);return r.value})},"computeCurveEntries"),X=l(a=>{var e,r,s,o,i;const t=a.reduce((n,c)=>(n[c.name]=c,n),{});g.options={showLegend:((e=t.showLegend)==null?void 0:e.value)??h.showLegend,ticks:((r=t.ticks)==null?void 0:r.value)??h.ticks,max:((s=t.max)==null?void 0:s.value)??h.max,min:((o=t.min)==null?void 0:o.value)??h.min,graticule:((i=t.graticule)==null?void 
0:i.value)??h.graticule}},"setOptions"),Y=l(()=>{z(),g=structuredClone(w)},"clear"),$={getAxes:b,getCurves:q,getOptions:K,setAxes:N,setCurves:Q,setOptions:X,getConfig:j,clear:Y,setAccTitle:D,getAccTitle:E,setDiagramTitle:_,getDiagramTitle:I,getAccDescription:F,setAccDescription:R},Z=l(a=>{k(a,$);const{axes:t,curves:e,options:r}=a;$.setAxes(t),$.setCurves(e),$.setOptions(r)},"populate"),J={parse:l(async a=>{const t=await W("radar",a);H.debug(t),Z(t)},"parse")},tt=l((a,t,e,r)=>{const s=r.db,o=s.getAxes(),i=s.getCurves(),n=s.getOptions(),c=s.getConfig(),d=s.getDiagramTitle(),u=G(t),p=et(u,c),m=n.max??Math.max(...i.map(f=>Math.max(...f.entries))),x=n.min,v=Math.min(c.width,c.height)/2;at(p,o,v,n.ticks,n.graticule),rt(p,o,v,c),M(p,o,i,x,m,n.graticule,c),T(p,i,n.showLegend,c),p.append("text").attr("class","radarTitle").text(d).attr("x",0).attr("y",-c.height/2-c.marginTop)},"draw"),et=l((a,t)=>{const e=t.width+t.marginLeft+t.marginRight,r=t.height+t.marginTop+t.marginBottom,s={x:t.marginLeft+t.width/2,y:t.marginTop+t.height/2};return a.attr("viewbox",`0 0 ${e} ${r}`).attr("width",e).attr("height",r),a.append("g").attr("transform",`translate(${s.x}, ${s.y})`)},"drawFrame"),at=l((a,t,e,r,s)=>{if(s==="circle")for(let o=0;o<r;o++){const i=e*(o+1)/r;a.append("circle").attr("r",i).attr("class","radarGraticule")}else if(s==="polygon"){const o=t.length;for(let i=0;i<r;i++){const n=e*(i+1)/r,c=t.map((d,u)=>{const p=2*u*Math.PI/o-Math.PI/2,m=n*Math.cos(p),x=n*Math.sin(p);return`${m},${x}`}).join(" ");a.append("polygon").attr("points",c).attr("class","radarGraticule")}}},"drawGraticule"),rt=l((a,t,e,r)=>{const s=t.length;for(let o=0;o<s;o++){const 
i=t[o].label,n=2*o*Math.PI/s-Math.PI/2;a.append("line").attr("x1",0).attr("y1",0).attr("x2",e*r.axisScaleFactor*Math.cos(n)).attr("y2",e*r.axisScaleFactor*Math.sin(n)).attr("class","radarAxisLine"),a.append("text").text(i).attr("x",e*r.axisLabelFactor*Math.cos(n)).attr("y",e*r.axisLabelFactor*Math.sin(n)).attr("class","radarAxisLabel")}},"drawAxes");function M(a,t,e,r,s,o,i){const n=t.length,c=Math.min(i.width,i.height)/2;e.forEach((d,u)=>{if(d.entries.length!==n)return;const p=d.entries.map((m,x)=>{const v=2*Math.PI*x/n-Math.PI/2,f=A(m,r,s,c),O=f*Math.cos(v),S=f*Math.sin(v);return{x:O,y:S}});o==="circle"?a.append("path").attr("d",L(p,i.curveTension)).attr("class",`radarCurve-${u}`):o==="polygon"&&a.append("polygon").attr("points",p.map(m=>`${m.x},${m.y}`).join(" ")).attr("class",`radarCurve-${u}`)})}l(M,"drawCurves");function A(a,t,e,r){const s=Math.min(Math.max(a,t),e);return r*(s-t)/(e-t)}l(A,"relativeRadius");function L(a,t){const e=a.length;let r=`M${a[0].x},${a[0].y}`;for(let s=0;s<e;s++){const o=a[(s-1+e)%e],i=a[s],n=a[(s+1)%e],c=a[(s+2)%e],d={x:i.x+(n.x-o.x)*t,y:i.y+(n.y-o.y)*t},u={x:n.x-(c.x-i.x)*t,y:n.y-(c.y-i.y)*t};r+=` C${d.x},${d.y} ${u.x},${u.y} ${n.x},${n.y}`}return`${r} Z`}l(L,"closedRoundCurve");function T(a,t,e,r){if(!e)return;const s=(r.width/2+r.marginRight)*3/4,o=-(r.height/2+r.marginTop)*3/4,i=20;t.forEach((n,c)=>{const d=a.append("g").attr("transform",`translate(${s}, ${o+c*i})`);d.append("rect").attr("width",12).attr("height",12).attr("class",`radarLegendBox-${c}`),d.append("text").attr("x",16).attr("y",0).attr("class","radarLegendText").text(n.label)})}l(T,"drawLegend");var st={draw:tt},nt=l((a,t)=>{let e="";for(let r=0;r<a.THEME_COLOR_LIMIT;r++){const s=a[`cScale${r}`];e+=`
.radarCurve-${r} {
color: ${s};
fill: ${s};

View file

@ -1,4 +1,4 @@
import{p as w}from"./chunk-4BMEZGHF-BqribV_z.js";import{_ as n,s as B,g as S,t as F,q as z,a as P,b as W,F as x,K as T,e as D,z as _,G as A,H as E,l as v}from"./mermaid-vendor-BVBgFwCv.js";import{p as N}from"./radar-MK3ICKWK-B0N6XiM2.js";import"./feature-graph-D-mwOi0p.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";import"./_baseUniq-CoKY6BVy.js";import"./_basePickBy-BVZSZRdU.js";import"./clone-UTZGcTvC.js";var C={packet:[]},h=structuredClone(C),L=E.packet,Y=n(()=>{const t=x({...L,...A().packet});return t.showBits&&(t.paddingY+=10),t},"getConfig"),G=n(()=>h.packet,"getPacket"),H=n(t=>{t.length>0&&h.packet.push(t)},"pushWord"),I=n(()=>{_(),h=structuredClone(C)},"clear"),m={pushWord:H,getPacket:G,getConfig:Y,clear:I,setAccTitle:W,getAccTitle:P,setDiagramTitle:z,getDiagramTitle:F,getAccDescription:S,setAccDescription:B},K=1e4,M=n(t=>{w(t,m);let e=-1,o=[],s=1;const{bitsPerRow:i}=m.getConfig();for(let{start:a,end:r,label:p}of t.blocks){if(r&&r<a)throw new Error(`Packet block ${a} - ${r} is invalid. End must be greater than start.`);if(a!==e+1)throw new Error(`Packet block ${a} - ${r??a} is not contiguous. 
It should start from ${e+1}.`);for(e=r??a,v.debug(`Packet block ${a} - ${e} with label ${p}`);o.length<=i+1&&m.getPacket().length<K;){const[b,c]=O({start:a,end:r,label:p},s,i);if(o.push(b),b.end+1===s*i&&(m.pushWord(o),o=[],s++),!c)break;({start:a,end:r,label:p}=c)}}m.pushWord(o)},"populate"),O=n((t,e,o)=>{if(t.end===void 0&&(t.end=t.start),t.start>t.end)throw new Error(`Block start ${t.start} is greater than block end ${t.end}.`);return t.end+1<=e*o?[t,void 0]:[{start:t.start,end:e*o-1,label:t.label},{start:e*o,end:t.end,label:t.label}]},"getNextFittingBlock"),q={parse:n(async t=>{const e=await N("packet",t);v.debug(e),M(e)},"parse")},R=n((t,e,o,s)=>{const i=s.db,a=i.getConfig(),{rowHeight:r,paddingY:p,bitWidth:b,bitsPerRow:c}=a,u=i.getPacket(),l=i.getDiagramTitle(),g=r+p,d=g*(u.length+1)-(l?0:r),k=b*c+2,f=T(e);f.attr("viewbox",`0 0 ${k} ${d}`),D(f,d,k,a.useMaxWidth);for(const[$,y]of u.entries())U(f,y,$,a);f.append("text").text(l).attr("x",k/2).attr("y",d-g/2).attr("dominant-baseline","middle").attr("text-anchor","middle").attr("class","packetTitle")},"draw"),U=n((t,e,o,{rowHeight:s,paddingX:i,paddingY:a,bitWidth:r,bitsPerRow:p,showBits:b})=>{const c=t.append("g"),u=o*(s+a)+a;for(const l of e){const g=l.start%p*r+1,d=(l.end-l.start+1)*r-i;if(c.append("rect").attr("x",g).attr("y",u).attr("width",d).attr("height",s).attr("class","packetBlock"),c.append("text").attr("x",g+d/2).attr("y",u+s/2).attr("class","packetLabel").attr("dominant-baseline","middle").attr("text-anchor","middle").text(l.label),!b)continue;const k=l.end===l.start,f=u-2;c.append("text").attr("x",g+(k?d/2:0)).attr("y",f).attr("class","packetByte start").attr("dominant-baseline","auto").attr("text-anchor",k?"middle":"start").text(l.start),k||c.append("text").attr("x",g+d).attr("y",f).attr("class","packetByte 
end").attr("dominant-baseline","auto").attr("text-anchor","end").text(l.end)}},"drawWord"),X={draw:R},j={byteFontSize:"10px",startByteColor:"black",endByteColor:"black",labelColor:"black",labelFontSize:"12px",titleColor:"black",titleFontSize:"14px",blockStrokeColor:"black",blockStrokeWidth:"1",blockFillColor:"#efefef"},J=n(({packet:t}={})=>{const e=x(j,t);return`
import{p as w}from"./chunk-4BMEZGHF-CAhtCpmT.js";import{_ as n,s as B,g as S,t as F,q as z,a as P,b as W,F as x,K as T,e as D,z as _,G as A,H as E,l as v}from"./mermaid-vendor-D0f_SE0h.js";import{p as N}from"./radar-MK3ICKWK-DOAXm8cx.js";import"./feature-graph-NODQb6qW.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";import"./_baseUniq-CtAZZJ8e.js";import"./_basePickBy-D3PHsJjq.js";import"./clone-Dm5jEAXQ.js";var C={packet:[]},h=structuredClone(C),L=E.packet,Y=n(()=>{const t=x({...L,...A().packet});return t.showBits&&(t.paddingY+=10),t},"getConfig"),G=n(()=>h.packet,"getPacket"),H=n(t=>{t.length>0&&h.packet.push(t)},"pushWord"),I=n(()=>{_(),h=structuredClone(C)},"clear"),m={pushWord:H,getPacket:G,getConfig:Y,clear:I,setAccTitle:W,getAccTitle:P,setDiagramTitle:z,getDiagramTitle:F,getAccDescription:S,setAccDescription:B},K=1e4,M=n(t=>{w(t,m);let e=-1,o=[],s=1;const{bitsPerRow:i}=m.getConfig();for(let{start:a,end:r,label:p}of t.blocks){if(r&&r<a)throw new Error(`Packet block ${a} - ${r} is invalid. End must be greater than start.`);if(a!==e+1)throw new Error(`Packet block ${a} - ${r??a} is not contiguous. 
It should start from ${e+1}.`);for(e=r??a,v.debug(`Packet block ${a} - ${e} with label ${p}`);o.length<=i+1&&m.getPacket().length<K;){const[b,c]=O({start:a,end:r,label:p},s,i);if(o.push(b),b.end+1===s*i&&(m.pushWord(o),o=[],s++),!c)break;({start:a,end:r,label:p}=c)}}m.pushWord(o)},"populate"),O=n((t,e,o)=>{if(t.end===void 0&&(t.end=t.start),t.start>t.end)throw new Error(`Block start ${t.start} is greater than block end ${t.end}.`);return t.end+1<=e*o?[t,void 0]:[{start:t.start,end:e*o-1,label:t.label},{start:e*o,end:t.end,label:t.label}]},"getNextFittingBlock"),q={parse:n(async t=>{const e=await N("packet",t);v.debug(e),M(e)},"parse")},R=n((t,e,o,s)=>{const i=s.db,a=i.getConfig(),{rowHeight:r,paddingY:p,bitWidth:b,bitsPerRow:c}=a,u=i.getPacket(),l=i.getDiagramTitle(),g=r+p,d=g*(u.length+1)-(l?0:r),k=b*c+2,f=T(e);f.attr("viewbox",`0 0 ${k} ${d}`),D(f,d,k,a.useMaxWidth);for(const[$,y]of u.entries())U(f,y,$,a);f.append("text").text(l).attr("x",k/2).attr("y",d-g/2).attr("dominant-baseline","middle").attr("text-anchor","middle").attr("class","packetTitle")},"draw"),U=n((t,e,o,{rowHeight:s,paddingX:i,paddingY:a,bitWidth:r,bitsPerRow:p,showBits:b})=>{const c=t.append("g"),u=o*(s+a)+a;for(const l of e){const g=l.start%p*r+1,d=(l.end-l.start+1)*r-i;if(c.append("rect").attr("x",g).attr("y",u).attr("width",d).attr("height",s).attr("class","packetBlock"),c.append("text").attr("x",g+d/2).attr("y",u+s/2).attr("class","packetLabel").attr("dominant-baseline","middle").attr("text-anchor","middle").text(l.label),!b)continue;const k=l.end===l.start,f=u-2;c.append("text").attr("x",g+(k?d/2:0)).attr("y",f).attr("class","packetByte start").attr("dominant-baseline","auto").attr("text-anchor",k?"middle":"start").text(l.start),k||c.append("text").attr("x",g+d).attr("y",f).attr("class","packetByte 
end").attr("dominant-baseline","auto").attr("text-anchor","end").text(l.end)}},"drawWord"),X={draw:R},j={byteFontSize:"10px",startByteColor:"black",endByteColor:"black",labelColor:"black",labelFontSize:"12px",titleColor:"black",titleFontSize:"14px",blockStrokeColor:"black",blockStrokeWidth:"1",blockFillColor:"#efefef"},J=n(({packet:t}={})=>{const e=x(j,t);return`
.packetByte {
font-size: ${e.byteFontSize};
}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View file

@ -1,4 +1,4 @@
import{_ as m,o as O1,l as Z,c as Ge,d as Ce,p as H1,r as q1,u as i1,b as X1,s as Q1,q as J1,a as Z1,g as $1,t as et,k as tt,v as st,J as it,x as rt,y as s1,z as nt,A as at,B as ut,C as lt}from"./mermaid-vendor-BVBgFwCv.js";import{g as ot,s as ct}from"./chunk-RZ5BOZE2-PbmQbmec.js";import"./feature-graph-D-mwOi0p.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";var ht="flowchart-",Pe,dt=(Pe=class{constructor(){this.vertexCounter=0,this.config=Ge(),this.vertices=new Map,this.edges=[],this.classes=new Map,this.subGraphs=[],this.subGraphLookup=new Map,this.tooltips=new Map,this.subCount=0,this.firstGraphFlag=!0,this.secCount=-1,this.posCrossRef=[],this.funs=[],this.setAccTitle=X1,this.setAccDescription=Q1,this.setDiagramTitle=J1,this.getAccTitle=Z1,this.getAccDescription=$1,this.getDiagramTitle=et,this.funs.push(this.setupToolTips.bind(this)),this.addVertex=this.addVertex.bind(this),this.firstGraph=this.firstGraph.bind(this),this.setDirection=this.setDirection.bind(this),this.addSubGraph=this.addSubGraph.bind(this),this.addLink=this.addLink.bind(this),this.setLink=this.setLink.bind(this),this.updateLink=this.updateLink.bind(this),this.addClass=this.addClass.bind(this),this.setClass=this.setClass.bind(this),this.destructLink=this.destructLink.bind(this),this.setClickEvent=this.setClickEvent.bind(this),this.setTooltip=this.setTooltip.bind(this),this.updateLinkInterpolate=this.updateLinkInterpolate.bind(this),this.setClickFun=this.setClickFun.bind(this),this.bindFunctions=this.bindFunctions.bind(this),this.lex={firstGraph:this.firstGraph.bind(this)},this.clear(),this.setGen("gen-2")}sanitizeText(i){return tt.sanitizeText(i,this.config)}lookUpDomId(i){for(const n of this.vertices.values())if(n.id===i)return n.domId;return i}addVertex(i,n,a,u,l,f,c={},A){var U,T;if(!i||i.trim().length===0)return;let r;if(A!==void 0){let d;A.includes(`
import{_ as m,o as O1,l as Z,c as Ge,d as Ce,p as H1,r as q1,u as i1,b as X1,s as Q1,q as J1,a as Z1,g as $1,t as et,k as tt,v as st,J as it,x as rt,y as s1,z as nt,A as at,B as ut,C as lt}from"./mermaid-vendor-D0f_SE0h.js";import{g as ot,s as ct}from"./chunk-RZ5BOZE2-B615FLH4.js";import"./feature-graph-NODQb6qW.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";var ht="flowchart-",Pe,dt=(Pe=class{constructor(){this.vertexCounter=0,this.config=Ge(),this.vertices=new Map,this.edges=[],this.classes=new Map,this.subGraphs=[],this.subGraphLookup=new Map,this.tooltips=new Map,this.subCount=0,this.firstGraphFlag=!0,this.secCount=-1,this.posCrossRef=[],this.funs=[],this.setAccTitle=X1,this.setAccDescription=Q1,this.setDiagramTitle=J1,this.getAccTitle=Z1,this.getAccDescription=$1,this.getDiagramTitle=et,this.funs.push(this.setupToolTips.bind(this)),this.addVertex=this.addVertex.bind(this),this.firstGraph=this.firstGraph.bind(this),this.setDirection=this.setDirection.bind(this),this.addSubGraph=this.addSubGraph.bind(this),this.addLink=this.addLink.bind(this),this.setLink=this.setLink.bind(this),this.updateLink=this.updateLink.bind(this),this.addClass=this.addClass.bind(this),this.setClass=this.setClass.bind(this),this.destructLink=this.destructLink.bind(this),this.setClickEvent=this.setClickEvent.bind(this),this.setTooltip=this.setTooltip.bind(this),this.updateLinkInterpolate=this.updateLinkInterpolate.bind(this),this.setClickFun=this.setClickFun.bind(this),this.bindFunctions=this.bindFunctions.bind(this),this.lex={firstGraph:this.firstGraph.bind(this)},this.clear(),this.setGen("gen-2")}sanitizeText(i){return tt.sanitizeText(i,this.config)}lookUpDomId(i){for(const n of this.vertices.values())if(n.id===i)return n.domId;return i}addVertex(i,n,a,u,l,f,c={},A){var U,T;if(!i||i.trim().length===0)return;let r;if(A!==void 0){let d;A.includes(`
`)?d=A+`
`:d=`{
`+A+`

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View file

@ -1,2 +1,2 @@
import{_ as e,l as o,K as i,e as n,L as p}from"./mermaid-vendor-BVBgFwCv.js";import{p as m}from"./radar-MK3ICKWK-B0N6XiM2.js";import"./feature-graph-D-mwOi0p.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";import"./_baseUniq-CoKY6BVy.js";import"./_basePickBy-BVZSZRdU.js";import"./clone-UTZGcTvC.js";var g={parse:e(async r=>{const a=await m("info",r);o.debug(a)},"parse")},v={version:p.version},d=e(()=>v.version,"getVersion"),c={getVersion:d},l=e((r,a,s)=>{o.debug(`rendering info diagram
import{_ as e,l as o,K as i,e as n,L as p}from"./mermaid-vendor-D0f_SE0h.js";import{p as m}from"./radar-MK3ICKWK-DOAXm8cx.js";import"./feature-graph-NODQb6qW.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";import"./_baseUniq-CtAZZJ8e.js";import"./_basePickBy-D3PHsJjq.js";import"./clone-Dm5jEAXQ.js";var g={parse:e(async r=>{const a=await m("info",r);o.debug(a)},"parse")},v={version:p.version},d=e(()=>v.version,"getVersion"),c={getVersion:d},l=e((r,a,s)=>{o.debug(`rendering info diagram
`+r);const t=i(a);n(t,100,400,!0),t.append("g").append("text").attr("x",100).attr("y",40).attr("class","version").attr("font-size",32).style("text-anchor","middle").text(`v${s}`)},"draw"),f={draw:l},L={parser:g,db:c,renderer:f};export{L as diagram};

View file

@ -1,4 +1,4 @@
import{a as pt,g as at,f as gt,d as mt}from"./chunk-D6G4REZN-DYSFhgH1.js";import{_ as s,g as xt,s as kt,a as _t,b as bt,t as vt,q as wt,c as A,d as W,e as Tt,z as St,N as tt}from"./mermaid-vendor-BVBgFwCv.js";import"./feature-graph-D-mwOi0p.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";var H=function(){var t=s(function(g,r,a,l){for(a=a||{},l=g.length;l--;a[g[l]]=r);return a},"o"),e=[6,8,10,11,12,14,16,17,18],i=[1,9],c=[1,10],n=[1,11],u=[1,12],h=[1,13],f=[1,14],d={trace:s(function(){},"trace"),yy:{},symbols_:{error:2,start:3,journey:4,document:5,EOF:6,line:7,SPACE:8,statement:9,NEWLINE:10,title:11,acc_title:12,acc_title_value:13,acc_descr:14,acc_descr_value:15,acc_descr_multiline_value:16,section:17,taskName:18,taskData:19,$accept:0,$end:1},terminals_:{2:"error",4:"journey",6:"EOF",8:"SPACE",10:"NEWLINE",11:"title",12:"acc_title",13:"acc_title_value",14:"acc_descr",15:"acc_descr_value",16:"acc_descr_multiline_value",17:"section",18:"taskName",19:"taskData"},productions_:[0,[3,3],[5,0],[5,2],[7,2],[7,1],[7,1],[7,1],[9,1],[9,2],[9,2],[9,1],[9,1],[9,2]],performAction:s(function(r,a,l,y,p,o,S){var _=o.length-1;switch(p){case 1:return o[_-1];case 2:this.$=[];break;case 3:o[_-1].push(o[_]),this.$=o[_-1];break;case 4:case 5:this.$=o[_];break;case 6:case 7:this.$=[];break;case 8:y.setDiagramTitle(o[_].substr(6)),this.$=o[_].substr(6);break;case 9:this.$=o[_].trim(),y.setAccTitle(this.$);break;case 10:case 11:this.$=o[_].trim(),y.setAccDescription(this.$);break;case 12:y.addSection(o[_].substr(8)),this.$=o[_].substr(8);break;case 
13:y.addTask(o[_-1],o[_]),this.$="task";break}},"anonymous"),table:[{3:1,4:[1,2]},{1:[3]},t(e,[2,2],{5:3}),{6:[1,4],7:5,8:[1,6],9:7,10:[1,8],11:i,12:c,14:n,16:u,17:h,18:f},t(e,[2,7],{1:[2,1]}),t(e,[2,3]),{9:15,11:i,12:c,14:n,16:u,17:h,18:f},t(e,[2,5]),t(e,[2,6]),t(e,[2,8]),{13:[1,16]},{15:[1,17]},t(e,[2,11]),t(e,[2,12]),{19:[1,18]},t(e,[2,4]),t(e,[2,9]),t(e,[2,10]),t(e,[2,13])],defaultActions:{},parseError:s(function(r,a){if(a.recoverable)this.trace(r);else{var l=new Error(r);throw l.hash=a,l}},"parseError"),parse:s(function(r){var a=this,l=[0],y=[],p=[null],o=[],S=this.table,_="",B=0,J=0,ut=2,K=1,yt=o.slice.call(arguments,1),k=Object.create(this.lexer),E={yy:{}};for(var O in this.yy)Object.prototype.hasOwnProperty.call(this.yy,O)&&(E.yy[O]=this.yy[O]);k.setInput(r,E.yy),E.yy.lexer=k,E.yy.parser=this,typeof k.yylloc>"u"&&(k.yylloc={});var Y=k.yylloc;o.push(Y);var dt=k.options&&k.options.ranges;typeof E.yy.parseError=="function"?this.parseError=E.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;function ft(v){l.length=l.length-2*v,p.length=p.length-v,o.length=o.length-v}s(ft,"popStack");function Q(){var v;return v=y.pop()||k.lex()||K,typeof v!="number"&&(v instanceof Array&&(y=v,v=y.pop()),v=a.symbols_[v]||v),v}s(Q,"lex");for(var b,P,w,q,C={},N,$,D,j;;){if(P=l[l.length-1],this.defaultActions[P]?w=this.defaultActions[P]:((b===null||typeof b>"u")&&(b=Q()),w=S[P]&&S[P][b]),typeof w>"u"||!w.length||!w[0]){var G="";j=[];for(N in S[P])this.terminals_[N]&&N>ut&&j.push("'"+this.terminals_[N]+"'");k.showPosition?G="Parse error on line "+(B+1)+`:
import{a as pt,g as at,f as gt,d as mt}from"./chunk-D6G4REZN-CGaqGId9.js";import{_ as s,g as xt,s as kt,a as _t,b as bt,t as vt,q as wt,c as A,d as W,e as Tt,z as St,N as tt}from"./mermaid-vendor-D0f_SE0h.js";import"./feature-graph-NODQb6qW.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";var H=function(){var t=s(function(g,r,a,l){for(a=a||{},l=g.length;l--;a[g[l]]=r);return a},"o"),e=[6,8,10,11,12,14,16,17,18],i=[1,9],c=[1,10],n=[1,11],u=[1,12],h=[1,13],f=[1,14],d={trace:s(function(){},"trace"),yy:{},symbols_:{error:2,start:3,journey:4,document:5,EOF:6,line:7,SPACE:8,statement:9,NEWLINE:10,title:11,acc_title:12,acc_title_value:13,acc_descr:14,acc_descr_value:15,acc_descr_multiline_value:16,section:17,taskName:18,taskData:19,$accept:0,$end:1},terminals_:{2:"error",4:"journey",6:"EOF",8:"SPACE",10:"NEWLINE",11:"title",12:"acc_title",13:"acc_title_value",14:"acc_descr",15:"acc_descr_value",16:"acc_descr_multiline_value",17:"section",18:"taskName",19:"taskData"},productions_:[0,[3,3],[5,0],[5,2],[7,2],[7,1],[7,1],[7,1],[9,1],[9,2],[9,2],[9,1],[9,1],[9,2]],performAction:s(function(r,a,l,y,p,o,S){var _=o.length-1;switch(p){case 1:return o[_-1];case 2:this.$=[];break;case 3:o[_-1].push(o[_]),this.$=o[_-1];break;case 4:case 5:this.$=o[_];break;case 6:case 7:this.$=[];break;case 8:y.setDiagramTitle(o[_].substr(6)),this.$=o[_].substr(6);break;case 9:this.$=o[_].trim(),y.setAccTitle(this.$);break;case 10:case 11:this.$=o[_].trim(),y.setAccDescription(this.$);break;case 12:y.addSection(o[_].substr(8)),this.$=o[_].substr(8);break;case 
13:y.addTask(o[_-1],o[_]),this.$="task";break}},"anonymous"),table:[{3:1,4:[1,2]},{1:[3]},t(e,[2,2],{5:3}),{6:[1,4],7:5,8:[1,6],9:7,10:[1,8],11:i,12:c,14:n,16:u,17:h,18:f},t(e,[2,7],{1:[2,1]}),t(e,[2,3]),{9:15,11:i,12:c,14:n,16:u,17:h,18:f},t(e,[2,5]),t(e,[2,6]),t(e,[2,8]),{13:[1,16]},{15:[1,17]},t(e,[2,11]),t(e,[2,12]),{19:[1,18]},t(e,[2,4]),t(e,[2,9]),t(e,[2,10]),t(e,[2,13])],defaultActions:{},parseError:s(function(r,a){if(a.recoverable)this.trace(r);else{var l=new Error(r);throw l.hash=a,l}},"parseError"),parse:s(function(r){var a=this,l=[0],y=[],p=[null],o=[],S=this.table,_="",B=0,J=0,ut=2,K=1,yt=o.slice.call(arguments,1),k=Object.create(this.lexer),E={yy:{}};for(var O in this.yy)Object.prototype.hasOwnProperty.call(this.yy,O)&&(E.yy[O]=this.yy[O]);k.setInput(r,E.yy),E.yy.lexer=k,E.yy.parser=this,typeof k.yylloc>"u"&&(k.yylloc={});var Y=k.yylloc;o.push(Y);var dt=k.options&&k.options.ranges;typeof E.yy.parseError=="function"?this.parseError=E.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;function ft(v){l.length=l.length-2*v,p.length=p.length-v,o.length=o.length-v}s(ft,"popStack");function Q(){var v;return v=y.pop()||k.lex()||K,typeof v!="number"&&(v instanceof Array&&(y=v,v=y.pop()),v=a.symbols_[v]||v),v}s(Q,"lex");for(var b,P,w,q,C={},N,$,D,j;;){if(P=l[l.length-1],this.defaultActions[P]?w=this.defaultActions[P]:((b===null||typeof b>"u")&&(b=Q()),w=S[P]&&S[P][b]),typeof w>"u"||!w.length||!w[0]){var G="";j=[];for(N in S[P])this.terminals_[N]&&N>ut&&j.push("'"+this.terminals_[N]+"'");k.showPosition?G="Parse error on line "+(B+1)+`:
`+k.showPosition()+`
Expecting `+j.join(", ")+", got '"+(this.terminals_[b]||b)+"'":G="Parse error on line "+(B+1)+": Unexpected "+(b==K?"end of input":"'"+(this.terminals_[b]||b)+"'"),this.parseError(G,{text:k.match,token:this.terminals_[b]||b,line:k.yylineno,loc:Y,expected:j})}if(w[0]instanceof Array&&w.length>1)throw new Error("Parse Error: multiple actions possible at state: "+P+", token: "+b);switch(w[0]){case 1:l.push(b),p.push(k.yytext),o.push(k.yylloc),l.push(w[1]),b=null,J=k.yyleng,_=k.yytext,B=k.yylineno,Y=k.yylloc;break;case 2:if($=this.productions_[w[1]][1],C.$=p[p.length-$],C._$={first_line:o[o.length-($||1)].first_line,last_line:o[o.length-1].last_line,first_column:o[o.length-($||1)].first_column,last_column:o[o.length-1].last_column},dt&&(C._$.range=[o[o.length-($||1)].range[0],o[o.length-1].range[1]]),q=this.performAction.apply(C,[_,J,B,E.yy,w[1],p,o].concat(yt)),typeof q<"u")return q;$&&(l=l.slice(0,-1*$*2),p=p.slice(0,-1*$),o=o.slice(0,-1*$)),l.push(this.productions_[w[1]][0]),p.push(C.$),o.push(C._$),D=S[l[l.length-2]][l[l.length-1]],l.push(D);break;case 3:return!0}}return!0},"parse")},x=function(){var g={EOF:1,parseError:s(function(a,l){if(this.yy.parser)this.yy.parser.parseError(a,l);else throw new Error(a)},"parseError"),setInput:s(function(r,a){return this.yy=a||this.yy||{},this._input=r,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},"setInput"),input:s(function(){var r=this._input[0];this.yytext+=r,this.yyleng++,this.offset++,this.match+=r,this.matched+=r;var a=r.match(/(?:\r\n?|\n).*/g);return a?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),r},"input"),unput:s(function(r){var 
a=r.length,l=r.split(/(?:\r\n?|\n)/g);this._input=r+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-a),this.offset-=a;var y=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),l.length-1&&(this.yylineno-=l.length-1);var p=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:l?(l.length===y.length?this.yylloc.first_column:0)+y[y.length-l.length].length-l[0].length:this.yylloc.first_column-a},this.options.ranges&&(this.yylloc.range=[p[0],p[0]+this.yyleng-a]),this.yyleng=this.yytext.length,this},"unput"),more:s(function(){return this._more=!0,this},"more"),reject:s(function(){if(this.options.backtrack_lexer)this._backtrack=!0;else return this.parseError("Lexical error on line "+(this.yylineno+1)+`. You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).
`+this.showPosition(),{text:"",token:null,line:this.yylineno});return this},"reject"),less:s(function(r){this.unput(this.match.slice(r))},"less"),pastInput:s(function(){var r=this.matched.substr(0,this.matched.length-this.match.length);return(r.length>20?"...":"")+r.substr(-20).replace(/\n/g,"")},"pastInput"),upcomingInput:s(function(){var r=this.match;return r.length<20&&(r+=this._input.substr(0,20-r.length)),(r.substr(0,20)+(r.length>20?"...":"")).replace(/\n/g,"")},"upcomingInput"),showPosition:s(function(){var r=this.pastInput(),a=new Array(r.length+1).join("-");return r+this.upcomingInput()+`

View file

@ -1,4 +1,4 @@
import{_ as c,l as te,c as W,K as fe,a7 as ye,a8 as be,a9 as me,a2 as _e,H as Y,i as G,v as Ee,J as ke,a3 as Se,a4 as le,a5 as ce}from"./mermaid-vendor-BVBgFwCv.js";import"./feature-graph-D-mwOi0p.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";var $=function(){var t=c(function(_,i,n,a){for(n=n||{},a=_.length;a--;n[_[a]]=i);return n},"o"),g=[1,4],d=[1,13],r=[1,12],p=[1,15],E=[1,16],f=[1,20],h=[1,19],L=[6,7,8],C=[1,26],w=[1,24],N=[1,25],s=[6,7,11],H=[1,31],x=[6,7,11,24],P=[1,6,13,16,17,20,23],M=[1,35],U=[1,36],A=[1,6,7,11,13,16,17,20,23],j=[1,38],V={trace:c(function(){},"trace"),yy:{},symbols_:{error:2,start:3,mindMap:4,spaceLines:5,SPACELINE:6,NL:7,KANBAN:8,document:9,stop:10,EOF:11,statement:12,SPACELIST:13,node:14,shapeData:15,ICON:16,CLASS:17,nodeWithId:18,nodeWithoutId:19,NODE_DSTART:20,NODE_DESCR:21,NODE_DEND:22,NODE_ID:23,SHAPE_DATA:24,$accept:0,$end:1},terminals_:{2:"error",6:"SPACELINE",7:"NL",8:"KANBAN",11:"EOF",13:"SPACELIST",16:"ICON",17:"CLASS",20:"NODE_DSTART",21:"NODE_DESCR",22:"NODE_DEND",23:"NODE_ID",24:"SHAPE_DATA"},productions_:[0,[3,1],[3,2],[5,1],[5,2],[5,2],[4,2],[4,3],[10,1],[10,1],[10,1],[10,2],[10,2],[9,3],[9,2],[12,3],[12,2],[12,2],[12,2],[12,1],[12,2],[12,1],[12,1],[12,1],[12,1],[14,1],[14,1],[19,3],[18,1],[18,4],[15,2],[15,1]],performAction:c(function(i,n,a,o,u,e,B){var l=e.length-1;switch(u){case 6:case 7:return o;case 8:o.getLogger().trace("Stop NL ");break;case 9:o.getLogger().trace("Stop EOF ");break;case 11:o.getLogger().trace("Stop NL2 ");break;case 12:o.getLogger().trace("Stop EOF2 ");break;case 15:o.getLogger().info("Node: ",e[l-1].id),o.addNode(e[l-2].length,e[l-1].id,e[l-1].descr,e[l-1].type,e[l]);break;case 16:o.getLogger().info("Node: ",e[l].id),o.addNode(e[l-1].length,e[l].id,e[l].descr,e[l].type);break;case 17:o.getLogger().trace("Icon: ",e[l]),o.decorateNode({icon:e[l]});break;case 18:case 23:o.decorateNode({class:e[l]});break;case 
19:o.getLogger().trace("SPACELIST");break;case 20:o.getLogger().trace("Node: ",e[l-1].id),o.addNode(0,e[l-1].id,e[l-1].descr,e[l-1].type,e[l]);break;case 21:o.getLogger().trace("Node: ",e[l].id),o.addNode(0,e[l].id,e[l].descr,e[l].type);break;case 22:o.decorateNode({icon:e[l]});break;case 27:o.getLogger().trace("node found ..",e[l-2]),this.$={id:e[l-1],descr:e[l-1],type:o.getType(e[l-2],e[l])};break;case 28:this.$={id:e[l],descr:e[l],type:0};break;case 29:o.getLogger().trace("node found ..",e[l-3]),this.$={id:e[l-3],descr:e[l-1],type:o.getType(e[l-2],e[l])};break;case 30:this.$=e[l-1]+e[l];break;case 31:this.$=e[l];break}},"anonymous"),table:[{3:1,4:2,5:3,6:[1,5],8:g},{1:[3]},{1:[2,1]},{4:6,6:[1,7],7:[1,8],8:g},{6:d,7:[1,10],9:9,12:11,13:r,14:14,16:p,17:E,18:17,19:18,20:f,23:h},t(L,[2,3]),{1:[2,2]},t(L,[2,4]),t(L,[2,5]),{1:[2,6],6:d,12:21,13:r,14:14,16:p,17:E,18:17,19:18,20:f,23:h},{6:d,9:22,12:11,13:r,14:14,16:p,17:E,18:17,19:18,20:f,23:h},{6:C,7:w,10:23,11:N},t(s,[2,24],{18:17,19:18,14:27,16:[1,28],17:[1,29],20:f,23:h}),t(s,[2,19]),t(s,[2,21],{15:30,24:H}),t(s,[2,22]),t(s,[2,23]),t(x,[2,25]),t(x,[2,26]),t(x,[2,28],{20:[1,32]}),{21:[1,33]},{6:C,7:w,10:34,11:N},{1:[2,7],6:d,12:21,13:r,14:14,16:p,17:E,18:17,19:18,20:f,23:h},t(P,[2,14],{7:M,11:U}),t(A,[2,8]),t(A,[2,9]),t(A,[2,10]),t(s,[2,16],{15:37,24:H}),t(s,[2,17]),t(s,[2,18]),t(s,[2,20],{24:j}),t(x,[2,31]),{21:[1,39]},{22:[1,40]},t(P,[2,13],{7:M,11:U}),t(A,[2,11]),t(A,[2,12]),t(s,[2,15],{24:j}),t(x,[2,30]),{22:[1,41]},t(x,[2,27]),t(x,[2,29])],defaultActions:{2:[2,1],6:[2,2]},parseError:c(function(i,n){if(n.recoverable)this.trace(i);else{var a=new Error(i);throw a.hash=n,a}},"parseError"),parse:c(function(i){var n=this,a=[0],o=[],u=[null],e=[],B=this.table,l="",z=0,se=0,ue=2,re=1,ge=e.slice.call(arguments,1),b=Object.create(this.lexer),T={yy:{}};for(var J in this.yy)Object.prototype.hasOwnProperty.call(this.yy,J)&&(T.yy[J]=this.yy[J]);b.setInput(i,T.yy),T.yy.lexer=b,T.yy.parser=this,typeof 
b.yylloc>"u"&&(b.yylloc={});var q=b.yylloc;e.push(q);var de=b.options&&b.options.ranges;typeof T.yy.parseError=="function"?this.parseError=T.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;function pe(S){a.length=a.length-2*S,u.length=u.length-S,e.length=e.length-S}c(pe,"popStack");function ae(){var S;return S=o.pop()||b.lex()||re,typeof S!="number"&&(S instanceof Array&&(o=S,S=o.pop()),S=n.symbols_[S]||S),S}c(ae,"lex");for(var k,R,v,Q,F={},K,I,oe,X;;){if(R=a[a.length-1],this.defaultActions[R]?v=this.defaultActions[R]:((k===null||typeof k>"u")&&(k=ae()),v=B[R]&&B[R][k]),typeof v>"u"||!v.length||!v[0]){var Z="";X=[];for(K in B[R])this.terminals_[K]&&K>ue&&X.push("'"+this.terminals_[K]+"'");b.showPosition?Z="Parse error on line "+(z+1)+`:
import{_ as c,l as te,c as W,K as fe,a7 as ye,a8 as be,a9 as me,a2 as _e,H as Y,i as G,v as Ee,J as ke,a3 as Se,a4 as le,a5 as ce}from"./mermaid-vendor-D0f_SE0h.js";import"./feature-graph-NODQb6qW.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";var $=function(){var t=c(function(_,i,n,a){for(n=n||{},a=_.length;a--;n[_[a]]=i);return n},"o"),g=[1,4],d=[1,13],r=[1,12],p=[1,15],E=[1,16],f=[1,20],h=[1,19],L=[6,7,8],C=[1,26],w=[1,24],N=[1,25],s=[6,7,11],H=[1,31],x=[6,7,11,24],P=[1,6,13,16,17,20,23],M=[1,35],U=[1,36],A=[1,6,7,11,13,16,17,20,23],j=[1,38],V={trace:c(function(){},"trace"),yy:{},symbols_:{error:2,start:3,mindMap:4,spaceLines:5,SPACELINE:6,NL:7,KANBAN:8,document:9,stop:10,EOF:11,statement:12,SPACELIST:13,node:14,shapeData:15,ICON:16,CLASS:17,nodeWithId:18,nodeWithoutId:19,NODE_DSTART:20,NODE_DESCR:21,NODE_DEND:22,NODE_ID:23,SHAPE_DATA:24,$accept:0,$end:1},terminals_:{2:"error",6:"SPACELINE",7:"NL",8:"KANBAN",11:"EOF",13:"SPACELIST",16:"ICON",17:"CLASS",20:"NODE_DSTART",21:"NODE_DESCR",22:"NODE_DEND",23:"NODE_ID",24:"SHAPE_DATA"},productions_:[0,[3,1],[3,2],[5,1],[5,2],[5,2],[4,2],[4,3],[10,1],[10,1],[10,1],[10,2],[10,2],[9,3],[9,2],[12,3],[12,2],[12,2],[12,2],[12,1],[12,2],[12,1],[12,1],[12,1],[12,1],[14,1],[14,1],[19,3],[18,1],[18,4],[15,2],[15,1]],performAction:c(function(i,n,a,o,u,e,B){var l=e.length-1;switch(u){case 6:case 7:return o;case 8:o.getLogger().trace("Stop NL ");break;case 9:o.getLogger().trace("Stop EOF ");break;case 11:o.getLogger().trace("Stop NL2 ");break;case 12:o.getLogger().trace("Stop EOF2 ");break;case 15:o.getLogger().info("Node: ",e[l-1].id),o.addNode(e[l-2].length,e[l-1].id,e[l-1].descr,e[l-1].type,e[l]);break;case 16:o.getLogger().info("Node: ",e[l].id),o.addNode(e[l-1].length,e[l].id,e[l].descr,e[l].type);break;case 17:o.getLogger().trace("Icon: ",e[l]),o.decorateNode({icon:e[l]});break;case 18:case 23:o.decorateNode({class:e[l]});break;case 
19:o.getLogger().trace("SPACELIST");break;case 20:o.getLogger().trace("Node: ",e[l-1].id),o.addNode(0,e[l-1].id,e[l-1].descr,e[l-1].type,e[l]);break;case 21:o.getLogger().trace("Node: ",e[l].id),o.addNode(0,e[l].id,e[l].descr,e[l].type);break;case 22:o.decorateNode({icon:e[l]});break;case 27:o.getLogger().trace("node found ..",e[l-2]),this.$={id:e[l-1],descr:e[l-1],type:o.getType(e[l-2],e[l])};break;case 28:this.$={id:e[l],descr:e[l],type:0};break;case 29:o.getLogger().trace("node found ..",e[l-3]),this.$={id:e[l-3],descr:e[l-1],type:o.getType(e[l-2],e[l])};break;case 30:this.$=e[l-1]+e[l];break;case 31:this.$=e[l];break}},"anonymous"),table:[{3:1,4:2,5:3,6:[1,5],8:g},{1:[3]},{1:[2,1]},{4:6,6:[1,7],7:[1,8],8:g},{6:d,7:[1,10],9:9,12:11,13:r,14:14,16:p,17:E,18:17,19:18,20:f,23:h},t(L,[2,3]),{1:[2,2]},t(L,[2,4]),t(L,[2,5]),{1:[2,6],6:d,12:21,13:r,14:14,16:p,17:E,18:17,19:18,20:f,23:h},{6:d,9:22,12:11,13:r,14:14,16:p,17:E,18:17,19:18,20:f,23:h},{6:C,7:w,10:23,11:N},t(s,[2,24],{18:17,19:18,14:27,16:[1,28],17:[1,29],20:f,23:h}),t(s,[2,19]),t(s,[2,21],{15:30,24:H}),t(s,[2,22]),t(s,[2,23]),t(x,[2,25]),t(x,[2,26]),t(x,[2,28],{20:[1,32]}),{21:[1,33]},{6:C,7:w,10:34,11:N},{1:[2,7],6:d,12:21,13:r,14:14,16:p,17:E,18:17,19:18,20:f,23:h},t(P,[2,14],{7:M,11:U}),t(A,[2,8]),t(A,[2,9]),t(A,[2,10]),t(s,[2,16],{15:37,24:H}),t(s,[2,17]),t(s,[2,18]),t(s,[2,20],{24:j}),t(x,[2,31]),{21:[1,39]},{22:[1,40]},t(P,[2,13],{7:M,11:U}),t(A,[2,11]),t(A,[2,12]),t(s,[2,15],{24:j}),t(x,[2,30]),{22:[1,41]},t(x,[2,27]),t(x,[2,29])],defaultActions:{2:[2,1],6:[2,2]},parseError:c(function(i,n){if(n.recoverable)this.trace(i);else{var a=new Error(i);throw a.hash=n,a}},"parseError"),parse:c(function(i){var n=this,a=[0],o=[],u=[null],e=[],B=this.table,l="",z=0,se=0,ue=2,re=1,ge=e.slice.call(arguments,1),b=Object.create(this.lexer),T={yy:{}};for(var J in this.yy)Object.prototype.hasOwnProperty.call(this.yy,J)&&(T.yy[J]=this.yy[J]);b.setInput(i,T.yy),T.yy.lexer=b,T.yy.parser=this,typeof 
b.yylloc>"u"&&(b.yylloc={});var q=b.yylloc;e.push(q);var de=b.options&&b.options.ranges;typeof T.yy.parseError=="function"?this.parseError=T.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;function pe(S){a.length=a.length-2*S,u.length=u.length-S,e.length=e.length-S}c(pe,"popStack");function ae(){var S;return S=o.pop()||b.lex()||re,typeof S!="number"&&(S instanceof Array&&(o=S,S=o.pop()),S=n.symbols_[S]||S),S}c(ae,"lex");for(var k,R,v,Q,F={},K,I,oe,X;;){if(R=a[a.length-1],this.defaultActions[R]?v=this.defaultActions[R]:((k===null||typeof k>"u")&&(k=ae()),v=B[R]&&B[R][k]),typeof v>"u"||!v.length||!v[0]){var Z="";X=[];for(K in B[R])this.terminals_[K]&&K>ue&&X.push("'"+this.terminals_[K]+"'");b.showPosition?Z="Parse error on line "+(z+1)+`:
`+b.showPosition()+`
Expecting `+X.join(", ")+", got '"+(this.terminals_[k]||k)+"'":Z="Parse error on line "+(z+1)+": Unexpected "+(k==re?"end of input":"'"+(this.terminals_[k]||k)+"'"),this.parseError(Z,{text:b.match,token:this.terminals_[k]||k,line:b.yylineno,loc:q,expected:X})}if(v[0]instanceof Array&&v.length>1)throw new Error("Parse Error: multiple actions possible at state: "+R+", token: "+k);switch(v[0]){case 1:a.push(k),u.push(b.yytext),e.push(b.yylloc),a.push(v[1]),k=null,se=b.yyleng,l=b.yytext,z=b.yylineno,q=b.yylloc;break;case 2:if(I=this.productions_[v[1]][1],F.$=u[u.length-I],F._$={first_line:e[e.length-(I||1)].first_line,last_line:e[e.length-1].last_line,first_column:e[e.length-(I||1)].first_column,last_column:e[e.length-1].last_column},de&&(F._$.range=[e[e.length-(I||1)].range[0],e[e.length-1].range[1]]),Q=this.performAction.apply(F,[l,se,z,T.yy,v[1],u,e].concat(ge)),typeof Q<"u")return Q;I&&(a=a.slice(0,-1*I*2),u=u.slice(0,-1*I),e=e.slice(0,-1*I)),a.push(this.productions_[v[1]][0]),u.push(F.$),e.push(F._$),oe=B[a[a.length-2]][a[a.length-1]],a.push(oe);break;case 3:return!0}}return!0},"parse")},m=function(){var _={EOF:1,parseError:c(function(n,a){if(this.yy.parser)this.yy.parser.parseError(n,a);else throw new Error(n)},"parseError"),setInput:c(function(i,n){return this.yy=n||this.yy||{},this._input=i,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},"setInput"),input:c(function(){var i=this._input[0];this.yytext+=i,this.yyleng++,this.offset++,this.match+=i,this.matched+=i;var n=i.match(/(?:\r\n?|\n).*/g);return n?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),i},"input"),unput:c(function(i){var 
n=i.length,a=i.split(/(?:\r\n?|\n)/g);this._input=i+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-n),this.offset-=n;var o=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),a.length-1&&(this.yylineno-=a.length-1);var u=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:a?(a.length===o.length?this.yylloc.first_column:0)+o[o.length-a.length].length-a[0].length:this.yylloc.first_column-n},this.options.ranges&&(this.yylloc.range=[u[0],u[0]+this.yyleng-n]),this.yyleng=this.yytext.length,this},"unput"),more:c(function(){return this._more=!0,this},"more"),reject:c(function(){if(this.options.backtrack_lexer)this._backtrack=!0;else return this.parseError("Lexical error on line "+(this.yylineno+1)+`. You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).
`+this.showPosition(),{text:"",token:null,line:this.yylineno});return this},"reject"),less:c(function(i){this.unput(this.match.slice(i))},"less"),pastInput:c(function(){var i=this.matched.substr(0,this.matched.length-this.match.length);return(i.length>20?"...":"")+i.substr(-20).replace(/\n/g,"")},"pastInput"),upcomingInput:c(function(){var i=this.match;return i.length<20&&(i+=this._input.substr(0,20-i.length)),(i.substr(0,20)+(i.length>20?"...":"")).replace(/\n/g,"")},"upcomingInput"),showPosition:c(function(){var i=this.pastInput(),n=new Array(i.length+1).join("-");return i+this.upcomingInput()+`

File diff suppressed because one or more lines are too long

View file

@ -1,4 +1,4 @@
import{p as N}from"./chunk-4BMEZGHF-BqribV_z.js";import{_ as i,g as B,s as U,a as q,b as H,t as K,q as V,l as C,c as Z,F as j,K as J,M as Q,N as z,O as X,e as Y,z as tt,P as et,H as at}from"./mermaid-vendor-BVBgFwCv.js";import{p as rt}from"./radar-MK3ICKWK-B0N6XiM2.js";import"./feature-graph-D-mwOi0p.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";import"./_baseUniq-CoKY6BVy.js";import"./_basePickBy-BVZSZRdU.js";import"./clone-UTZGcTvC.js";var it=at.pie,D={sections:new Map,showData:!1},f=D.sections,w=D.showData,st=structuredClone(it),ot=i(()=>structuredClone(st),"getConfig"),nt=i(()=>{f=new Map,w=D.showData,tt()},"clear"),lt=i(({label:t,value:a})=>{f.has(t)||(f.set(t,a),C.debug(`added new section: ${t}, with value: ${a}`))},"addSection"),ct=i(()=>f,"getSections"),pt=i(t=>{w=t},"setShowData"),dt=i(()=>w,"getShowData"),F={getConfig:ot,clear:nt,setDiagramTitle:V,getDiagramTitle:K,setAccTitle:H,getAccTitle:q,setAccDescription:U,getAccDescription:B,addSection:lt,getSections:ct,setShowData:pt,getShowData:dt},gt=i((t,a)=>{N(t,a),a.setShowData(t.showData),t.sections.map(a.addSection)},"populateDb"),ut={parse:i(async t=>{const a=await rt("pie",t);C.debug(a),gt(a,F)},"parse")},mt=i(t=>`
import{p as N}from"./chunk-4BMEZGHF-CAhtCpmT.js";import{_ as i,g as B,s as U,a as q,b as H,t as K,q as V,l as C,c as Z,F as j,K as J,M as Q,N as z,O as X,e as Y,z as tt,P as et,H as at}from"./mermaid-vendor-D0f_SE0h.js";import{p as rt}from"./radar-MK3ICKWK-DOAXm8cx.js";import"./feature-graph-NODQb6qW.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";import"./_baseUniq-CtAZZJ8e.js";import"./_basePickBy-D3PHsJjq.js";import"./clone-Dm5jEAXQ.js";var it=at.pie,D={sections:new Map,showData:!1},f=D.sections,w=D.showData,st=structuredClone(it),ot=i(()=>structuredClone(st),"getConfig"),nt=i(()=>{f=new Map,w=D.showData,tt()},"clear"),lt=i(({label:t,value:a})=>{f.has(t)||(f.set(t,a),C.debug(`added new section: ${t}, with value: ${a}`))},"addSection"),ct=i(()=>f,"getSections"),pt=i(t=>{w=t},"setShowData"),dt=i(()=>w,"getShowData"),F={getConfig:ot,clear:nt,setDiagramTitle:V,getDiagramTitle:K,setAccTitle:H,getAccTitle:q,setAccDescription:U,getAccDescription:B,addSection:lt,getSections:ct,setShowData:pt,getShowData:dt},gt=i((t,a)=>{N(t,a),a.setShowData(t.showData),t.sections.map(a.addSection)},"populateDb"),ut={parse:i(async t=>{const a=await rt("pie",t);C.debug(a),gt(a,F)},"parse")},mt=i(t=>`
.pieCircle{
stroke: ${t.pieStrokeColor};
stroke-width : ${t.pieStrokeWidth};

View file

@ -1 +1 @@
import{s as r,b as e,a,S as s}from"./chunk-AEK57VVT-C_8ebDHI.js";import{_ as i}from"./mermaid-vendor-BVBgFwCv.js";import"./chunk-RZ5BOZE2-PbmQbmec.js";import"./feature-graph-D-mwOi0p.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";var b={parser:a,get db(){return new s(2)},renderer:e,styles:r,init:i(t=>{t.state||(t.state={}),t.state.arrowMarkerAbsolute=t.arrowMarkerAbsolute},"init")};export{b as diagram};
import{s as r,b as e,a,S as s}from"./chunk-AEK57VVT-gQ4j2jcG.js";import{_ as i}from"./mermaid-vendor-D0f_SE0h.js";import"./chunk-RZ5BOZE2-B615FLH4.js";import"./feature-graph-NODQb6qW.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";var b={parser:a,get db(){return new s(2)},renderer:e,styles:r,init:i(t=>{t.state||(t.state={}),t.state.arrowMarkerAbsolute=t.arrowMarkerAbsolute},"init")};export{b as diagram};

View file

@ -1,4 +1,4 @@
import{_ as s,c as xt,l as T,d as q,a2 as kt,a3 as _t,a4 as bt,a5 as vt,N as nt,D as wt,a6 as St,z as Et}from"./mermaid-vendor-BVBgFwCv.js";import"./feature-graph-D-mwOi0p.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";var X=function(){var n=s(function(f,i,a,d){for(a=a||{},d=f.length;d--;a[f[d]]=i);return a},"o"),t=[6,8,10,11,12,14,16,17,20,21],e=[1,9],l=[1,10],r=[1,11],h=[1,12],c=[1,13],g=[1,16],m=[1,17],p={trace:s(function(){},"trace"),yy:{},symbols_:{error:2,start:3,timeline:4,document:5,EOF:6,line:7,SPACE:8,statement:9,NEWLINE:10,title:11,acc_title:12,acc_title_value:13,acc_descr:14,acc_descr_value:15,acc_descr_multiline_value:16,section:17,period_statement:18,event_statement:19,period:20,event:21,$accept:0,$end:1},terminals_:{2:"error",4:"timeline",6:"EOF",8:"SPACE",10:"NEWLINE",11:"title",12:"acc_title",13:"acc_title_value",14:"acc_descr",15:"acc_descr_value",16:"acc_descr_multiline_value",17:"section",20:"period",21:"event"},productions_:[0,[3,3],[5,0],[5,2],[7,2],[7,1],[7,1],[7,1],[9,1],[9,2],[9,2],[9,1],[9,1],[9,1],[9,1],[18,1],[19,1]],performAction:s(function(i,a,d,u,y,o,S){var k=o.length-1;switch(y){case 1:return o[k-1];case 2:this.$=[];break;case 3:o[k-1].push(o[k]),this.$=o[k-1];break;case 4:case 5:this.$=o[k];break;case 6:case 7:this.$=[];break;case 8:u.getCommonDb().setDiagramTitle(o[k].substr(6)),this.$=o[k].substr(6);break;case 9:this.$=o[k].trim(),u.getCommonDb().setAccTitle(this.$);break;case 10:case 11:this.$=o[k].trim(),u.getCommonDb().setAccDescription(this.$);break;case 12:u.addSection(o[k].substr(8)),this.$=o[k].substr(8);break;case 15:u.addTask(o[k],0,""),this.$=o[k];break;case 
16:u.addEvent(o[k].substr(2)),this.$=o[k];break}},"anonymous"),table:[{3:1,4:[1,2]},{1:[3]},n(t,[2,2],{5:3}),{6:[1,4],7:5,8:[1,6],9:7,10:[1,8],11:e,12:l,14:r,16:h,17:c,18:14,19:15,20:g,21:m},n(t,[2,7],{1:[2,1]}),n(t,[2,3]),{9:18,11:e,12:l,14:r,16:h,17:c,18:14,19:15,20:g,21:m},n(t,[2,5]),n(t,[2,6]),n(t,[2,8]),{13:[1,19]},{15:[1,20]},n(t,[2,11]),n(t,[2,12]),n(t,[2,13]),n(t,[2,14]),n(t,[2,15]),n(t,[2,16]),n(t,[2,4]),n(t,[2,9]),n(t,[2,10])],defaultActions:{},parseError:s(function(i,a){if(a.recoverable)this.trace(i);else{var d=new Error(i);throw d.hash=a,d}},"parseError"),parse:s(function(i){var a=this,d=[0],u=[],y=[null],o=[],S=this.table,k="",M=0,P=0,B=2,J=1,O=o.slice.call(arguments,1),_=Object.create(this.lexer),E={yy:{}};for(var v in this.yy)Object.prototype.hasOwnProperty.call(this.yy,v)&&(E.yy[v]=this.yy[v]);_.setInput(i,E.yy),E.yy.lexer=_,E.yy.parser=this,typeof _.yylloc>"u"&&(_.yylloc={});var L=_.yylloc;o.push(L);var A=_.options&&_.options.ranges;typeof E.yy.parseError=="function"?this.parseError=E.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;function R(I){d.length=d.length-2*I,y.length=y.length-I,o.length=o.length-I}s(R,"popStack");function z(){var I;return I=u.pop()||_.lex()||J,typeof I!="number"&&(I instanceof Array&&(u=I,I=u.pop()),I=a.symbols_[I]||I),I}s(z,"lex");for(var w,C,N,K,F={},j,$,et,G;;){if(C=d[d.length-1],this.defaultActions[C]?N=this.defaultActions[C]:((w===null||typeof w>"u")&&(w=z()),N=S[C]&&S[C][w]),typeof N>"u"||!N.length||!N[0]){var Q="";G=[];for(j in S[C])this.terminals_[j]&&j>B&&G.push("'"+this.terminals_[j]+"'");_.showPosition?Q="Parse error on line "+(M+1)+`:
import{_ as s,c as xt,l as T,d as q,a2 as kt,a3 as _t,a4 as bt,a5 as vt,N as nt,D as wt,a6 as St,z as Et}from"./mermaid-vendor-D0f_SE0h.js";import"./feature-graph-NODQb6qW.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";var X=function(){var n=s(function(f,i,a,d){for(a=a||{},d=f.length;d--;a[f[d]]=i);return a},"o"),t=[6,8,10,11,12,14,16,17,20,21],e=[1,9],l=[1,10],r=[1,11],h=[1,12],c=[1,13],g=[1,16],m=[1,17],p={trace:s(function(){},"trace"),yy:{},symbols_:{error:2,start:3,timeline:4,document:5,EOF:6,line:7,SPACE:8,statement:9,NEWLINE:10,title:11,acc_title:12,acc_title_value:13,acc_descr:14,acc_descr_value:15,acc_descr_multiline_value:16,section:17,period_statement:18,event_statement:19,period:20,event:21,$accept:0,$end:1},terminals_:{2:"error",4:"timeline",6:"EOF",8:"SPACE",10:"NEWLINE",11:"title",12:"acc_title",13:"acc_title_value",14:"acc_descr",15:"acc_descr_value",16:"acc_descr_multiline_value",17:"section",20:"period",21:"event"},productions_:[0,[3,3],[5,0],[5,2],[7,2],[7,1],[7,1],[7,1],[9,1],[9,2],[9,2],[9,1],[9,1],[9,1],[9,1],[18,1],[19,1]],performAction:s(function(i,a,d,u,y,o,S){var k=o.length-1;switch(y){case 1:return o[k-1];case 2:this.$=[];break;case 3:o[k-1].push(o[k]),this.$=o[k-1];break;case 4:case 5:this.$=o[k];break;case 6:case 7:this.$=[];break;case 8:u.getCommonDb().setDiagramTitle(o[k].substr(6)),this.$=o[k].substr(6);break;case 9:this.$=o[k].trim(),u.getCommonDb().setAccTitle(this.$);break;case 10:case 11:this.$=o[k].trim(),u.getCommonDb().setAccDescription(this.$);break;case 12:u.addSection(o[k].substr(8)),this.$=o[k].substr(8);break;case 15:u.addTask(o[k],0,""),this.$=o[k];break;case 
16:u.addEvent(o[k].substr(2)),this.$=o[k];break}},"anonymous"),table:[{3:1,4:[1,2]},{1:[3]},n(t,[2,2],{5:3}),{6:[1,4],7:5,8:[1,6],9:7,10:[1,8],11:e,12:l,14:r,16:h,17:c,18:14,19:15,20:g,21:m},n(t,[2,7],{1:[2,1]}),n(t,[2,3]),{9:18,11:e,12:l,14:r,16:h,17:c,18:14,19:15,20:g,21:m},n(t,[2,5]),n(t,[2,6]),n(t,[2,8]),{13:[1,19]},{15:[1,20]},n(t,[2,11]),n(t,[2,12]),n(t,[2,13]),n(t,[2,14]),n(t,[2,15]),n(t,[2,16]),n(t,[2,4]),n(t,[2,9]),n(t,[2,10])],defaultActions:{},parseError:s(function(i,a){if(a.recoverable)this.trace(i);else{var d=new Error(i);throw d.hash=a,d}},"parseError"),parse:s(function(i){var a=this,d=[0],u=[],y=[null],o=[],S=this.table,k="",M=0,P=0,B=2,J=1,O=o.slice.call(arguments,1),_=Object.create(this.lexer),E={yy:{}};for(var v in this.yy)Object.prototype.hasOwnProperty.call(this.yy,v)&&(E.yy[v]=this.yy[v]);_.setInput(i,E.yy),E.yy.lexer=_,E.yy.parser=this,typeof _.yylloc>"u"&&(_.yylloc={});var L=_.yylloc;o.push(L);var A=_.options&&_.options.ranges;typeof E.yy.parseError=="function"?this.parseError=E.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;function R(I){d.length=d.length-2*I,y.length=y.length-I,o.length=o.length-I}s(R,"popStack");function z(){var I;return I=u.pop()||_.lex()||J,typeof I!="number"&&(I instanceof Array&&(u=I,I=u.pop()),I=a.symbols_[I]||I),I}s(z,"lex");for(var w,C,N,K,F={},j,$,et,G;;){if(C=d[d.length-1],this.defaultActions[C]?N=this.defaultActions[C]:((w===null||typeof w>"u")&&(w=z()),N=S[C]&&S[C][w]),typeof N>"u"||!N.length||!N[0]){var Q="";G=[];for(j in S[C])this.terminals_[j]&&j>B&&G.push("'"+this.terminals_[j]+"'");_.showPosition?Q="Parse error on line "+(M+1)+`:
`+_.showPosition()+`
Expecting `+G.join(", ")+", got '"+(this.terminals_[w]||w)+"'":Q="Parse error on line "+(M+1)+": Unexpected "+(w==J?"end of input":"'"+(this.terminals_[w]||w)+"'"),this.parseError(Q,{text:_.match,token:this.terminals_[w]||w,line:_.yylineno,loc:L,expected:G})}if(N[0]instanceof Array&&N.length>1)throw new Error("Parse Error: multiple actions possible at state: "+C+", token: "+w);switch(N[0]){case 1:d.push(w),y.push(_.yytext),o.push(_.yylloc),d.push(N[1]),w=null,P=_.yyleng,k=_.yytext,M=_.yylineno,L=_.yylloc;break;case 2:if($=this.productions_[N[1]][1],F.$=y[y.length-$],F._$={first_line:o[o.length-($||1)].first_line,last_line:o[o.length-1].last_line,first_column:o[o.length-($||1)].first_column,last_column:o[o.length-1].last_column},A&&(F._$.range=[o[o.length-($||1)].range[0],o[o.length-1].range[1]]),K=this.performAction.apply(F,[k,P,M,E.yy,N[1],y,o].concat(O)),typeof K<"u")return K;$&&(d=d.slice(0,-1*$*2),y=y.slice(0,-1*$),o=o.slice(0,-1*$)),d.push(this.productions_[N[1]][0]),y.push(F.$),o.push(F._$),et=S[d[d.length-2]][d[d.length-1]],d.push(et);break;case 3:return!0}}return!0},"parse")},x=function(){var f={EOF:1,parseError:s(function(a,d){if(this.yy.parser)this.yy.parser.parseError(a,d);else throw new Error(a)},"parseError"),setInput:s(function(i,a){return this.yy=a||this.yy||{},this._input=i,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},"setInput"),input:s(function(){var i=this._input[0];this.yytext+=i,this.yyleng++,this.offset++,this.match+=i,this.matched+=i;var a=i.match(/(?:\r\n?|\n).*/g);return a?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),i},"input"),unput:s(function(i){var 
a=i.length,d=i.split(/(?:\r\n?|\n)/g);this._input=i+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-a),this.offset-=a;var u=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),d.length-1&&(this.yylineno-=d.length-1);var y=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:d?(d.length===u.length?this.yylloc.first_column:0)+u[u.length-d.length].length-d[0].length:this.yylloc.first_column-a},this.options.ranges&&(this.yylloc.range=[y[0],y[0]+this.yyleng-a]),this.yyleng=this.yytext.length,this},"unput"),more:s(function(){return this._more=!0,this},"more"),reject:s(function(){if(this.options.backtrack_lexer)this._backtrack=!0;else return this.parseError("Lexical error on line "+(this.yylineno+1)+`. You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).
`+this.showPosition(),{text:"",token:null,line:this.yylineno});return this},"reject"),less:s(function(i){this.unput(this.match.slice(i))},"less"),pastInput:s(function(){var i=this.matched.substr(0,this.matched.length-this.match.length);return(i.length>20?"...":"")+i.substr(-20).replace(/\n/g,"")},"pastInput"),upcomingInput:s(function(){var i=this.match;return i.length<20&&(i+=this._input.substr(0,20-i.length)),(i.substr(0,20)+(i.length>20?"...":"")).replace(/\n/g,"")},"upcomingInput"),showPosition:s(function(){var i=this.pastInput(),a=new Array(i.length+1).join("-");return i+this.upcomingInput()+`

View file

@ -8,18 +8,18 @@
<link rel="icon" type="image/png" href="favicon.png" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Lightrag</title>
<script type="module" crossorigin src="/webui/assets/index-CZQXgxUO.js"></script>
<script type="module" crossorigin src="/webui/assets/index-yRRg2BZk.js"></script>
<link rel="modulepreload" crossorigin href="/webui/assets/react-vendor-DEwriMA6.js">
<link rel="modulepreload" crossorigin href="/webui/assets/ui-vendor-CeCm8EER.js">
<link rel="modulepreload" crossorigin href="/webui/assets/graph-vendor-B-X5JegA.js">
<link rel="modulepreload" crossorigin href="/webui/assets/utils-vendor-BysuhMZA.js">
<link rel="modulepreload" crossorigin href="/webui/assets/feature-graph-D-mwOi0p.js">
<link rel="modulepreload" crossorigin href="/webui/assets/mermaid-vendor-BVBgFwCv.js">
<link rel="modulepreload" crossorigin href="/webui/assets/feature-graph-NODQb6qW.js">
<link rel="modulepreload" crossorigin href="/webui/assets/mermaid-vendor-D0f_SE0h.js">
<link rel="modulepreload" crossorigin href="/webui/assets/markdown-vendor-DmIvJdn7.js">
<link rel="modulepreload" crossorigin href="/webui/assets/feature-retrieval-BhEQ7fz5.js">
<link rel="modulepreload" crossorigin href="/webui/assets/feature-documents-CSExwz2a.js">
<link rel="modulepreload" crossorigin href="/webui/assets/feature-retrieval-DalFy9WB.js">
<link rel="modulepreload" crossorigin href="/webui/assets/feature-documents-oks3sUnM.js">
<link rel="stylesheet" crossorigin href="/webui/assets/feature-graph-BipNuM18.css">
<link rel="stylesheet" crossorigin href="/webui/assets/index-BwVd8c1u.css">
<link rel="stylesheet" crossorigin href="/webui/assets/index-DwO2XWaU.css">
</head>
<body>
<div id="root"></div>

View file

@ -41,19 +41,19 @@ class MilvusVectorDBStorage(BaseVectorStorage):
FieldSchema(
name="entity_name",
dtype=DataType.VARCHAR,
max_length=256,
max_length=512,
nullable=True,
),
FieldSchema(
name="entity_type",
dtype=DataType.VARCHAR,
max_length=64,
max_length=128,
nullable=True,
),
FieldSchema(
name="file_path",
dtype=DataType.VARCHAR,
max_length=512,
max_length=1024,
nullable=True,
),
]
@ -62,16 +62,16 @@ class MilvusVectorDBStorage(BaseVectorStorage):
elif "relationships" in self.namespace.lower():
specific_fields = [
FieldSchema(
name="src_id", dtype=DataType.VARCHAR, max_length=256, nullable=True
name="src_id", dtype=DataType.VARCHAR, max_length=512, nullable=True
),
FieldSchema(
name="tgt_id", dtype=DataType.VARCHAR, max_length=256, nullable=True
name="tgt_id", dtype=DataType.VARCHAR, max_length=512, nullable=True
),
FieldSchema(name="weight", dtype=DataType.DOUBLE, nullable=True),
FieldSchema(
name="file_path",
dtype=DataType.VARCHAR,
max_length=512,
max_length=1024,
nullable=True,
),
]
@ -88,7 +88,7 @@ class MilvusVectorDBStorage(BaseVectorStorage):
FieldSchema(
name="file_path",
dtype=DataType.VARCHAR,
max_length=512,
max_length=1024,
nullable=True,
),
]
@ -100,7 +100,7 @@ class MilvusVectorDBStorage(BaseVectorStorage):
FieldSchema(
name="file_path",
dtype=DataType.VARCHAR,
max_length=512,
max_length=1024,
nullable=True,
),
]

View file

@ -209,20 +209,20 @@ class PostgreSQLDB:
# Check column type
data_type = column_info.get("data_type")
if data_type == "timestamp with time zone":
logger.info(
if data_type == "timestamp without time zone":
logger.debug(
f"Column {table_name}.{column_name} is already timezone-aware, no migration needed"
)
continue
# Execute migration, explicitly specifying UTC timezone for interpreting original data
logger.info(
f"Migrating {table_name}.{column_name} to timezone-aware type"
f"Migrating {table_name}.{column_name} from {data_type} to TIMESTAMP(0) type"
)
migration_sql = f"""
ALTER TABLE {table_name}
ALTER COLUMN {column_name} TYPE TIMESTAMP(0) WITH TIME ZONE
USING {column_name} AT TIME ZONE 'UTC'
ALTER COLUMN {column_name} TYPE TIMESTAMP(0),
ALTER COLUMN {column_name} SET DEFAULT CURRENT_TIMESTAMP
"""
await self.execute(migration_sql)
@ -470,6 +470,115 @@ class PostgreSQLDB:
f"Failed to add llm_cache_list column to LIGHTRAG_DOC_CHUNKS: {e}"
)
async def _migrate_field_lengths(self):
"""Migrate database field lengths: entity_name, source_id, target_id, and file_path"""
# Define the field changes needed
field_migrations = [
{
"table": "LIGHTRAG_VDB_ENTITY",
"column": "entity_name",
"old_type": "character varying(255)",
"new_type": "VARCHAR(512)",
"description": "entity_name from 255 to 512",
},
{
"table": "LIGHTRAG_VDB_RELATION",
"column": "source_id",
"old_type": "character varying(256)",
"new_type": "VARCHAR(512)",
"description": "source_id from 256 to 512",
},
{
"table": "LIGHTRAG_VDB_RELATION",
"column": "target_id",
"old_type": "character varying(256)",
"new_type": "VARCHAR(512)",
"description": "target_id from 256 to 512",
},
{
"table": "LIGHTRAG_DOC_CHUNKS",
"column": "file_path",
"old_type": "character varying(256)",
"new_type": "TEXT",
"description": "file_path to TEXT NULL",
},
{
"table": "LIGHTRAG_VDB_CHUNKS",
"column": "file_path",
"old_type": "character varying(256)",
"new_type": "TEXT",
"description": "file_path to TEXT NULL",
},
]
for migration in field_migrations:
try:
# Check current column definition
check_column_sql = """
SELECT column_name, data_type, character_maximum_length, is_nullable
FROM information_schema.columns
WHERE table_name = $1 AND column_name = $2
"""
column_info = await self.query(
check_column_sql,
{
"table_name": migration["table"].lower(),
"column_name": migration["column"],
},
)
if not column_info:
logger.warning(
f"Column {migration['table']}.{migration['column']} does not exist, skipping migration"
)
continue
current_type = column_info.get("data_type", "").lower()
current_length = column_info.get("character_maximum_length")
# Check if migration is needed
needs_migration = False
if migration["column"] == "entity_name" and current_length == 255:
needs_migration = True
elif (
migration["column"] in ["source_id", "target_id"]
and current_length == 256
):
needs_migration = True
elif (
migration["column"] == "file_path"
and current_type == "character varying"
):
needs_migration = True
if needs_migration:
logger.info(
f"Migrating {migration['table']}.{migration['column']}: {migration['description']}"
)
# Execute the migration
alter_sql = f"""
ALTER TABLE {migration['table']}
ALTER COLUMN {migration['column']} TYPE {migration['new_type']}
"""
await self.execute(alter_sql)
logger.info(
f"Successfully migrated {migration['table']}.{migration['column']}"
)
else:
logger.debug(
f"Column {migration['table']}.{migration['column']} already has correct type, no migration needed"
)
except Exception as e:
# Log error but don't interrupt the process
logger.warning(
f"Failed to migrate {migration['table']}.{migration['column']}: {e}"
)
async def check_tables(self):
# First create all tables
for k, v in TABLES.items():
@ -582,6 +691,12 @@ class PostgreSQLDB:
f"PostgreSQL, Failed to migrate text chunks llm_cache_list field: {e}"
)
# Migrate field lengths for entity_name, source_id, target_id, and file_path
try:
await self._migrate_field_lengths()
except Exception as e:
logger.error(f"PostgreSQL, Failed to migrate field lengths: {e}")
async def query(
self,
sql: str,
@ -939,7 +1054,8 @@ class PGKVStorage(BaseKVStorage):
return
if is_namespace(self.namespace, NameSpace.KV_STORE_TEXT_CHUNKS):
current_time = datetime.datetime.now(timezone.utc)
# Get current UTC time and convert to naive datetime for database storage
current_time = datetime.datetime.now(timezone.utc).replace(tzinfo=None)
for k, v in data.items():
upsert_sql = SQL_TEMPLATES["upsert_text_chunk"]
_data = {
@ -1177,8 +1293,8 @@ class PGVectorStorage(BaseVectorStorage):
if not data:
return
# Get current time with UTC timezone
current_time = datetime.datetime.now(timezone.utc)
# Get current UTC time and convert to naive datetime for database storage
current_time = datetime.datetime.now(timezone.utc).replace(tzinfo=None)
list_data = [
{
"__id__": k,
@ -1374,6 +1490,15 @@ class PGVectorStorage(BaseVectorStorage):
class PGDocStatusStorage(DocStatusStorage):
db: PostgreSQLDB = field(default=None)
def _format_datetime_with_timezone(self, dt):
"""Convert datetime to ISO format string with timezone info"""
if dt is None:
return None
# If no timezone info, assume it's UTC time
if dt.tzinfo is None:
dt = dt.replace(tzinfo=timezone.utc)
return dt.isoformat()
async def initialize(self):
if self.db is None:
self.db = await ClientManager.get_client()
@ -1433,14 +1558,18 @@ class PGDocStatusStorage(DocStatusStorage):
except json.JSONDecodeError:
chunks_list = []
# Convert datetime objects to ISO format strings with timezone info
created_at = self._format_datetime_with_timezone(result[0]["created_at"])
updated_at = self._format_datetime_with_timezone(result[0]["updated_at"])
return dict(
content=result[0]["content"],
content_length=result[0]["content_length"],
content_summary=result[0]["content_summary"],
status=result[0]["status"],
chunks_count=result[0]["chunks_count"],
created_at=result[0]["created_at"],
updated_at=result[0]["updated_at"],
created_at=created_at,
updated_at=updated_at,
file_path=result[0]["file_path"],
chunks_list=chunks_list,
)
@ -1468,6 +1597,10 @@ class PGDocStatusStorage(DocStatusStorage):
except json.JSONDecodeError:
chunks_list = []
# Convert datetime objects to ISO format strings with timezone info
created_at = self._format_datetime_with_timezone(row["created_at"])
updated_at = self._format_datetime_with_timezone(row["updated_at"])
processed_results.append(
{
"content": row["content"],
@ -1475,8 +1608,8 @@ class PGDocStatusStorage(DocStatusStorage):
"content_summary": row["content_summary"],
"status": row["status"],
"chunks_count": row["chunks_count"],
"created_at": row["created_at"],
"updated_at": row["updated_at"],
"created_at": created_at,
"updated_at": updated_at,
"file_path": row["file_path"],
"chunks_list": chunks_list,
}
@ -1514,13 +1647,17 @@ class PGDocStatusStorage(DocStatusStorage):
except json.JSONDecodeError:
chunks_list = []
# Convert datetime objects to ISO format strings with timezone info
created_at = self._format_datetime_with_timezone(element["created_at"])
updated_at = self._format_datetime_with_timezone(element["updated_at"])
docs_by_status[element["id"]] = DocProcessingStatus(
content=element["content"],
content_summary=element["content_summary"],
content_length=element["content_length"],
status=element["status"],
created_at=element["created_at"],
updated_at=element["updated_at"],
created_at=created_at,
updated_at=updated_at,
chunks_count=element["chunks_count"],
file_path=element["file_path"],
chunks_list=chunks_list,
@ -1572,19 +1709,26 @@ class PGDocStatusStorage(DocStatusStorage):
return
def parse_datetime(dt_str):
"""Parse datetime and ensure it's stored as UTC time in database"""
if dt_str is None:
return None
if isinstance(dt_str, (datetime.date, datetime.datetime)):
# If it's a datetime object without timezone info, remove timezone info
# If it's a datetime object
if isinstance(dt_str, datetime.datetime):
# Remove timezone info, return naive datetime object
return dt_str.replace(tzinfo=None)
# If no timezone info, assume it's UTC
if dt_str.tzinfo is None:
dt_str = dt_str.replace(tzinfo=timezone.utc)
# Convert to UTC and remove timezone info for storage
return dt_str.astimezone(timezone.utc).replace(tzinfo=None)
return dt_str
try:
# Process ISO format string with timezone
dt = datetime.datetime.fromisoformat(dt_str)
# Remove timezone info, return naive datetime object
return dt.replace(tzinfo=None)
# If no timezone info, assume it's UTC
if dt.tzinfo is None:
dt = dt.replace(tzinfo=timezone.utc)
# Convert to UTC and remove timezone info for storage
return dt.astimezone(timezone.utc).replace(tzinfo=None)
except (ValueError, TypeError):
logger.warning(f"Unable to parse datetime string: {dt_str}")
return None
@ -2980,8 +3124,8 @@ TABLES = {
doc_name VARCHAR(1024),
content TEXT,
meta JSONB,
create_time TIMESTAMP(0),
update_time TIMESTAMP(0),
create_time TIMESTAMP(0) DEFAULT CURRENT_TIMESTAMP,
update_time TIMESTAMP(0) DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT LIGHTRAG_DOC_FULL_PK PRIMARY KEY (workspace, id)
)"""
},
@ -2993,10 +3137,10 @@ TABLES = {
chunk_order_index INTEGER,
tokens INTEGER,
content TEXT,
file_path VARCHAR(256),
file_path TEXT NULL,
llm_cache_list JSONB NULL DEFAULT '[]'::jsonb,
create_time TIMESTAMP(0) WITH TIME ZONE,
update_time TIMESTAMP(0) WITH TIME ZONE,
create_time TIMESTAMP(0) DEFAULT CURRENT_TIMESTAMP,
update_time TIMESTAMP(0) DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT LIGHTRAG_DOC_CHUNKS_PK PRIMARY KEY (workspace, id)
)"""
},
@ -3009,9 +3153,9 @@ TABLES = {
tokens INTEGER,
content TEXT,
content_vector VECTOR,
file_path VARCHAR(256),
create_time TIMESTAMP(0) WITH TIME ZONE,
update_time TIMESTAMP(0) WITH TIME ZONE,
file_path TEXT NULL,
create_time TIMESTAMP(0) DEFAULT CURRENT_TIMESTAMP,
update_time TIMESTAMP(0) DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT LIGHTRAG_VDB_CHUNKS_PK PRIMARY KEY (workspace, id)
)"""
},
@ -3019,11 +3163,11 @@ TABLES = {
"ddl": """CREATE TABLE LIGHTRAG_VDB_ENTITY (
id VARCHAR(255),
workspace VARCHAR(255),
entity_name VARCHAR(255),
entity_name VARCHAR(512),
content TEXT,
content_vector VECTOR,
create_time TIMESTAMP(0) WITH TIME ZONE,
update_time TIMESTAMP(0) WITH TIME ZONE,
create_time TIMESTAMP(0) DEFAULT CURRENT_TIMESTAMP,
update_time TIMESTAMP(0) DEFAULT CURRENT_TIMESTAMP,
chunk_ids VARCHAR(255)[] NULL,
file_path TEXT NULL,
CONSTRAINT LIGHTRAG_VDB_ENTITY_PK PRIMARY KEY (workspace, id)
@ -3033,12 +3177,12 @@ TABLES = {
"ddl": """CREATE TABLE LIGHTRAG_VDB_RELATION (
id VARCHAR(255),
workspace VARCHAR(255),
source_id VARCHAR(256),
target_id VARCHAR(256),
source_id VARCHAR(512),
target_id VARCHAR(512),
content TEXT,
content_vector VECTOR,
create_time TIMESTAMP(0) WITH TIME ZONE,
update_time TIMESTAMP(0) WITH TIME ZONE,
create_time TIMESTAMP(0) DEFAULT CURRENT_TIMESTAMP,
update_time TIMESTAMP(0) DEFAULT CURRENT_TIMESTAMP,
chunk_ids VARCHAR(255)[] NULL,
file_path TEXT NULL,
CONSTRAINT LIGHTRAG_VDB_RELATION_PK PRIMARY KEY (workspace, id)
@ -3053,7 +3197,7 @@ TABLES = {
return_value TEXT,
chunk_id VARCHAR(255) NULL,
create_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
update_time TIMESTAMP,
update_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT LIGHTRAG_LLM_CACHE_PK PRIMARY KEY (workspace, mode, id)
)"""
},
@ -3068,8 +3212,8 @@ TABLES = {
status varchar(64) NULL,
file_path TEXT NULL,
chunks_list JSONB NULL DEFAULT '[]'::jsonb,
created_at timestamp with time zone DEFAULT CURRENT_TIMESTAMP NULL,
updated_at timestamp with time zone DEFAULT CURRENT_TIMESTAMP NULL,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT LIGHTRAG_DOC_STATUS_PK PRIMARY KEY (workspace, id)
)"""
},
@ -3084,11 +3228,13 @@ SQL_TEMPLATES = {
"get_by_id_text_chunks": """SELECT id, tokens, COALESCE(content, '') as content,
chunk_order_index, full_doc_id, file_path,
COALESCE(llm_cache_list, '[]'::jsonb) as llm_cache_list,
create_time, update_time
EXTRACT(EPOCH FROM create_time)::BIGINT as create_time,
EXTRACT(EPOCH FROM update_time)::BIGINT as update_time
FROM LIGHTRAG_DOC_CHUNKS WHERE workspace=$1 AND id=$2
""",
"get_by_id_llm_response_cache": """SELECT id, original_prompt, return_value, mode, chunk_id, cache_type,
create_time, update_time
EXTRACT(EPOCH FROM create_time)::BIGINT as create_time,
EXTRACT(EPOCH FROM update_time)::BIGINT as update_time
FROM LIGHTRAG_LLM_CACHE WHERE workspace=$1 AND id=$2
""",
"get_by_mode_id_llm_response_cache": """SELECT id, original_prompt, return_value, mode, chunk_id
@ -3100,11 +3246,13 @@ SQL_TEMPLATES = {
"get_by_ids_text_chunks": """SELECT id, tokens, COALESCE(content, '') as content,
chunk_order_index, full_doc_id, file_path,
COALESCE(llm_cache_list, '[]'::jsonb) as llm_cache_list,
create_time, update_time
EXTRACT(EPOCH FROM create_time)::BIGINT as create_time,
EXTRACT(EPOCH FROM update_time)::BIGINT as update_time
FROM LIGHTRAG_DOC_CHUNKS WHERE workspace=$1 AND id IN ({ids})
""",
"get_by_ids_llm_response_cache": """SELECT id, original_prompt, return_value, mode, chunk_id, cache_type,
create_time, update_time
EXTRACT(EPOCH FROM create_time)::BIGINT as create_time,
EXTRACT(EPOCH FROM update_time)::BIGINT as update_time
FROM LIGHTRAG_LLM_CACHE WHERE workspace=$1 AND id IN ({ids})
""",
"filter_keys": "SELECT id FROM {table_name} WHERE workspace=$1 AND id IN ({ids})",

View file

@ -27,7 +27,7 @@ config = configparser.ConfigParser()
config.read("config.ini", "utf-8")
# Constants for Redis connection pool
MAX_CONNECTIONS = 50
MAX_CONNECTIONS = 100
SOCKET_TIMEOUT = 5.0
SOCKET_CONNECT_TIMEOUT = 3.0
@ -36,24 +36,55 @@ class RedisConnectionManager:
"""Shared Redis connection pool manager to avoid creating multiple pools for the same Redis URI"""
_pools = {}
_pool_refs = {} # Track reference count for each pool
_lock = threading.Lock()
@classmethod
def get_pool(cls, redis_url: str) -> ConnectionPool:
"""Get or create a connection pool for the given Redis URL"""
if redis_url not in cls._pools:
with cls._lock:
if redis_url not in cls._pools:
cls._pools[redis_url] = ConnectionPool.from_url(
redis_url,
max_connections=MAX_CONNECTIONS,
decode_responses=True,
socket_timeout=SOCKET_TIMEOUT,
socket_connect_timeout=SOCKET_CONNECT_TIMEOUT,
)
logger.info(f"Created shared Redis connection pool for {redis_url}")
with cls._lock:
if redis_url not in cls._pools:
cls._pools[redis_url] = ConnectionPool.from_url(
redis_url,
max_connections=MAX_CONNECTIONS,
decode_responses=True,
socket_timeout=SOCKET_TIMEOUT,
socket_connect_timeout=SOCKET_CONNECT_TIMEOUT,
)
cls._pool_refs[redis_url] = 0
logger.info(f"Created shared Redis connection pool for {redis_url}")
# Increment reference count
cls._pool_refs[redis_url] += 1
logger.debug(
f"Redis pool {redis_url} reference count: {cls._pool_refs[redis_url]}"
)
return cls._pools[redis_url]
@classmethod
def release_pool(cls, redis_url: str):
"""Release a reference to the connection pool"""
with cls._lock:
if redis_url in cls._pool_refs:
cls._pool_refs[redis_url] -= 1
logger.debug(
f"Redis pool {redis_url} reference count: {cls._pool_refs[redis_url]}"
)
# If no more references, close the pool
if cls._pool_refs[redis_url] <= 0:
try:
cls._pools[redis_url].disconnect()
logger.info(
f"Closed Redis connection pool for {redis_url} (no more references)"
)
except Exception as e:
logger.error(f"Error closing Redis pool for {redis_url}: {e}")
finally:
del cls._pools[redis_url]
del cls._pool_refs[redis_url]
@classmethod
def close_all_pools(cls):
"""Close all connection pools (for cleanup)"""
@ -65,6 +96,7 @@ class RedisConnectionManager:
except Exception as e:
logger.error(f"Error closing Redis pool for {url}: {e}")
cls._pools.clear()
cls._pool_refs.clear()
@final
@ -94,35 +126,60 @@ class RedisKVStorage(BaseKVStorage):
logger.debug(f"Final namespace with workspace prefix: '{self.namespace}'")
# When workspace is empty, keep the original namespace unchanged
redis_url = os.environ.get(
self._redis_url = os.environ.get(
"REDIS_URI", config.get("redis", "uri", fallback="redis://localhost:6379")
)
# Use shared connection pool
self._pool = RedisConnectionManager.get_pool(redis_url)
self._redis = Redis(connection_pool=self._pool)
logger.info(
f"Initialized Redis KV storage for {self.namespace} using shared connection pool"
)
self._pool = None
self._redis = None
self._initialized = False
try:
# Use shared connection pool
self._pool = RedisConnectionManager.get_pool(self._redis_url)
self._redis = Redis(connection_pool=self._pool)
logger.info(
f"Initialized Redis KV storage for {self.namespace} using shared connection pool"
)
except Exception as e:
# Clean up on initialization failure
if self._redis_url:
RedisConnectionManager.release_pool(self._redis_url)
logger.error(f"Failed to initialize Redis KV storage: {e}")
raise
async def initialize(self):
"""Initialize Redis connection and migrate legacy cache structure if needed"""
if self._initialized:
return
# Test connection
try:
async with self._get_redis_connection() as redis:
await redis.ping()
logger.info(f"Connected to Redis for namespace {self.namespace}")
self._initialized = True
except Exception as e:
logger.error(f"Failed to connect to Redis: {e}")
# Clean up on connection failure
await self.close()
raise
# Migrate legacy cache structure if this is a cache namespace
if self.namespace.endswith("_cache"):
await self._migrate_legacy_cache_structure()
try:
await self._migrate_legacy_cache_structure()
except Exception as e:
logger.error(f"Failed to migrate legacy cache structure: {e}")
# Don't fail initialization for migration errors, just log them
@asynccontextmanager
async def _get_redis_connection(self):
"""Safe context manager for Redis operations."""
if not self._redis:
raise ConnectionError("Redis connection not initialized")
try:
# Use the existing Redis instance with shared pool
yield self._redis
except ConnectionError as e:
logger.error(f"Redis connection error in {self.namespace}: {e}")
@ -137,11 +194,23 @@ class RedisKVStorage(BaseKVStorage):
raise
async def close(self):
"""Close the Redis connection pool to prevent resource leaks."""
"""Close the Redis connection and release pool reference to prevent resource leaks."""
if hasattr(self, "_redis") and self._redis:
await self._redis.close()
await self._pool.disconnect()
logger.debug(f"Closed Redis connection pool for {self.namespace}")
try:
await self._redis.close()
logger.debug(f"Closed Redis connection for {self.namespace}")
except Exception as e:
logger.error(f"Error closing Redis connection: {e}")
finally:
self._redis = None
# Release the pool reference (will auto-close pool if no more references)
if hasattr(self, "_redis_url") and self._redis_url:
RedisConnectionManager.release_pool(self._redis_url)
self._pool = None
logger.debug(
f"Released Redis connection pool reference for {self.namespace}"
)
async def __aenter__(self):
"""Support for async context manager."""
@ -507,32 +576,53 @@ class RedisDocStatusStorage(DocStatusStorage):
logger.debug(f"Final namespace with workspace prefix: '{self.namespace}'")
# When workspace is empty, keep the original namespace unchanged
redis_url = os.environ.get(
self._redis_url = os.environ.get(
"REDIS_URI", config.get("redis", "uri", fallback="redis://localhost:6379")
)
# Use shared connection pool
self._pool = RedisConnectionManager.get_pool(redis_url)
self._redis = Redis(connection_pool=self._pool)
logger.info(
f"Initialized Redis doc status storage for {self.namespace} using shared connection pool"
)
self._pool = None
self._redis = None
self._initialized = False
try:
# Use shared connection pool
self._pool = RedisConnectionManager.get_pool(self._redis_url)
self._redis = Redis(connection_pool=self._pool)
logger.info(
f"Initialized Redis doc status storage for {self.namespace} using shared connection pool"
)
except Exception as e:
# Clean up on initialization failure
if self._redis_url:
RedisConnectionManager.release_pool(self._redis_url)
logger.error(f"Failed to initialize Redis doc status storage: {e}")
raise
    async def initialize(self):
        """Initialize Redis connection.

        Idempotent: returns immediately when already initialized. Verifies the
        shared-pool connection with a PING; on failure the storage is closed
        (releasing the pool reference) and the error is re-raised.
        """
        if self._initialized:
            return

        try:
            async with self._get_redis_connection() as redis:
                # PING proves the pooled connection is actually reachable
                await redis.ping()
                logger.info(
                    f"Connected to Redis for doc status namespace {self.namespace}"
                )
                self._initialized = True
        except Exception as e:
            logger.error(f"Failed to connect to Redis for doc status: {e}")
            # Clean up on connection failure
            await self.close()
            raise
@asynccontextmanager
async def _get_redis_connection(self):
"""Safe context manager for Redis operations."""
if not self._redis:
raise ConnectionError("Redis connection not initialized")
try:
# Use the existing Redis instance with shared pool
yield self._redis
except ConnectionError as e:
logger.error(f"Redis connection error in doc status {self.namespace}: {e}")
@ -547,10 +637,23 @@ class RedisDocStatusStorage(DocStatusStorage):
raise
async def close(self):
"""Close the Redis connection."""
"""Close the Redis connection and release pool reference to prevent resource leaks."""
if hasattr(self, "_redis") and self._redis:
await self._redis.close()
logger.debug(f"Closed Redis connection for doc status {self.namespace}")
try:
await self._redis.close()
logger.debug(f"Closed Redis connection for doc status {self.namespace}")
except Exception as e:
logger.error(f"Error closing Redis connection: {e}")
finally:
self._redis = None
# Release the pool reference (will auto-close pool if no more references)
if hasattr(self, "_redis_url") and self._redis_url:
RedisConnectionManager.release_pool(self._redis_url)
self._pool = None
logger.debug(
f"Released Redis connection pool reference for doc status {self.namespace}"
)
async def __aenter__(self):
"""Support for async context manager."""

View file

@ -1,23 +1,46 @@
import os
import sys
import asyncio
import multiprocessing as mp
from multiprocessing.synchronize import Lock as ProcessLock
from multiprocessing import Manager
from typing import Any, Dict, Optional, Union, TypeVar, Generic
import time
import logging
from typing import Any, Dict, List, Optional, Union, TypeVar, Generic
# Define a direct print function for critical logs that must be visible in all processes
def direct_log(message, enable_output: bool = False, level: str = "DEBUG"):
    """
    Log a message directly to stderr to ensure visibility in all processes,
    including the Gunicorn master process.

    The message is emitted when ``enable_output`` is True, or when ``level``
    is at or above the lightrag logger's effective level.

    Args:
        message: The message to log
        enable_output: Force output regardless of logger level (default: False)
        level: Log level name (default: "DEBUG")
    """
    # Get the current logger level from the lightrag logger
    try:
        from lightrag.utils import logger

        current_level = logger.getEffectiveLevel()
    except ImportError:
        # Fallback if lightrag.utils is not available
        current_level = logging.INFO

    # Convert string level to numeric level for comparison; unknown
    # names fall back to DEBUG (use logging constants, not magic numbers)
    level_mapping = {
        "DEBUG": logging.DEBUG,
        "INFO": logging.INFO,
        "WARNING": logging.WARNING,
        "ERROR": logging.ERROR,
        "CRITICAL": logging.CRITICAL,
    }
    message_level = level_mapping.get(level.upper(), logging.DEBUG)

    if enable_output or (message_level >= current_level):
        print(f"{level}: {message}", file=sys.stderr, flush=True)
@ -27,6 +50,23 @@ LockType = Union[ProcessLock, asyncio.Lock]
_is_multiprocess = None
_workers = None
_manager = None
# Global singleton data for multi-process keyed locks.
# All four are created lazily by initialize_share_data(); None until then.
_lock_registry: Optional[Dict[str, mp.synchronize.Lock]] = None  # combined_key -> manager Lock proxy
_lock_registry_count: Optional[Dict[str, int]] = None  # combined_key -> reference count
_lock_cleanup_data: Optional[Dict[str, float]] = None  # combined_key -> release timestamp pending cleanup
_registry_guard = None  # manager RLock guarding the three dicts above

# Timeout for keyed locks in seconds (Default 300)
CLEANUP_KEYED_LOCKS_AFTER_SECONDS = 300
# Cleanup pending list threshold for triggering cleanup (Default 500)
CLEANUP_THRESHOLD = 500
# Minimum interval between cleanup operations in seconds (Default 30)
MIN_CLEANUP_INTERVAL_SECONDS = 30

# Track the earliest cleanup time for efficient cleanup triggering (multiprocess locks only)
_earliest_mp_cleanup_time: Optional[float] = None
# Track the last cleanup time to enforce minimum interval (multiprocess locks only)
_last_mp_cleanup_time: Optional[float] = None
_initialized = None
# shared data for storage across processes
@ -40,10 +80,37 @@ _internal_lock: Optional[LockType] = None
_pipeline_status_lock: Optional[LockType] = None
_graph_db_lock: Optional[LockType] = None
_data_init_lock: Optional[LockType] = None
# Manager for all keyed locks
_storage_keyed_lock: Optional["KeyedUnifiedLock"] = None
# async locks for coroutine synchronization in multiprocess mode
_async_locks: Optional[Dict[str, asyncio.Lock]] = None
# Toggle to trace keyed-lock acquire/release balancing during debugging.
DEBUG_LOCKS = False

# Running count of currently-held keyed locks (only maintained when
# DEBUG_LOCKS is True).
_debug_n_locks_acquired: int = 0


def inc_debug_n_locks_acquired():
    """Record one keyed-lock acquisition when debug tracing is enabled."""
    global _debug_n_locks_acquired
    if not DEBUG_LOCKS:
        return
    _debug_n_locks_acquired += 1
    print(f"DEBUG: Keyed Lock acquired, total: {_debug_n_locks_acquired:>5}")


def dec_debug_n_locks_acquired():
    """Record one keyed-lock release when debug tracing is enabled.

    Raises:
        RuntimeError: when a release is recorded with no lock held.
    """
    global _debug_n_locks_acquired
    if not DEBUG_LOCKS:
        return
    if _debug_n_locks_acquired <= 0:
        raise RuntimeError("Attempting to release lock when no locks are acquired")
    _debug_n_locks_acquired -= 1
    print(f"DEBUG: Keyed Lock released, total: {_debug_n_locks_acquired:>5}")


def get_debug_n_locks_acquired():
    """Return the current acquired-lock counter (stays 0 unless DEBUG_LOCKS)."""
    return _debug_n_locks_acquired
class UnifiedLock(Generic[T]):
"""Provide a unified lock interface type for asyncio.Lock and multiprocessing.Lock"""
@ -65,17 +132,8 @@ class UnifiedLock(Generic[T]):
async def __aenter__(self) -> "UnifiedLock[T]":
try:
# direct_log(
# f"== Lock == Process {self._pid}: Acquiring lock '{self._name}' (async={self._is_async})",
# enable_output=self._enable_logging,
# )
# If in multiprocess mode and async lock exists, acquire it first
if not self._is_async and self._async_lock is not None:
# direct_log(
# f"== Lock == Process {self._pid}: Acquiring async lock for '{self._name}'",
# enable_output=self._enable_logging,
# )
await self._async_lock.acquire()
direct_log(
f"== Lock == Process {self._pid}: Async lock for '{self._name}' acquired",
@ -210,6 +268,557 @@ class UnifiedLock(Generic[T]):
)
raise
def locked(self) -> bool:
if self._is_async:
return self._lock.locked()
else:
return self._lock.locked()
def _get_combined_key(factory_name: str, key: str) -> str:
"""Return the combined key for the factory and key."""
return f"{factory_name}:{key}"
def _perform_lock_cleanup(
    lock_type: str,
    cleanup_data: Dict[str, float],
    lock_registry: Optional[Dict[str, Any]],
    lock_count: Optional[Dict[str, int]],
    earliest_cleanup_time: Optional[float],
    last_cleanup_time: Optional[float],
    current_time: float,
    threshold_check: bool = True,
) -> tuple[int, Optional[float], Optional[float]]:
    """
    Generic lock cleanup function to unify cleanup logic for both multiprocess and async locks.

    Callers must hold the appropriate guard (``_registry_guard`` for "mp");
    this function mutates ``cleanup_data``/``lock_registry``/``lock_count``
    in place and returns the new timing state for the caller to store.

    Args:
        lock_type: Lock type identifier ("mp" or "async")
        cleanup_data: Cleanup data dictionary (combined_key -> release timestamp)
        lock_registry: Lock registry dictionary (can be None for async locks)
        lock_count: Lock count dictionary (can be None for async locks)
        earliest_cleanup_time: Earliest cleanup time
        last_cleanup_time: Last cleanup time
        current_time: Current time
        threshold_check: Whether to check threshold condition (default True, set to False in cleanup_expired_locks)

    Returns:
        tuple: (cleaned_count, new_earliest_time, new_last_cleanup_time)
    """
    # Nothing pending -> nothing to do
    if len(cleanup_data) == 0:
        return 0, earliest_cleanup_time, last_cleanup_time

    # If threshold check is needed and threshold not reached, return directly
    if threshold_check and len(cleanup_data) < CLEANUP_THRESHOLD:
        return 0, earliest_cleanup_time, last_cleanup_time

    # Time rollback detection (wall clock moved backwards)
    if last_cleanup_time is not None and current_time < last_cleanup_time:
        direct_log(
            f"== {lock_type} Lock == Time rollback detected, resetting cleanup time",
            level="WARNING",
            enable_output=False,
        )
        last_cleanup_time = None

    # Check cleanup conditions: something has actually expired AND we are not
    # cleaning more often than MIN_CLEANUP_INTERVAL_SECONDS
    has_expired_locks = (
        earliest_cleanup_time is not None
        and current_time - earliest_cleanup_time > CLEANUP_KEYED_LOCKS_AFTER_SECONDS
    )
    interval_satisfied = (
        last_cleanup_time is None
        or current_time - last_cleanup_time > MIN_CLEANUP_INTERVAL_SECONDS
    )

    if not (has_expired_locks and interval_satisfied):
        return 0, earliest_cleanup_time, last_cleanup_time

    try:
        cleaned_count = 0
        new_earliest_time = None

        # Calculate total count before cleanup (for logging only)
        total_cleanup_len = len(cleanup_data)

        # Perform cleanup operation; iterate over a snapshot since we mutate
        for cleanup_key, cleanup_time in list(cleanup_data.items()):
            if current_time - cleanup_time > CLEANUP_KEYED_LOCKS_AFTER_SECONDS:
                # Remove from cleanup data
                cleanup_data.pop(cleanup_key, None)
                # Remove from lock registry if exists
                if lock_registry is not None:
                    lock_registry.pop(cleanup_key, None)
                if lock_count is not None:
                    lock_count.pop(cleanup_key, None)
                cleaned_count += 1
            else:
                # Track the earliest time among remaining locks
                if new_earliest_time is None or cleanup_time < new_earliest_time:
                    new_earliest_time = cleanup_time

        # Update state only after successful cleanup
        if cleaned_count > 0:
            new_last_cleanup_time = current_time

            # Log cleanup results
            next_cleanup_in = max(
                (new_earliest_time + CLEANUP_KEYED_LOCKS_AFTER_SECONDS - current_time)
                if new_earliest_time
                else float("inf"),
                MIN_CLEANUP_INTERVAL_SECONDS,
            )
            if lock_type == "async":
                direct_log(
                    f"== {lock_type} Lock == Cleaned up {cleaned_count}/{total_cleanup_len} expired {lock_type} locks, "
                    f"next cleanup in {next_cleanup_in:.1f}s",
                    enable_output=False,
                    level="INFO",
                )
            else:
                direct_log(
                    f"== {lock_type} Lock == Cleaned up {cleaned_count}/{total_cleanup_len} expired locks, "
                    f"next cleanup in {next_cleanup_in:.1f}s",
                    enable_output=False,
                    level="INFO",
                )

            return cleaned_count, new_earliest_time, new_last_cleanup_time
        else:
            return 0, earliest_cleanup_time, last_cleanup_time

    except Exception as e:
        # Best-effort: report and keep the previous timing state unchanged
        direct_log(
            f"== {lock_type} Lock == Cleanup failed: {e}",
            level="ERROR",
            enable_output=False,
        )
        return 0, earliest_cleanup_time, last_cleanup_time
def _get_or_create_shared_raw_mp_lock(
    factory_name: str, key: str
) -> Optional[mp.synchronize.Lock]:
    """Return the *singleton* manager.Lock() proxy for keyed lock, creating if needed.

    Returns None in single-process mode. Also increments the shared reference
    count for the combined key; all registry access happens under
    ``_registry_guard``.
    """
    # Single-process mode has no shared manager locks
    if not _is_multiprocess:
        return None

    with _registry_guard:
        combined_key = _get_combined_key(factory_name, key)
        raw = _lock_registry.get(combined_key)
        count = _lock_registry_count.get(combined_key)

        if raw is None:
            raw = _manager.Lock()
            _lock_registry[combined_key] = raw
            count = 0
        else:
            if count is None:
                # Registry and count dicts must stay in sync; a missing count
                # for an existing lock means shared state is corrupted
                raise RuntimeError(
                    f"Shared-Data lock registry for {factory_name} is corrupted for key {key}"
                )
            if (
                count == 0 and combined_key in _lock_cleanup_data
            ):  # Reusing an key waiting for cleanup, remove it from cleanup list
                _lock_cleanup_data.pop(combined_key)

        count += 1
        _lock_registry_count[combined_key] = count

        return raw
def _release_shared_raw_mp_lock(factory_name: str, key: str):
    """Release the *singleton* manager.Lock() proxy for *key*.

    Decrements the shared reference count; when it reaches zero the combined
    key is scheduled for delayed cleanup, and an opportunistic cleanup pass is
    attempted. No-op in single-process mode.
    """
    if not _is_multiprocess:
        return

    global _earliest_mp_cleanup_time, _last_mp_cleanup_time

    with _registry_guard:
        combined_key = _get_combined_key(factory_name, key)
        raw = _lock_registry.get(combined_key)
        count = _lock_registry_count.get(combined_key)

        if raw is None and count is None:
            # Already cleaned up (or never acquired): nothing to release
            return
        elif raw is None or count is None:
            raise RuntimeError(
                f"Shared-Data lock registry for {factory_name} is corrupted for key {key}"
            )

        count -= 1
        if count < 0:
            raise RuntimeError(
                f"Attempting to release lock for {key} more times than it was acquired"
            )
        _lock_registry_count[combined_key] = count

        current_time = time.time()
        if count == 0:
            # Last reference gone: mark for delayed cleanup
            _lock_cleanup_data[combined_key] = current_time
            # Update earliest multiprocess cleanup time (only when earlier)
            if (
                _earliest_mp_cleanup_time is None
                or current_time < _earliest_mp_cleanup_time
            ):
                _earliest_mp_cleanup_time = current_time

        # Use generic cleanup function (threshold-gated, so usually cheap)
        cleaned_count, new_earliest_time, new_last_cleanup_time = _perform_lock_cleanup(
            lock_type="mp",
            cleanup_data=_lock_cleanup_data,
            lock_registry=_lock_registry,
            lock_count=_lock_registry_count,
            earliest_cleanup_time=_earliest_mp_cleanup_time,
            last_cleanup_time=_last_mp_cleanup_time,
            current_time=current_time,
            threshold_check=True,
        )

        # Update global state if cleanup was performed
        if cleaned_count > 0:
            _earliest_mp_cleanup_time = new_earliest_time
            _last_mp_cleanup_time = new_last_cleanup_time
class KeyedUnifiedLock:
    """
    Manager for unified keyed locks, supporting both single and multi-process

    Keeps only a table of async keyed locks locally
    Fetches the multi-process keyed lock on every acquire
    Builds a fresh `UnifiedLock` each time, so `enable_logging`
    (or future options) can vary per call.
    Supports dynamic namespaces specified at lock usage time
    """

    def __init__(self, *, default_enable_logging: bool = True) -> None:
        self._default_enable_logging = default_enable_logging
        self._async_lock: Dict[str, asyncio.Lock] = {}  # local keyed locks
        self._async_lock_count: Dict[
            str, int
        ] = {}  # local keyed locks referenced count
        self._async_lock_cleanup_data: Dict[
            str, float
        ] = {}  # local keyed locks release timestamps pending cleanup
        self._mp_locks: Dict[
            str, mp.synchronize.Lock
        ] = {}  # multi-process lock proxies
        self._earliest_async_cleanup_time: Optional[float] = (
            None  # track earliest async cleanup time
        )
        self._last_async_cleanup_time: Optional[float] = (
            None  # track last async cleanup time for minimum interval
        )

    def __call__(
        self, namespace: str, keys: list[str], *, enable_logging: Optional[bool] = None
    ):
        """
        Ergonomic helper so you can write:
            async with storage_keyed_lock("namespace", ["key1", "key2"]):
                ...
        """
        if enable_logging is None:
            enable_logging = self._default_enable_logging
        return _KeyedLockContext(
            self,
            namespace=namespace,
            keys=keys,
            enable_logging=enable_logging,
        )

    def _get_or_create_async_lock(self, combined_key: str) -> asyncio.Lock:
        """Return the per-process async gate for *combined_key*, bumping its refcount."""
        async_lock = self._async_lock.get(combined_key)
        count = self._async_lock_count.get(combined_key, 0)
        if async_lock is None:
            async_lock = asyncio.Lock()
            self._async_lock[combined_key] = async_lock
        elif count == 0 and combined_key in self._async_lock_cleanup_data:
            # Reusing a key that was waiting for cleanup: cancel its expiry
            self._async_lock_cleanup_data.pop(combined_key)
        count += 1
        self._async_lock_count[combined_key] = count
        return async_lock

    def _release_async_lock(self, combined_key: str):
        """Drop one reference; at zero, schedule the async lock for delayed cleanup."""
        count = self._async_lock_count.get(combined_key, 0)
        count -= 1
        current_time = time.time()
        if count == 0:
            self._async_lock_cleanup_data[combined_key] = current_time
            # Update earliest async cleanup time (only when earlier)
            if (
                self._earliest_async_cleanup_time is None
                or current_time < self._earliest_async_cleanup_time
            ):
                self._earliest_async_cleanup_time = current_time
        self._async_lock_count[combined_key] = count

        # Use generic cleanup function (threshold-gated)
        cleaned_count, new_earliest_time, new_last_cleanup_time = _perform_lock_cleanup(
            lock_type="async",
            cleanup_data=self._async_lock_cleanup_data,
            lock_registry=self._async_lock,
            lock_count=self._async_lock_count,
            earliest_cleanup_time=self._earliest_async_cleanup_time,
            last_cleanup_time=self._last_async_cleanup_time,
            current_time=current_time,
            threshold_check=True,
        )

        # Update instance state if cleanup was performed
        if cleaned_count > 0:
            self._earliest_async_cleanup_time = new_earliest_time
            self._last_async_cleanup_time = new_last_cleanup_time

    def _get_lock_for_key(
        self, namespace: str, key: str, enable_logging: bool = False
    ) -> UnifiedLock:
        """Build a fresh UnifiedLock wrapping the async gate (and mp proxy, if any)."""
        # 1. Create combined key for this namespace:key combination
        combined_key = _get_combined_key(namespace, key)

        # 2. get (or create) the per-process async gate for this combined key
        #    Is synchronous, so no need to acquire a lock
        async_lock = self._get_or_create_async_lock(combined_key)

        # 3. fetch the shared raw lock (None in single-process mode)
        raw_lock = _get_or_create_shared_raw_mp_lock(namespace, key)
        is_multiprocess = raw_lock is not None
        if not is_multiprocess:
            raw_lock = async_lock

        # 4. build a *fresh* UnifiedLock with the chosen logging flag
        if is_multiprocess:
            return UnifiedLock(
                lock=raw_lock,
                is_async=False,  # manager.Lock is synchronous
                name=combined_key,
                enable_logging=enable_logging,
                async_lock=async_lock,  # prevents eventloop blocking
            )
        else:
            return UnifiedLock(
                lock=raw_lock,
                is_async=True,
                name=combined_key,
                enable_logging=enable_logging,
                async_lock=None,  # No need for async lock in single process mode
            )

    def _release_lock_for_key(self, namespace: str, key: str):
        """Release both the local async gate and the shared mp reference for *key*."""
        combined_key = _get_combined_key(namespace, key)
        self._release_async_lock(combined_key)
        _release_shared_raw_mp_lock(namespace, key)

    def cleanup_expired_locks(self) -> Dict[str, Any]:
        """
        Cleanup expired locks for both async and multiprocess locks following the same
        conditions as _release_shared_raw_mp_lock and _release_async_lock functions.

        Only performs cleanup when both has_expired_locks and interval_satisfied conditions are met
        to avoid too frequent cleanup operations.

        Since async and multiprocess locks work together, this method cleans up
        both types of expired locks and returns comprehensive statistics.

        Returns:
            Dict containing cleanup statistics and current status:
            {
                "process_id": 12345,
                "cleanup_performed": {
                    "mp_cleaned": 5,
                    "async_cleaned": 3
                },
                "current_status": {
                    "total_mp_locks": 10,
                    "pending_mp_cleanup": 2,
                    "total_async_locks": 8,
                    "pending_async_cleanup": 1
                }
            }
        """
        global _lock_registry, _lock_registry_count, _lock_cleanup_data
        global _registry_guard, _earliest_mp_cleanup_time, _last_mp_cleanup_time

        cleanup_stats = {"mp_cleaned": 0, "async_cleaned": 0}
        current_time = time.time()

        # 1. Cleanup multiprocess locks using generic function
        if (
            _is_multiprocess
            and _lock_registry is not None
            and _registry_guard is not None
        ):
            try:
                with _registry_guard:
                    if _lock_cleanup_data is not None:
                        # Use generic cleanup function without threshold check
                        cleaned_count, new_earliest_time, new_last_cleanup_time = (
                            _perform_lock_cleanup(
                                lock_type="mp",
                                cleanup_data=_lock_cleanup_data,
                                lock_registry=_lock_registry,
                                lock_count=_lock_registry_count,
                                earliest_cleanup_time=_earliest_mp_cleanup_time,
                                last_cleanup_time=_last_mp_cleanup_time,
                                current_time=current_time,
                                threshold_check=False,  # Force cleanup in cleanup_expired_locks
                            )
                        )

                        # Update global state if cleanup was performed
                        if cleaned_count > 0:
                            _earliest_mp_cleanup_time = new_earliest_time
                            _last_mp_cleanup_time = new_last_cleanup_time

                        cleanup_stats["mp_cleaned"] = cleaned_count
            except Exception as e:
                direct_log(
                    f"Error during multiprocess lock cleanup: {e}",
                    level="ERROR",
                    enable_output=False,
                )

        # 2. Cleanup async locks using generic function
        try:
            # Use generic cleanup function without threshold check
            cleaned_count, new_earliest_time, new_last_cleanup_time = (
                _perform_lock_cleanup(
                    lock_type="async",
                    cleanup_data=self._async_lock_cleanup_data,
                    lock_registry=self._async_lock,
                    lock_count=self._async_lock_count,
                    earliest_cleanup_time=self._earliest_async_cleanup_time,
                    last_cleanup_time=self._last_async_cleanup_time,
                    current_time=current_time,
                    threshold_check=False,  # Force cleanup in cleanup_expired_locks
                )
            )

            # Update instance state if cleanup was performed
            if cleaned_count > 0:
                self._earliest_async_cleanup_time = new_earliest_time
                self._last_async_cleanup_time = new_last_cleanup_time

            cleanup_stats["async_cleaned"] = cleaned_count
        except Exception as e:
            direct_log(
                f"Error during async lock cleanup: {e}",
                level="ERROR",
                enable_output=False,
            )

        # 3. Get current status after cleanup
        current_status = self.get_lock_status()

        return {
            "process_id": os.getpid(),
            "cleanup_performed": cleanup_stats,
            "current_status": current_status,
        }

    def get_lock_status(self) -> Dict[str, int]:
        """
        Get current status of both async and multiprocess locks.

        Returns comprehensive lock counts for both types of locks since
        they work together in the keyed lock system.

        Returns:
            Dict containing lock counts:
            {
                "total_mp_locks": 10,
                "pending_mp_cleanup": 2,
                "total_async_locks": 8,
                "pending_async_cleanup": 1
            }
        """
        global _lock_registry_count, _lock_cleanup_data, _registry_guard

        status = {
            "total_mp_locks": 0,
            "pending_mp_cleanup": 0,
            "total_async_locks": 0,
            "pending_async_cleanup": 0,
        }

        try:
            # Count multiprocess locks (under the registry guard)
            if _is_multiprocess and _lock_registry_count is not None:
                if _registry_guard is not None:
                    with _registry_guard:
                        status["total_mp_locks"] = len(_lock_registry_count)
                        if _lock_cleanup_data is not None:
                            status["pending_mp_cleanup"] = len(_lock_cleanup_data)

            # Count async locks (process-local, no guard needed)
            status["total_async_locks"] = len(self._async_lock_count)
            status["pending_async_cleanup"] = len(self._async_lock_cleanup_data)
        except Exception as e:
            direct_log(
                f"Error getting keyed lock status: {e}",
                level="ERROR",
                enable_output=False,
            )

        return status
class _KeyedLockContext:
    """Async context manager that holds UnifiedLocks for a set of keys.

    Keys are sorted so every holder acquires them in the same global order,
    preventing lock-ordering deadlocks. If acquisition fails partway through,
    the locks already taken are rolled back before the error propagates
    (the original leaked them).
    """

    def __init__(
        self,
        parent: KeyedUnifiedLock,
        namespace: str,
        keys: list[str],
        enable_logging: bool,
    ) -> None:
        self._parent = parent
        self._namespace = namespace
        # The sorting is critical to ensure proper lock and release order
        # to avoid deadlocks
        self._keys = sorted(keys)
        self._enable_logging = (
            enable_logging
            if enable_logging is not None
            else parent._default_enable_logging
        )
        self._ul: Optional[List["UnifiedLock"]] = None  # set in __aenter__

    # ----- enter -----
    async def __aenter__(self):
        if self._ul is not None:
            raise RuntimeError("KeyedUnifiedLock already acquired in current context")

        # acquire locks for all keys in the namespace (sorted order)
        self._ul = []
        try:
            for key in self._keys:
                lock = self._parent._get_lock_for_key(
                    self._namespace, key, enable_logging=self._enable_logging
                )
                await lock.__aenter__()
                inc_debug_n_locks_acquired()
                self._ul.append(lock)
        except BaseException as exc:
            # Roll back locks acquired so far (reverse order) so a
            # mid-sequence failure does not leak held locks.
            acquired, self._ul = self._ul, None
            for ul, key in zip(
                reversed(acquired), reversed(self._keys[: len(acquired)])
            ):
                await ul.__aexit__(type(exc), exc, exc.__traceback__)
                self._parent._release_lock_for_key(self._namespace, key)
                dec_debug_n_locks_acquired()
            raise
        return self

    # ----- exit -----
    async def __aexit__(self, exc_type, exc, tb):
        # Guard against exiting a context that was never (successfully) entered
        if self._ul is None:
            return
        # The UnifiedLock takes care of proper release order
        for ul, key in zip(reversed(self._ul), reversed(self._keys)):
            await ul.__aexit__(exc_type, exc, tb)
            self._parent._release_lock_for_key(self._namespace, key)
            dec_debug_n_locks_acquired()
        self._ul = None
def get_internal_lock(enable_logging: bool = False) -> UnifiedLock:
"""return unified storage lock for data consistency"""
@ -259,6 +868,18 @@ def get_graph_db_lock(enable_logging: bool = False) -> UnifiedLock:
)
def get_storage_keyed_lock(
    keys: str | list[str], namespace: str = "default", enable_logging: bool = False
) -> _KeyedLockContext:
    """Return a keyed-lock context for atomic operations scoped to *namespace*.

    Accepts a single key or a list of keys; raises RuntimeError when shared
    storage has not been initialized yet.
    """
    if _storage_keyed_lock is None:
        raise RuntimeError("Shared-Data is not initialized")
    key_list = [keys] if isinstance(keys, str) else keys
    return _storage_keyed_lock(namespace, key_list, enable_logging=enable_logging)
def get_data_init_lock(enable_logging: bool = False) -> UnifiedLock:
"""return unified data initialization lock for ensuring atomic data initialization"""
async_lock = _async_locks.get("data_init_lock") if _is_multiprocess else None
@ -271,6 +892,61 @@ def get_data_init_lock(enable_logging: bool = False) -> UnifiedLock:
)
def cleanup_keyed_lock() -> Dict[str, Any]:
    """Force a cleanup pass over expired keyed locks and report the outcome.

    Cleans both async and multiprocess expired locks via the shared
    KeyedUnifiedLock instance and returns its statistics dict. When shared
    storage is not initialized yet, a zeroed report is returned instead.

    Returns:
        Same shape as KeyedUnifiedLock.cleanup_expired_locks().
    """
    # Nothing to clean before shared storage exists: report zeros.
    if not _initialized or _storage_keyed_lock is None:
        zero_counts = {
            "total_mp_locks": 0,
            "pending_mp_cleanup": 0,
            "total_async_locks": 0,
            "pending_async_cleanup": 0,
        }
        return {
            "process_id": os.getpid(),
            "cleanup_performed": {"mp_cleaned": 0, "async_cleaned": 0},
            "current_status": zero_counts,
        }

    return _storage_keyed_lock.cleanup_expired_locks()
def get_keyed_lock_status() -> Dict[str, Any]:
    """Report current keyed-lock counts without triggering any cleanup.

    Read-only view over both multiprocess and async lock tables, including
    entries pending cleanup. Returns zeroed counts when shared storage is
    not initialized yet.

    Returns:
        Same shape as KeyedUnifiedLock.get_lock_status(), plus "process_id".
    """
    # Before initialization there are no locks to report.
    if not _initialized or _storage_keyed_lock is None:
        return {
            "process_id": os.getpid(),
            "total_mp_locks": 0,
            "pending_mp_cleanup": 0,
            "total_async_locks": 0,
            "pending_async_cleanup": 0,
        }

    result = _storage_keyed_lock.get_lock_status()
    result["process_id"] = os.getpid()
    return result
def initialize_share_data(workers: int = 1):
"""
Initialize shared storage data for single or multi-process mode.
@ -294,6 +970,10 @@ def initialize_share_data(workers: int = 1):
_workers, \
_is_multiprocess, \
_storage_lock, \
_lock_registry, \
_lock_registry_count, \
_lock_cleanup_data, \
_registry_guard, \
_internal_lock, \
_pipeline_status_lock, \
_graph_db_lock, \
@ -302,7 +982,10 @@ def initialize_share_data(workers: int = 1):
_init_flags, \
_initialized, \
_update_flags, \
_async_locks
_async_locks, \
_storage_keyed_lock, \
_earliest_mp_cleanup_time, \
_last_mp_cleanup_time
# Check if already initialized
if _initialized:
@ -316,6 +999,10 @@ def initialize_share_data(workers: int = 1):
if workers > 1:
_is_multiprocess = True
_manager = Manager()
_lock_registry = _manager.dict()
_lock_registry_count = _manager.dict()
_lock_cleanup_data = _manager.dict()
_registry_guard = _manager.RLock()
_internal_lock = _manager.Lock()
_storage_lock = _manager.Lock()
_pipeline_status_lock = _manager.Lock()
@ -325,6 +1012,8 @@ def initialize_share_data(workers: int = 1):
_init_flags = _manager.dict()
_update_flags = _manager.dict()
_storage_keyed_lock = KeyedUnifiedLock()
# Initialize async locks for multiprocess mode
_async_locks = {
"internal_lock": asyncio.Lock(),
@ -348,8 +1037,14 @@ def initialize_share_data(workers: int = 1):
_init_flags = {}
_update_flags = {}
_async_locks = None # No need for async locks in single process mode
_storage_keyed_lock = KeyedUnifiedLock()
direct_log(f"Process {os.getpid()} Shared-Data created for Single Process")
# Initialize multiprocess cleanup times
_earliest_mp_cleanup_time = None
_last_mp_cleanup_time = None
# Mark as initialized
_initialized = True

View file

@ -205,7 +205,7 @@ class LightRAG:
"""Batch size for embedding computations."""
embedding_func_max_async: int = field(
default=int(os.getenv("EMBEDDING_FUNC_MAX_ASYNC", 16))
default=int(os.getenv("EMBEDDING_FUNC_MAX_ASYNC", 8))
)
"""Maximum number of concurrent embedding function calls."""
@ -231,7 +231,7 @@ class LightRAG:
llm_model_name: str = field(default="gpt-4o-mini")
"""Name of the LLM model used for generating responses."""
llm_model_max_token_size: int = field(default=int(os.getenv("MAX_TOKENS", 32768)))
llm_model_max_token_size: int = field(default=int(os.getenv("MAX_TOKENS", 32000)))
"""Maximum number of tokens allowed per LLM response."""
llm_model_max_async: int = field(default=int(os.getenv("MAX_ASYNC", 4)))
@ -1094,86 +1094,89 @@ class LightRAG:
}
)
# Semphore released, concurrency controlled by graph_db_lock in merge_nodes_and_edges instead
if file_extraction_stage_ok:
try:
# Get chunk_results from entity_relation_task
chunk_results = await entity_relation_task
await merge_nodes_and_edges(
chunk_results=chunk_results, # result collected from entity_relation_task
knowledge_graph_inst=self.chunk_entity_relation_graph,
entity_vdb=self.entities_vdb,
relationships_vdb=self.relationships_vdb,
global_config=asdict(self),
pipeline_status=pipeline_status,
pipeline_status_lock=pipeline_status_lock,
llm_response_cache=self.llm_response_cache,
current_file_number=current_file_number,
total_files=total_files,
file_path=file_path,
)
await self.doc_status.upsert(
{
doc_id: {
"status": DocStatus.PROCESSED,
"chunks_count": len(chunks),
"chunks_list": list(
chunks.keys()
), # 保留 chunks_list
"content": status_doc.content,
"content_summary": status_doc.content_summary,
"content_length": status_doc.content_length,
"created_at": status_doc.created_at,
"updated_at": datetime.now(
timezone.utc
).isoformat(),
"file_path": file_path,
}
}
)
# Call _insert_done after processing each file
await self._insert_done()
async with pipeline_status_lock:
log_message = f"Completed processing file {current_file_number}/{total_files}: {file_path}"
logger.info(log_message)
pipeline_status["latest_message"] = log_message
pipeline_status["history_messages"].append(log_message)
except Exception as e:
# Log error and update pipeline status
logger.error(traceback.format_exc())
error_msg = f"Merging stage failed in document {current_file_number}/{total_files}: {file_path}"
logger.error(error_msg)
async with pipeline_status_lock:
pipeline_status["latest_message"] = error_msg
pipeline_status["history_messages"].append(
traceback.format_exc()
# Concurrency is controlled by graph db lock for individual entities and relationships
if file_extraction_stage_ok:
try:
# Get chunk_results from entity_relation_task
chunk_results = await entity_relation_task
await merge_nodes_and_edges(
chunk_results=chunk_results, # result collected from entity_relation_task
knowledge_graph_inst=self.chunk_entity_relation_graph,
entity_vdb=self.entities_vdb,
relationships_vdb=self.relationships_vdb,
global_config=asdict(self),
pipeline_status=pipeline_status,
pipeline_status_lock=pipeline_status_lock,
llm_response_cache=self.llm_response_cache,
current_file_number=current_file_number,
total_files=total_files,
file_path=file_path,
)
pipeline_status["history_messages"].append(error_msg)
# Persistent llm cache
if self.llm_response_cache:
await self.llm_response_cache.index_done_callback()
# Update document status to failed
await self.doc_status.upsert(
{
doc_id: {
"status": DocStatus.FAILED,
"error": str(e),
"content": status_doc.content,
"content_summary": status_doc.content_summary,
"content_length": status_doc.content_length,
"created_at": status_doc.created_at,
"updated_at": datetime.now().isoformat(),
"file_path": file_path,
await self.doc_status.upsert(
{
doc_id: {
"status": DocStatus.PROCESSED,
"chunks_count": len(chunks),
"chunks_list": list(
chunks.keys()
), # 保留 chunks_list
"content": status_doc.content,
"content_summary": status_doc.content_summary,
"content_length": status_doc.content_length,
"created_at": status_doc.created_at,
"updated_at": datetime.now(
timezone.utc
).isoformat(),
"file_path": file_path,
}
}
}
)
)
# Call _insert_done after processing each file
await self._insert_done()
async with pipeline_status_lock:
log_message = f"Completed processing file {current_file_number}/{total_files}: {file_path}"
logger.info(log_message)
pipeline_status["latest_message"] = log_message
pipeline_status["history_messages"].append(
log_message
)
except Exception as e:
# Log error and update pipeline status
logger.error(traceback.format_exc())
error_msg = f"Merging stage failed in document {current_file_number}/{total_files}: {file_path}"
logger.error(error_msg)
async with pipeline_status_lock:
pipeline_status["latest_message"] = error_msg
pipeline_status["history_messages"].append(
traceback.format_exc()
)
pipeline_status["history_messages"].append(
error_msg
)
# Persistent llm cache
if self.llm_response_cache:
await self.llm_response_cache.index_done_callback()
# Update document status to failed
await self.doc_status.upsert(
{
doc_id: {
"status": DocStatus.FAILED,
"error": str(e),
"content": status_doc.content,
"content_summary": status_doc.content_summary,
"content_length": status_doc.content_length,
"created_at": status_doc.created_at,
"updated_at": datetime.now().isoformat(),
"file_path": file_path,
}
}
)
# Create processing tasks for all documents
doc_tasks = []

View file

@ -37,6 +37,7 @@ from .base import (
)
from .prompt import PROMPTS
from .constants import GRAPH_FIELD_SEP
from .kg.shared_storage import get_storage_keyed_lock
import time
from dotenv import load_dotenv
@ -117,7 +118,7 @@ async def _handle_entity_relation_summary(
tokenizer: Tokenizer = global_config["tokenizer"]
llm_max_tokens = global_config["llm_model_max_token_size"]
summary_max_tokens = global_config["summary_to_max_tokens"]
# summary_max_tokens = global_config["summary_to_max_tokens"]
language = global_config["addon_params"].get(
"language", PROMPTS["DEFAULT_LANGUAGE"]
@ -144,7 +145,7 @@ async def _handle_entity_relation_summary(
use_prompt,
use_llm_func,
llm_response_cache=llm_response_cache,
max_tokens=summary_max_tokens,
# max_tokens=summary_max_tokens,
cache_type="extract",
)
return summary
@ -274,20 +275,26 @@ async def _rebuild_knowledge_from_chunks(
pipeline_status: dict | None = None,
pipeline_status_lock=None,
) -> None:
"""Rebuild entity and relationship descriptions from cached extraction results
"""Rebuild entity and relationship descriptions from cached extraction results with parallel processing
This method uses cached LLM extraction results instead of calling LLM again,
following the same approach as the insert process.
following the same approach as the insert process. Now with parallel processing
controlled by llm_model_max_async and using get_storage_keyed_lock for data consistency.
Args:
entities_to_rebuild: Dict mapping entity_name -> set of remaining chunk_ids
relationships_to_rebuild: Dict mapping (src, tgt) -> set of remaining chunk_ids
text_chunks_data: Pre-loaded chunk data dict {chunk_id: chunk_data}
knowledge_graph_inst: Knowledge graph storage
entities_vdb: Entity vector database
relationships_vdb: Relationship vector database
text_chunks_storage: Text chunks storage
llm_response_cache: LLM response cache
global_config: Global configuration containing llm_model_max_async
pipeline_status: Pipeline status dictionary
pipeline_status_lock: Lock for pipeline status
"""
if not entities_to_rebuild and not relationships_to_rebuild:
return
rebuilt_entities_count = 0
rebuilt_relationships_count = 0
# Get all referenced chunk IDs
all_referenced_chunk_ids = set()
@ -296,7 +303,7 @@ async def _rebuild_knowledge_from_chunks(
for chunk_ids in relationships_to_rebuild.values():
all_referenced_chunk_ids.update(chunk_ids)
status_message = f"Rebuilding knowledge from {len(all_referenced_chunk_ids)} cached chunk extractions"
status_message = f"Rebuilding knowledge from {len(all_referenced_chunk_ids)} cached chunk extractions (parallel processing)"
logger.info(status_message)
if pipeline_status is not None and pipeline_status_lock is not None:
async with pipeline_status_lock:
@ -366,66 +373,116 @@ async def _rebuild_knowledge_from_chunks(
pipeline_status["history_messages"].append(status_message)
continue
# Rebuild entities
# Get max async tasks limit from global_config for semaphore control
graph_max_async = global_config.get("llm_model_max_async", 4) * 2
semaphore = asyncio.Semaphore(graph_max_async)
# Counters for tracking progress
rebuilt_entities_count = 0
rebuilt_relationships_count = 0
failed_entities_count = 0
failed_relationships_count = 0
async def _locked_rebuild_entity(entity_name, chunk_ids):
nonlocal rebuilt_entities_count, failed_entities_count
async with semaphore:
workspace = global_config.get("workspace", "")
namespace = f"{workspace}:GraphDB" if workspace else "GraphDB"
async with get_storage_keyed_lock(
[entity_name], namespace=namespace, enable_logging=False
):
try:
await _rebuild_single_entity(
knowledge_graph_inst=knowledge_graph_inst,
entities_vdb=entities_vdb,
entity_name=entity_name,
chunk_ids=chunk_ids,
chunk_entities=chunk_entities,
llm_response_cache=llm_response_cache,
global_config=global_config,
)
rebuilt_entities_count += 1
status_message = (
f"Rebuilt entity: {entity_name} from {len(chunk_ids)} chunks"
)
logger.info(status_message)
if pipeline_status is not None and pipeline_status_lock is not None:
async with pipeline_status_lock:
pipeline_status["latest_message"] = status_message
pipeline_status["history_messages"].append(status_message)
except Exception as e:
failed_entities_count += 1
status_message = f"Failed to rebuild entity {entity_name}: {e}"
logger.info(status_message) # Per requirement, change to info
if pipeline_status is not None and pipeline_status_lock is not None:
async with pipeline_status_lock:
pipeline_status["latest_message"] = status_message
pipeline_status["history_messages"].append(status_message)
async def _locked_rebuild_relationship(src, tgt, chunk_ids):
nonlocal rebuilt_relationships_count, failed_relationships_count
async with semaphore:
workspace = global_config.get("workspace", "")
namespace = f"{workspace}:GraphDB" if workspace else "GraphDB"
async with get_storage_keyed_lock(
f"{src}-{tgt}", namespace=namespace, enable_logging=False
):
try:
await _rebuild_single_relationship(
knowledge_graph_inst=knowledge_graph_inst,
relationships_vdb=relationships_vdb,
src=src,
tgt=tgt,
chunk_ids=chunk_ids,
chunk_relationships=chunk_relationships,
llm_response_cache=llm_response_cache,
global_config=global_config,
)
rebuilt_relationships_count += 1
status_message = f"Rebuilt relationship: {src}->{tgt} from {len(chunk_ids)} chunks"
logger.info(status_message)
if pipeline_status is not None and pipeline_status_lock is not None:
async with pipeline_status_lock:
pipeline_status["latest_message"] = status_message
pipeline_status["history_messages"].append(status_message)
except Exception as e:
failed_relationships_count += 1
status_message = f"Failed to rebuild relationship {src}->{tgt}: {e}"
logger.info(status_message) # Per requirement, change to info
if pipeline_status is not None and pipeline_status_lock is not None:
async with pipeline_status_lock:
pipeline_status["latest_message"] = status_message
pipeline_status["history_messages"].append(status_message)
# Create tasks for parallel processing
tasks = []
# Add entity rebuilding tasks
for entity_name, chunk_ids in entities_to_rebuild.items():
try:
await _rebuild_single_entity(
knowledge_graph_inst=knowledge_graph_inst,
entities_vdb=entities_vdb,
entity_name=entity_name,
chunk_ids=chunk_ids,
chunk_entities=chunk_entities,
llm_response_cache=llm_response_cache,
global_config=global_config,
)
rebuilt_entities_count += 1
status_message = (
f"Rebuilt entity: {entity_name} from {len(chunk_ids)} chunks"
)
logger.info(status_message)
if pipeline_status is not None and pipeline_status_lock is not None:
async with pipeline_status_lock:
pipeline_status["latest_message"] = status_message
pipeline_status["history_messages"].append(status_message)
except Exception as e:
status_message = f"Failed to rebuild entity {entity_name}: {e}"
logger.info(status_message) # Per requirement, change to info
if pipeline_status is not None and pipeline_status_lock is not None:
async with pipeline_status_lock:
pipeline_status["latest_message"] = status_message
pipeline_status["history_messages"].append(status_message)
task = asyncio.create_task(_locked_rebuild_entity(entity_name, chunk_ids))
tasks.append(task)
# Rebuild relationships
# Add relationship rebuilding tasks
for (src, tgt), chunk_ids in relationships_to_rebuild.items():
try:
await _rebuild_single_relationship(
knowledge_graph_inst=knowledge_graph_inst,
relationships_vdb=relationships_vdb,
src=src,
tgt=tgt,
chunk_ids=chunk_ids,
chunk_relationships=chunk_relationships,
llm_response_cache=llm_response_cache,
global_config=global_config,
)
rebuilt_relationships_count += 1
status_message = (
f"Rebuilt relationship: {src}->{tgt} from {len(chunk_ids)} chunks"
)
logger.info(status_message)
if pipeline_status is not None and pipeline_status_lock is not None:
async with pipeline_status_lock:
pipeline_status["latest_message"] = status_message
pipeline_status["history_messages"].append(status_message)
except Exception as e:
status_message = f"Failed to rebuild relationship {src}->{tgt}: {e}"
logger.info(status_message)
if pipeline_status is not None and pipeline_status_lock is not None:
async with pipeline_status_lock:
pipeline_status["latest_message"] = status_message
pipeline_status["history_messages"].append(status_message)
task = asyncio.create_task(_locked_rebuild_relationship(src, tgt, chunk_ids))
tasks.append(task)
# Log parallel processing start
status_message = f"Starting parallel rebuild of {len(entities_to_rebuild)} entities and {len(relationships_to_rebuild)} relationships (async: {graph_max_async})"
logger.info(status_message)
if pipeline_status is not None and pipeline_status_lock is not None:
async with pipeline_status_lock:
pipeline_status["latest_message"] = status_message
pipeline_status["history_messages"].append(status_message)
# Execute all tasks in parallel with semaphore control
await asyncio.gather(*tasks)
# Final status report
status_message = f"KG rebuild completed: {rebuilt_entities_count} entities and {rebuilt_relationships_count} relationships rebuilt successfully."
if failed_entities_count > 0 or failed_relationships_count > 0:
status_message += f" Failed: {failed_entities_count} entities, {failed_relationships_count} relationships."
status_message = f"KG rebuild completed: {rebuilt_entities_count} entities and {rebuilt_relationships_count} relationships."
logger.info(status_message)
if pipeline_status is not None and pipeline_status_lock is not None:
async with pipeline_status_lock:
@ -630,7 +687,10 @@ async def _rebuild_single_entity(
# Helper function to generate final description with optional LLM summary
async def _generate_final_description(combined_description: str) -> str:
if len(combined_description) > global_config["summary_to_max_tokens"]:
force_llm_summary_on_merge = global_config["force_llm_summary_on_merge"]
num_fragment = combined_description.count(GRAPH_FIELD_SEP) + 1
if num_fragment >= force_llm_summary_on_merge:
return await _handle_entity_relation_summary(
entity_name,
combined_description,
@ -725,7 +785,11 @@ async def _rebuild_single_relationship(
llm_response_cache: BaseKVStorage,
global_config: dict[str, str],
) -> None:
"""Rebuild a single relationship from cached extraction results"""
"""Rebuild a single relationship from cached extraction results
Note: This function assumes the caller has already acquired the appropriate
keyed lock for the relationship pair to ensure thread safety.
"""
# Get current relationship data
current_relationship = await knowledge_graph_inst.get_edge(src, tgt)
@ -781,8 +845,11 @@ async def _rebuild_single_relationship(
# )
weight = sum(weights) if weights else current_relationship.get("weight", 1.0)
# Use summary if description is too long
if len(combined_description) > global_config["summary_to_max_tokens"]:
# Use summary if description has too many fragments
force_llm_summary_on_merge = global_config["force_llm_summary_on_merge"]
num_fragment = combined_description.count(GRAPH_FIELD_SEP) + 1
if num_fragment >= force_llm_summary_on_merge:
final_description = await _handle_entity_relation_summary(
f"{src}-{tgt}",
combined_description,
@ -1015,28 +1082,23 @@ async def _merge_edges_then_upsert(
)
for need_insert_id in [src_id, tgt_id]:
if not (await knowledge_graph_inst.has_node(need_insert_id)):
# # Discard this edge if the node does not exist
# if need_insert_id == src_id:
# logger.warning(
# f"Discard edge: {src_id} - {tgt_id} | Source node missing"
# )
# else:
# logger.warning(
# f"Discard edge: {src_id} - {tgt_id} | Target node missing"
# )
# return None
await knowledge_graph_inst.upsert_node(
need_insert_id,
node_data={
"entity_id": need_insert_id,
"source_id": source_id,
"description": description,
"entity_type": "UNKNOWN",
"file_path": file_path,
"created_at": int(time.time()),
},
)
workspace = global_config.get("workspace", "")
namespace = f"{workspace}:GraphDB" if workspace else "GraphDB"
async with get_storage_keyed_lock(
[need_insert_id], namespace=namespace, enable_logging=False
):
if not (await knowledge_graph_inst.has_node(need_insert_id)):
await knowledge_graph_inst.upsert_node(
need_insert_id,
node_data={
"entity_id": need_insert_id,
"source_id": source_id,
"description": description,
"entity_type": "UNKNOWN",
"file_path": file_path,
"created_at": int(time.time()),
},
)
force_llm_summary_on_merge = global_config["force_llm_summary_on_merge"]
@ -1118,8 +1180,6 @@ async def merge_nodes_and_edges(
pipeline_status_lock: Lock for pipeline status
llm_response_cache: LLM response cache
"""
# Get lock manager from shared storage
from .kg.shared_storage import get_graph_db_lock
# Collect all nodes and edges from all chunks
all_nodes = defaultdict(list)
@ -1136,94 +1196,109 @@ async def merge_nodes_and_edges(
all_edges[sorted_edge_key].extend(edges)
# Centralized processing of all nodes and edges
entities_data = []
relationships_data = []
total_entities_count = len(all_nodes)
total_relations_count = len(all_edges)
# Merge nodes and edges
# Use graph database lock to ensure atomic merges and updates
graph_db_lock = get_graph_db_lock(enable_logging=False)
async with graph_db_lock:
async with pipeline_status_lock:
log_message = (
f"Merging stage {current_file_number}/{total_files}: {file_path}"
)
logger.info(log_message)
pipeline_status["latest_message"] = log_message
pipeline_status["history_messages"].append(log_message)
log_message = f"Merging stage {current_file_number}/{total_files}: {file_path}"
logger.info(log_message)
async with pipeline_status_lock:
pipeline_status["latest_message"] = log_message
pipeline_status["history_messages"].append(log_message)
# Process and update all entities at once
for entity_name, entities in all_nodes.items():
entity_data = await _merge_nodes_then_upsert(
entity_name,
entities,
knowledge_graph_inst,
global_config,
pipeline_status,
pipeline_status_lock,
llm_response_cache,
)
entities_data.append(entity_data)
# Get max async tasks limit from global_config for semaphore control
graph_max_async = global_config.get("llm_model_max_async", 4) * 2
semaphore = asyncio.Semaphore(graph_max_async)
# Process and update all relationships at once
for edge_key, edges in all_edges.items():
edge_data = await _merge_edges_then_upsert(
edge_key[0],
edge_key[1],
edges,
knowledge_graph_inst,
global_config,
pipeline_status,
pipeline_status_lock,
llm_response_cache,
)
if edge_data is not None:
relationships_data.append(edge_data)
# Process and update all entities and relationships in parallel
log_message = f"Processing: {total_entities_count} entities and {total_relations_count} relations (async: {graph_max_async})"
logger.info(log_message)
async with pipeline_status_lock:
pipeline_status["latest_message"] = log_message
pipeline_status["history_messages"].append(log_message)
# Update total counts
total_entities_count = len(entities_data)
total_relations_count = len(relationships_data)
async def _locked_process_entity_name(entity_name, entities):
async with semaphore:
workspace = global_config.get("workspace", "")
namespace = f"{workspace}:GraphDB" if workspace else "GraphDB"
async with get_storage_keyed_lock(
[entity_name], namespace=namespace, enable_logging=False
):
entity_data = await _merge_nodes_then_upsert(
entity_name,
entities,
knowledge_graph_inst,
global_config,
pipeline_status,
pipeline_status_lock,
llm_response_cache,
)
if entity_vdb is not None:
data_for_vdb = {
compute_mdhash_id(entity_data["entity_name"], prefix="ent-"): {
"entity_name": entity_data["entity_name"],
"entity_type": entity_data["entity_type"],
"content": f"{entity_data['entity_name']}\n{entity_data['description']}",
"source_id": entity_data["source_id"],
"file_path": entity_data.get("file_path", "unknown_source"),
}
}
await entity_vdb.upsert(data_for_vdb)
return entity_data
log_message = f"Updating {total_entities_count} entities {current_file_number}/{total_files}: {file_path}"
logger.info(log_message)
if pipeline_status is not None:
async with pipeline_status_lock:
pipeline_status["latest_message"] = log_message
pipeline_status["history_messages"].append(log_message)
async def _locked_process_edges(edge_key, edges):
async with semaphore:
workspace = global_config.get("workspace", "")
namespace = f"{workspace}:GraphDB" if workspace else "GraphDB"
async with get_storage_keyed_lock(
f"{edge_key[0]}-{edge_key[1]}",
namespace=namespace,
enable_logging=False,
):
edge_data = await _merge_edges_then_upsert(
edge_key[0],
edge_key[1],
edges,
knowledge_graph_inst,
global_config,
pipeline_status,
pipeline_status_lock,
llm_response_cache,
)
if edge_data is None:
return None
# Update vector databases with all collected data
if entity_vdb is not None and entities_data:
data_for_vdb = {
compute_mdhash_id(dp["entity_name"], prefix="ent-"): {
"entity_name": dp["entity_name"],
"entity_type": dp["entity_type"],
"content": f"{dp['entity_name']}\n{dp['description']}",
"source_id": dp["source_id"],
"file_path": dp.get("file_path", "unknown_source"),
}
for dp in entities_data
}
await entity_vdb.upsert(data_for_vdb)
if relationships_vdb is not None:
data_for_vdb = {
compute_mdhash_id(
edge_data["src_id"] + edge_data["tgt_id"], prefix="rel-"
): {
"src_id": edge_data["src_id"],
"tgt_id": edge_data["tgt_id"],
"keywords": edge_data["keywords"],
"content": f"{edge_data['src_id']}\t{edge_data['tgt_id']}\n{edge_data['keywords']}\n{edge_data['description']}",
"source_id": edge_data["source_id"],
"file_path": edge_data.get("file_path", "unknown_source"),
}
}
await relationships_vdb.upsert(data_for_vdb)
return edge_data
log_message = f"Updating {total_relations_count} relations {current_file_number}/{total_files}: {file_path}"
logger.info(log_message)
if pipeline_status is not None:
async with pipeline_status_lock:
pipeline_status["latest_message"] = log_message
pipeline_status["history_messages"].append(log_message)
# Create a single task queue for both entities and edges
tasks = []
if relationships_vdb is not None and relationships_data:
data_for_vdb = {
compute_mdhash_id(dp["src_id"] + dp["tgt_id"], prefix="rel-"): {
"src_id": dp["src_id"],
"tgt_id": dp["tgt_id"],
"keywords": dp["keywords"],
"content": f"{dp['src_id']}\t{dp['tgt_id']}\n{dp['keywords']}\n{dp['description']}",
"source_id": dp["source_id"],
"file_path": dp.get("file_path", "unknown_source"),
}
for dp in relationships_data
}
await relationships_vdb.upsert(data_for_vdb)
# Add entity processing tasks
for entity_name, entities in all_nodes.items():
tasks.append(
asyncio.create_task(_locked_process_entity_name(entity_name, entities))
)
# Add edge processing tasks
for edge_key, edges in all_edges.items():
tasks.append(asyncio.create_task(_locked_process_edges(edge_key, edges)))
# Execute all tasks in parallel with semaphore control
await asyncio.gather(*tasks)
async def extract_entities(
@ -1433,8 +1508,8 @@ async def extract_entities(
return maybe_nodes, maybe_edges
# Get max async tasks limit from global_config
llm_model_max_async = global_config.get("llm_model_max_async", 4)
semaphore = asyncio.Semaphore(llm_model_max_async)
chunk_max_async = global_config.get("llm_model_max_async", 4)
semaphore = asyncio.Semaphore(chunk_max_async)
async def _process_with_semaphore(chunk):
async with semaphore:

View file

@ -42,12 +42,28 @@ export type LightragStatus = {
vector_storage: string
workspace?: string
max_graph_nodes?: string
enable_rerank?: boolean
rerank_model?: string | null
rerank_binding_host?: string | null
}
update_status?: Record<string, any>
core_version?: string
api_version?: string
auth_mode?: 'enabled' | 'disabled'
pipeline_busy: boolean
keyed_locks?: {
process_id: number
cleanup_performed: {
mp_cleaned: number
async_cleaned: number
}
current_status: {
total_mp_locks: number
pending_mp_cleanup: number
total_async_locks: number
pending_async_cleanup: number
}
}
webui_title?: string
webui_description?: string
}

View file

@ -2,7 +2,6 @@ import { useCallback } from 'react'
import { QueryMode, QueryRequest } from '@/api/lightrag'
// Removed unused import for Text component
import Checkbox from '@/components/ui/Checkbox'
import NumberInput from '@/components/ui/NumberInput'
import Input from '@/components/ui/Input'
import { Card, CardContent, CardDescription, CardHeader, CardTitle } from '@/components/ui/Card'
import {
@ -121,11 +120,20 @@ export default function QuerySettings() {
</TooltipProvider>
<div>
{/* Removed sr-only label */}
<NumberInput
<Input
id="top_k"
stepper={1}
value={querySettings.top_k}
onValueChange={(v) => handleChange('top_k', v)}
type="number"
value={querySettings.top_k ?? ''}
onChange={(e) => {
const value = e.target.value
handleChange('top_k', value === '' ? '' : parseInt(value) || 0)
}}
onBlur={(e) => {
const value = e.target.value
if (value === '' || isNaN(parseInt(value))) {
handleChange('top_k', 1)
}
}}
min={1}
placeholder={t('retrievePanel.querySettings.topKPlaceholder')}
/>
@ -278,15 +286,23 @@ export default function QuerySettings() {
</TooltipProvider>
<div>
{/* Removed sr-only label */}
<NumberInput
className="!border-input"
<Input
id="history_turns"
stepper={1}
type="text"
value={querySettings.history_turns}
onValueChange={(v) => handleChange('history_turns', v)}
type="number"
value={querySettings.history_turns ?? ''}
onChange={(e) => {
const value = e.target.value
handleChange('history_turns', value === '' ? '' : parseInt(value) || 0)
}}
onBlur={(e) => {
const value = e.target.value
if (value === '' || isNaN(parseInt(value))) {
handleChange('history_turns', 0)
}
}}
min={0}
placeholder={t('retrievePanel.querySettings.historyTurnsPlaceholder')}
className="h-9"
/>
</div>
</>

View file

@ -11,7 +11,7 @@ const StatusCard = ({ status }: { status: LightragStatus | null }) => {
<div className="min-w-[300px] space-y-2 text-xs">
<div className="space-y-1">
<h4 className="font-medium">{t('graphPanel.statusCard.storageInfo')}</h4>
<div className="text-foreground grid grid-cols-[120px_1fr] gap-1">
<div className="text-foreground grid grid-cols-[160px_1fr] gap-1">
<span>{t('graphPanel.statusCard.workingDirectory')}:</span>
<span className="truncate">{status.working_directory}</span>
<span>{t('graphPanel.statusCard.inputDirectory')}:</span>
@ -21,7 +21,7 @@ const StatusCard = ({ status }: { status: LightragStatus | null }) => {
<div className="space-y-1">
<h4 className="font-medium">{t('graphPanel.statusCard.llmConfig')}</h4>
<div className="text-foreground grid grid-cols-[120px_1fr] gap-1">
<div className="text-foreground grid grid-cols-[160px_1fr] gap-1">
<span>{t('graphPanel.statusCard.llmBinding')}:</span>
<span>{status.configuration.llm_binding}</span>
<span>{t('graphPanel.statusCard.llmBindingHost')}:</span>
@ -35,7 +35,7 @@ const StatusCard = ({ status }: { status: LightragStatus | null }) => {
<div className="space-y-1">
<h4 className="font-medium">{t('graphPanel.statusCard.embeddingConfig')}</h4>
<div className="text-foreground grid grid-cols-[120px_1fr] gap-1">
<div className="text-foreground grid grid-cols-[160px_1fr] gap-1">
<span>{t('graphPanel.statusCard.embeddingBinding')}:</span>
<span>{status.configuration.embedding_binding}</span>
<span>{t('graphPanel.statusCard.embeddingBindingHost')}:</span>
@ -45,9 +45,21 @@ const StatusCard = ({ status }: { status: LightragStatus | null }) => {
</div>
</div>
{status.configuration.enable_rerank && (
<div className="space-y-1">
<h4 className="font-medium">{t('graphPanel.statusCard.rerankerConfig')}</h4>
<div className="text-foreground grid grid-cols-[160px_1fr] gap-1">
<span>{t('graphPanel.statusCard.rerankerBindingHost')}:</span>
<span>{status.configuration.rerank_binding_host || '-'}</span>
<span>{t('graphPanel.statusCard.rerankerModel')}:</span>
<span>{status.configuration.rerank_model || '-'}</span>
</div>
</div>
)}
<div className="space-y-1">
<h4 className="font-medium">{t('graphPanel.statusCard.storageConfig')}</h4>
<div className="text-foreground grid grid-cols-[120px_1fr] gap-1">
<div className="text-foreground grid grid-cols-[160px_1fr] gap-1">
<span>{t('graphPanel.statusCard.kvStorage')}:</span>
<span>{status.configuration.kv_storage}</span>
<span>{t('graphPanel.statusCard.docStatusStorage')}:</span>
@ -60,6 +72,16 @@ const StatusCard = ({ status }: { status: LightragStatus | null }) => {
<span>{status.configuration.workspace || '-'}</span>
<span>{t('graphPanel.statusCard.maxGraphNodes')}:</span>
<span>{status.configuration.max_graph_nodes || '-'}</span>
{status.keyed_locks && (
<>
<span>{t('graphPanel.statusCard.lockStatus')}:</span>
<span>
mp {status.keyed_locks.current_status.pending_mp_cleanup}/{status.keyed_locks.current_status.total_mp_locks} |
async {status.keyed_locks.current_status.pending_async_cleanup}/{status.keyed_locks.current_status.total_async_locks}
(pid: {status.keyed_locks.process_id})
</span>
</>
)}
</div>
</div>
</div>

View file

@ -20,7 +20,7 @@ const StatusDialog = ({ open, onOpenChange, status }: StatusDialogProps) => {
return (
<Dialog open={open} onOpenChange={onOpenChange}>
<DialogContent className="sm:max-w-[500px]">
<DialogContent className="sm:max-w-[700px]">
<DialogHeader>
<DialogTitle>{t('graphPanel.statusDialog.title')}</DialogTitle>
<DialogDescription>

View file

@ -252,12 +252,12 @@
"inputDirectory": "دليل الإدخال",
"llmConfig": "تكوين نموذج اللغة الكبير",
"llmBinding": "ربط نموذج اللغة الكبير",
"llmBindingHost": "مضيف ربط نموذج اللغة الكبير",
"llmBindingHost": "نقطة نهاية نموذج اللغة الكبير",
"llmModel": "نموذج اللغة الكبير",
"maxTokens": "أقصى عدد من الرموز",
"embeddingConfig": "تكوين التضمين",
"embeddingBinding": "ربط التضمين",
"embeddingBindingHost": "مضيف ربط التضمين",
"embeddingBindingHost": "نقطة نهاية التضمين",
"embeddingModel": "نموذج التضمين",
"storageConfig": "تكوين التخزين",
"kvStorage": "تخزين المفتاح-القيمة",
@ -265,7 +265,11 @@
"graphStorage": "تخزين الرسم البياني",
"vectorStorage": "تخزين المتجهات",
"workspace": "مساحة العمل",
"maxGraphNodes": "الحد الأقصى لعقد الرسم البياني"
"maxGraphNodes": "الحد الأقصى لعقد الرسم البياني",
"rerankerConfig": "تكوين إعادة الترتيب",
"rerankerBindingHost": "نقطة نهاية إعادة الترتيب",
"rerankerModel": "نموذج إعادة الترتيب",
"lockStatus": "حالة القفل"
},
"propertiesView": {
"editProperty": "تعديل {{property}}",

View file

@ -252,12 +252,12 @@
"inputDirectory": "Input Directory",
"llmConfig": "LLM Configuration",
"llmBinding": "LLM Binding",
"llmBindingHost": "LLM Binding Host",
"llmBindingHost": "LLM Endpoint",
"llmModel": "LLM Model",
"maxTokens": "Max Tokens",
"embeddingConfig": "Embedding Configuration",
"embeddingBinding": "Embedding Binding",
"embeddingBindingHost": "Embedding Binding Host",
"embeddingBindingHost": "Embedding Endpoint",
"embeddingModel": "Embedding Model",
"storageConfig": "Storage Configuration",
"kvStorage": "KV Storage",
@ -265,7 +265,11 @@
"graphStorage": "Graph Storage",
"vectorStorage": "Vector Storage",
"workspace": "Workspace",
"maxGraphNodes": "Max Graph Nodes"
"maxGraphNodes": "Max Graph Nodes",
"rerankerConfig": "Reranker Configuration",
"rerankerBindingHost": "Reranker Endpoint",
"rerankerModel": "Reranker Model",
"lockStatus": "Lock Status"
},
"propertiesView": {
"editProperty": "Edit {{property}}",

View file

@ -252,12 +252,12 @@
"inputDirectory": "Répertoire d'entrée",
"llmConfig": "Configuration du modèle de langage",
"llmBinding": "Liaison du modèle de langage",
"llmBindingHost": "Hôte de liaison du modèle de langage",
"llmBindingHost": "Point de terminaison LLM",
"llmModel": "Modèle de langage",
"maxTokens": "Nombre maximum de jetons",
"embeddingConfig": "Configuration d'incorporation",
"embeddingBinding": "Liaison d'incorporation",
"embeddingBindingHost": "Hôte de liaison d'incorporation",
"embeddingBindingHost": "Point de terminaison d'incorporation",
"embeddingModel": "Modèle d'incorporation",
"storageConfig": "Configuration de stockage",
"kvStorage": "Stockage clé-valeur",
@ -265,7 +265,11 @@
"graphStorage": "Stockage du graphe",
"vectorStorage": "Stockage vectoriel",
"workspace": "Espace de travail",
"maxGraphNodes": "Nombre maximum de nœuds du graphe"
"maxGraphNodes": "Nombre maximum de nœuds du graphe",
"rerankerConfig": "Configuration du reclassement",
"rerankerBindingHost": "Point de terminaison de reclassement",
"rerankerModel": "Modèle de reclassement",
"lockStatus": "État des verrous"
},
"propertiesView": {
"editProperty": "Modifier {{property}}",

View file

@ -252,12 +252,12 @@
"inputDirectory": "输入目录",
"llmConfig": "LLM配置",
"llmBinding": "LLM绑定",
"llmBindingHost": "LLM绑定主机",
"llmBindingHost": "LLM端点",
"llmModel": "LLM模型",
"maxTokens": "最大令牌数",
"embeddingConfig": "嵌入配置",
"embeddingBinding": "嵌入绑定",
"embeddingBindingHost": "嵌入绑定主机",
"embeddingBindingHost": "嵌入端点",
"embeddingModel": "嵌入模型",
"storageConfig": "存储配置",
"kvStorage": "KV存储",
@ -265,7 +265,11 @@
"graphStorage": "图存储",
"vectorStorage": "向量存储",
"workspace": "工作空间",
"maxGraphNodes": "最大图节点数"
"maxGraphNodes": "最大图节点数",
"rerankerConfig": "重排序配置",
"rerankerBindingHost": "重排序端点",
"rerankerModel": "重排序模型",
"lockStatus": "锁状态"
},
"propertiesView": {
"editProperty": "编辑{{property}}",

View file

@ -252,12 +252,12 @@
"inputDirectory": "輸入目錄",
"llmConfig": "LLM 設定",
"llmBinding": "LLM 綁定",
"llmBindingHost": "LLM 綁定主機",
"llmBindingHost": "LLM 端點",
"llmModel": "LLM 模型",
"maxTokens": "最大權杖數",
"embeddingConfig": "嵌入設定",
"embeddingBinding": "嵌入綁定",
"embeddingBindingHost": "嵌入綁定主機",
"embeddingBindingHost": "嵌入端點",
"embeddingModel": "嵌入模型",
"storageConfig": "儲存設定",
"kvStorage": "KV 儲存",
@ -265,7 +265,11 @@
"graphStorage": "圖形儲存",
"vectorStorage": "向量儲存",
"workspace": "工作空間",
"maxGraphNodes": "最大圖形節點數"
"maxGraphNodes": "最大圖形節點數",
"rerankerConfig": "重排序設定",
"rerankerBindingHost": "重排序端點",
"rerankerModel": "重排序模型",
"lockStatus": "鎖定狀態"
},
"propertiesView": {
"editProperty": "編輯{{property}}",

View file

@ -82,8 +82,10 @@ Documentation = "https://github.com/HKUDS/LightRAG"
Repository = "https://github.com/HKUDS/LightRAG"
"Bug Tracker" = "https://github.com/HKUDS/LightRAG/issues"
[tool.setuptools.packages.find]
include = ["lightrag*"]
[tool.setuptools]
packages = ["lightrag"]
include-package-data = true
[tool.setuptools.dynamic]