Merge branch 'main' into tongda/main

This commit is contained in:
yangdx 2025-09-09 18:27:56 +08:00
commit 3477e9f919
80 changed files with 1666 additions and 1079 deletions

View file

@ -1,4 +1,4 @@
name: Build Docker Image manually
name: Build Test Docker Image manually
on:
workflow_dispatch:
@ -30,6 +30,12 @@ jobs:
echo "tag=$LATEST_TAG" >> $GITHUB_OUTPUT
echo "image_tag=$LATEST_TAG" >> $GITHUB_OUTPUT
- name: Update version in __init__.py
run: |
sed -i "s/__version__ = \".*\"/__version__ = \"${{ steps.get_tag.outputs.tag }}\"/" lightrag/__init__.py
echo "Updated __init__.py with version ${{ steps.get_tag.outputs.tag }}"
cat lightrag/__init__.py | grep __version__
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3

View file

@ -1,4 +1,4 @@
name: Build Docker Image on Release
name: Build Latest Docker Image on Release
on:
release:
@ -15,6 +15,8 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0 # Fetch all history for tags
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
@ -26,14 +28,27 @@ jobs:
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Get latest tag
id: get_tag
run: |
TAG=$(git describe --tags --abbrev=0)
echo "Found tag: $TAG"
echo "tag=$TAG" >> $GITHUB_OUTPUT
- name: Update version in __init__.py
run: |
sed -i "s/__version__ = \".*\"/__version__ = \"${{ steps.get_tag.outputs.tag }}\"/" lightrag/__init__.py
echo "Updated __init__.py with version ${{ steps.get_tag.outputs.tag }}"
cat lightrag/__init__.py | grep __version__
- name: Extract metadata for Docker
id: meta
uses: docker/metadata-action@v5
with:
images: ghcr.io/${{ github.repository }}
tags: |
type=semver,pattern={{version}}
type=raw,value=latest,enable={{is_default_branch}}
type=raw,value=${{ steps.get_tag.outputs.tag }}
type=raw,value=latest
- name: Build and push Docker image
uses: docker/build-push-action@v5

View file

@ -3,6 +3,7 @@ name: Upload LightRAG-hku Package
on:
release:
types: [published]
workflow_dispatch:
permissions:
contents: read
@ -13,11 +14,27 @@ jobs:
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0 # Fetch all history for tags
- uses: actions/setup-python@v5
with:
python-version: "3.x"
- name: Get version from tag
id: get_version
run: |
TAG=$(git describe --tags --abbrev=0)
echo "Found tag: $TAG"
echo "Extracted version: $TAG"
echo "version=$TAG" >> $GITHUB_OUTPUT
- name: Update version in __init__.py
run: |
sed -i "s/__version__ = \".*\"/__version__ = \"${{ steps.get_version.outputs.version }}\"/" lightrag/__init__.py
echo "Updated __init__.py with version ${{ steps.get_version.outputs.version }}"
cat lightrag/__init__.py | grep __version__
- name: Build release distributions
run: |
python -m pip install build

View file

@ -107,7 +107,7 @@ RERANK_BINDING=null
# RERANK_BINDING_API_KEY=your_rerank_api_key_here
### Default value for Jina AI
# RERANK_MODELjina-reranker-v2-base-multilingual
# RERANK_MODEL=jina-reranker-v2-base-multilingual
# RERANK_BINDING_HOST=https://api.jina.ai/v1/rerank
# RERANK_BINDING_API_KEY=your_rerank_api_key_here
@ -125,7 +125,7 @@ ENABLE_LLM_CACHE_FOR_EXTRACT=true
SUMMARY_LANGUAGE=English
### Entity types that the LLM will attempt to recognize
# ENTITY_TYPES='["Organization", "Person", "Equiment", "Product", "Technology", "Location", "Event", "Category"]'
# ENTITY_TYPES='["Organization", "Person", "Location", "Event", "Technology", "Equipment", "Product", "Document", "Category"]'
### Chunk size for document splitting, 500~1500 is recommended
# CHUNK_SIZE=1200
@ -175,11 +175,8 @@ LLM_BINDING_API_KEY=your_api_key
# LLM_BINDING=openai
### OpenAI Specific Parameters
### To mitigate endless output loops and prevent greedy decoding for Qwen3, set the temperature parameter to a value between 0.8 and 1.0
# OPENAI_LLM_TEMPERATURE=1.0
# OPENAI_LLM_REASONING_EFFORT=low
### If the presence penalty still can not stop the model from generates repetitive or unconstrained output
# OPENAI_LLM_MAX_COMPLETION_TOKENS=16384
### Set the max_output_tokens to mitigate endless output of some LLM (less than LLM_TIMEOUT * llm_output_tokens/second, i.e. 9000 = 180s * 50 tokens/s)
# OPENAI_LLM_MAX_TOKENS=9000
### OpenRouter Specific Parameters
# OPENAI_LLM_EXTRA_BODY='{"reasoning": {"enabled": false}}'
@ -192,9 +189,10 @@ LLM_BINDING_API_KEY=your_api_key
### Ollama Server Specific Parameters
### OLLAMA_LLM_NUM_CTX must be provided, and should at least larger than MAX_TOTAL_TOKENS + 2000
OLLAMA_LLM_NUM_CTX=32768
# OLLAMA_LLM_TEMPERATURE=1.0
### Set the max_output_tokens to mitigate endless output of some LLM (less than LLM_TIMEOUT * llm_output_tokens/second, i.e. 9000 = 180s * 50 tokens/s)
# OLLAMA_LLM_NUM_PREDICT=9000
### Stop sequences for Ollama LLM
# OLLAMA_LLM_STOP='["</s>", "Assistant:", "\n\n"]'
# OLLAMA_LLM_STOP='["</s>", "<|EOT|>"]'
### use the following command to see all support options for Ollama LLM
### lightrag-server --llm-binding ollama --help

View file

@ -1 +1 @@
__api_version__ = "0211"
__api_version__ = "0218"

View file

@ -417,7 +417,7 @@ def update_uvicorn_mode_config():
global_args.workers = 1
# Log warning directly here
logging.warning(
f"In uvicorn mode, workers parameter was set to {original_workers}. Forcing workers=1"
f">> Forcing workers=1 in uvicorn mode(Ignoring workers={original_workers})"
)

View file

@ -97,11 +97,63 @@ def setup_signal_handlers():
signal.signal(signal.SIGTERM, signal_handler) # kill command
class LLMConfigCache:
"""Smart LLM and Embedding configuration cache class"""
def __init__(self, args):
self.args = args
# Initialize configurations based on binding conditions
self.openai_llm_options = None
self.ollama_llm_options = None
self.ollama_embedding_options = None
# Only initialize and log OpenAI options when using OpenAI-related bindings
if args.llm_binding in ["openai", "azure_openai"]:
from lightrag.llm.binding_options import OpenAILLMOptions
self.openai_llm_options = OpenAILLMOptions.options_dict(args)
logger.info(f"OpenAI LLM Options: {self.openai_llm_options}")
# Only initialize and log Ollama LLM options when using Ollama LLM binding
if args.llm_binding == "ollama":
try:
from lightrag.llm.binding_options import OllamaLLMOptions
self.ollama_llm_options = OllamaLLMOptions.options_dict(args)
logger.info(f"Ollama LLM Options: {self.ollama_llm_options}")
except ImportError:
logger.warning(
"OllamaLLMOptions not available, using default configuration"
)
self.ollama_llm_options = {}
# Only initialize and log Ollama Embedding options when using Ollama Embedding binding
if args.embedding_binding == "ollama":
try:
from lightrag.llm.binding_options import OllamaEmbeddingOptions
self.ollama_embedding_options = OllamaEmbeddingOptions.options_dict(
args
)
logger.info(
f"Ollama Embedding Options: {self.ollama_embedding_options}"
)
except ImportError:
logger.warning(
"OllamaEmbeddingOptions not available, using default configuration"
)
self.ollama_embedding_options = {}
def create_app(args):
# Setup logging
logger.setLevel(args.log_level)
set_verbose_debug(args.verbose)
# Create configuration cache (this will output configuration logs)
config_cache = LLMConfigCache(args)
# Verify that bindings are correctly setup
if args.llm_binding not in [
"lollms",
@ -238,10 +290,85 @@ def create_app(args):
# Create working directory if it doesn't exist
Path(args.working_dir).mkdir(parents=True, exist_ok=True)
def create_optimized_openai_llm_func(
config_cache: LLMConfigCache, args, llm_timeout: int
):
"""Create optimized OpenAI LLM function with pre-processed configuration"""
async def optimized_openai_alike_model_complete(
prompt,
system_prompt=None,
history_messages=None,
keyword_extraction=False,
**kwargs,
) -> str:
from lightrag.llm.openai import openai_complete_if_cache
keyword_extraction = kwargs.pop("keyword_extraction", None)
if keyword_extraction:
kwargs["response_format"] = GPTKeywordExtractionFormat
if history_messages is None:
history_messages = []
# Use pre-processed configuration to avoid repeated parsing
kwargs["timeout"] = llm_timeout
if config_cache.openai_llm_options:
kwargs.update(config_cache.openai_llm_options)
return await openai_complete_if_cache(
args.llm_model,
prompt,
system_prompt=system_prompt,
history_messages=history_messages,
base_url=args.llm_binding_host,
api_key=args.llm_binding_api_key,
**kwargs,
)
return optimized_openai_alike_model_complete
def create_optimized_azure_openai_llm_func(
config_cache: LLMConfigCache, args, llm_timeout: int
):
"""Create optimized Azure OpenAI LLM function with pre-processed configuration"""
async def optimized_azure_openai_model_complete(
prompt,
system_prompt=None,
history_messages=None,
keyword_extraction=False,
**kwargs,
) -> str:
from lightrag.llm.azure_openai import azure_openai_complete_if_cache
keyword_extraction = kwargs.pop("keyword_extraction", None)
if keyword_extraction:
kwargs["response_format"] = GPTKeywordExtractionFormat
if history_messages is None:
history_messages = []
# Use pre-processed configuration to avoid repeated parsing
kwargs["timeout"] = llm_timeout
if config_cache.openai_llm_options:
kwargs.update(config_cache.openai_llm_options)
return await azure_openai_complete_if_cache(
args.llm_model,
prompt,
system_prompt=system_prompt,
history_messages=history_messages,
base_url=args.llm_binding_host,
api_key=os.getenv("AZURE_OPENAI_API_KEY", args.llm_binding_api_key),
api_version=os.getenv("AZURE_OPENAI_API_VERSION", "2024-08-01-preview"),
**kwargs,
)
return optimized_azure_openai_model_complete
def create_llm_model_func(binding: str):
"""
Create LLM model function based on binding type.
Uses lazy import to avoid unnecessary dependencies.
Uses optimized functions for OpenAI bindings and lazy import for others.
"""
try:
if binding == "lollms":
@ -255,9 +382,13 @@ def create_app(args):
elif binding == "aws_bedrock":
return bedrock_model_complete # Already defined locally
elif binding == "azure_openai":
return azure_openai_model_complete # Already defined locally
# Use optimized function with pre-processed configuration
return create_optimized_azure_openai_llm_func(
config_cache, args, llm_timeout
)
else: # openai and compatible
return openai_alike_model_complete # Already defined locally
# Use optimized function with pre-processed configuration
return create_optimized_openai_llm_func(config_cache, args, llm_timeout)
except ImportError as e:
raise Exception(f"Failed to import {binding} LLM binding: {e}")
@ -280,15 +411,15 @@ def create_app(args):
raise Exception(f"Failed to import {binding} options: {e}")
return {}
def create_embedding_function_with_lazy_import(
binding, model, host, api_key, dimensions, args
def create_optimized_embedding_function(
config_cache: LLMConfigCache, binding, model, host, api_key, dimensions, args
):
"""
Create embedding function with lazy imports for all bindings.
Replaces the current create_embedding_function with full lazy import support.
Create optimized embedding function with pre-processed configuration for applicable bindings.
Uses lazy imports for all bindings and avoids repeated configuration parsing.
"""
async def embedding_function(texts):
async def optimized_embedding_function(texts):
try:
if binding == "lollms":
from lightrag.llm.lollms import lollms_embed
@ -297,10 +428,17 @@ def create_app(args):
texts, embed_model=model, host=host, api_key=api_key
)
elif binding == "ollama":
from lightrag.llm.binding_options import OllamaEmbeddingOptions
from lightrag.llm.ollama import ollama_embed
ollama_options = OllamaEmbeddingOptions.options_dict(args)
# Use pre-processed configuration if available, otherwise fallback to dynamic parsing
if config_cache.ollama_embedding_options is not None:
ollama_options = config_cache.ollama_embedding_options
else:
# Fallback for cases where config cache wasn't initialized properly
from lightrag.llm.binding_options import OllamaEmbeddingOptions
ollama_options = OllamaEmbeddingOptions.options_dict(args)
return await ollama_embed(
texts,
embed_model=model,
@ -331,78 +469,13 @@ def create_app(args):
except ImportError as e:
raise Exception(f"Failed to import {binding} embedding: {e}")
return embedding_function
return optimized_embedding_function
llm_timeout = get_env_value("LLM_TIMEOUT", DEFAULT_LLM_TIMEOUT, int)
embedding_timeout = get_env_value(
"EMBEDDING_TIMEOUT", DEFAULT_EMBEDDING_TIMEOUT, int
)
async def openai_alike_model_complete(
prompt,
system_prompt=None,
history_messages=None,
keyword_extraction=False,
**kwargs,
) -> str:
# Lazy import
from lightrag.llm.openai import openai_complete_if_cache
from lightrag.llm.binding_options import OpenAILLMOptions
keyword_extraction = kwargs.pop("keyword_extraction", None)
if keyword_extraction:
kwargs["response_format"] = GPTKeywordExtractionFormat
if history_messages is None:
history_messages = []
# Use OpenAI LLM options if available
openai_options = OpenAILLMOptions.options_dict(args)
kwargs["timeout"] = llm_timeout
kwargs.update(openai_options)
return await openai_complete_if_cache(
args.llm_model,
prompt,
system_prompt=system_prompt,
history_messages=history_messages,
base_url=args.llm_binding_host,
api_key=args.llm_binding_api_key,
**kwargs,
)
async def azure_openai_model_complete(
prompt,
system_prompt=None,
history_messages=None,
keyword_extraction=False,
**kwargs,
) -> str:
# Lazy import
from lightrag.llm.azure_openai import azure_openai_complete_if_cache
from lightrag.llm.binding_options import OpenAILLMOptions
keyword_extraction = kwargs.pop("keyword_extraction", None)
if keyword_extraction:
kwargs["response_format"] = GPTKeywordExtractionFormat
if history_messages is None:
history_messages = []
# Use OpenAI LLM options
openai_options = OpenAILLMOptions.options_dict(args)
kwargs["timeout"] = llm_timeout
kwargs.update(openai_options)
return await azure_openai_complete_if_cache(
args.llm_model,
prompt,
system_prompt=system_prompt,
history_messages=history_messages,
base_url=args.llm_binding_host,
api_key=os.getenv("AZURE_OPENAI_API_KEY"),
api_version=os.getenv("AZURE_OPENAI_API_VERSION", "2024-08-01-preview"),
**kwargs,
)
async def bedrock_model_complete(
prompt,
system_prompt=None,
@ -430,16 +503,17 @@ def create_app(args):
**kwargs,
)
# Create embedding function with lazy imports
# Create embedding function with optimized configuration
embedding_func = EmbeddingFunc(
embedding_dim=args.embedding_dim,
func=create_embedding_function_with_lazy_import(
func=create_optimized_embedding_function(
config_cache=config_cache,
binding=args.embedding_binding,
model=args.embedding_model,
host=args.embedding_binding_host,
api_key=args.embedding_binding_api_key,
dimensions=args.embedding_dim,
args=args, # Pass args object for dynamic option generation
args=args, # Pass args object for fallback option generation
),
)

View file

@ -1947,7 +1947,8 @@ def create_document_routes(
- cur_batch (int): Current processing batch
- request_pending (bool): Flag for pending request for processing
- latest_message (str): Latest message from pipeline processing
- history_messages (List[str], optional): List of history messages
- history_messages (List[str], optional): List of history messages (limited to latest 1000 entries,
with truncation message if more than 1000 messages exist)
Raises:
HTTPException: If an error occurs while retrieving pipeline status (500)
@ -1982,8 +1983,28 @@ def create_document_routes(
status_dict["update_status"] = processed_update_status
# Convert history_messages to a regular list if it's a Manager.list
# and limit to latest 1000 entries with truncation message if needed
if "history_messages" in status_dict:
status_dict["history_messages"] = list(status_dict["history_messages"])
history_list = list(status_dict["history_messages"])
total_count = len(history_list)
if total_count > 1000:
# Calculate truncated message count
truncated_count = total_count - 1000
# Take only the latest 1000 messages
latest_messages = history_list[-1000:]
# Add truncation message at the beginning
truncation_message = (
f"[Truncated history messages: {truncated_count}/{total_count}]"
)
status_dict["history_messages"] = [
truncation_message
] + latest_messages
else:
# No truncation needed, return all messages
status_dict["history_messages"] = history_list
# Ensure job_start is properly formatted as a string with timezone information
if "job_start" in status_dict and status_dict["job_start"]:

View file

@ -1 +1 @@
import{e as o,c as l,g as b,k as O,h as P,j as p,l as w,m as c,n as v,t as A,o as N}from"./_baseUniq-Coqzfado.js";import{a_ as g,aw as _,a$ as $,b0 as E,b1 as F,b2 as x,b3 as M,b4 as y,b5 as B,b6 as T}from"./mermaid-vendor-B20mDgAo.js";var S=/\s/;function G(n){for(var r=n.length;r--&&S.test(n.charAt(r)););return r}var H=/^\s+/;function L(n){return n&&n.slice(0,G(n)+1).replace(H,"")}var m=NaN,R=/^[-+]0x[0-9a-f]+$/i,q=/^0b[01]+$/i,z=/^0o[0-7]+$/i,C=parseInt;function K(n){if(typeof n=="number")return n;if(o(n))return m;if(g(n)){var r=typeof n.valueOf=="function"?n.valueOf():n;n=g(r)?r+"":r}if(typeof n!="string")return n===0?n:+n;n=L(n);var t=q.test(n);return t||z.test(n)?C(n.slice(2),t?2:8):R.test(n)?m:+n}var W=1/0,X=17976931348623157e292;function Y(n){if(!n)return n===0?n:0;if(n=K(n),n===W||n===-1/0){var r=n<0?-1:1;return r*X}return n===n?n:0}function D(n){var r=Y(n),t=r%1;return r===r?t?r-t:r:0}function fn(n){var r=n==null?0:n.length;return r?l(n):[]}var I=Object.prototype,J=I.hasOwnProperty,dn=_(function(n,r){n=Object(n);var t=-1,e=r.length,i=e>2?r[2]:void 0;for(i&&$(r[0],r[1],i)&&(e=1);++t<e;)for(var f=r[t],a=E(f),s=-1,d=a.length;++s<d;){var u=a[s],h=n[u];(h===void 0||F(h,I[u])&&!J.call(n,u))&&(n[u]=f[u])}return n});function un(n){var r=n==null?0:n.length;return r?n[r-1]:void 0}function Q(n){return function(r,t,e){var i=Object(r);if(!x(r)){var f=b(t);r=O(r),t=function(s){return f(i[s],s,i)}}var a=n(r,t,e);return a>-1?i[f?r[a]:a]:void 0}}var U=Math.max;function Z(n,r,t){var e=n==null?0:n.length;if(!e)return-1;var i=t==null?0:D(t);return i<0&&(i=U(e+i,0)),P(n,b(r),i)}var hn=Q(Z);function V(n,r){var t=-1,e=x(n)?Array(n.length):[];return p(n,function(i,f,a){e[++t]=r(i,f,a)}),e}function gn(n,r){var t=M(n)?w:V;return t(n,b(r))}var j=Object.prototype,k=j.hasOwnProperty;function nn(n,r){return n!=null&&k.call(n,r)}function bn(n,r){return n!=null&&c(n,r,nn)}function rn(n,r){return n<r}function tn(n,r,t){for(var e=-1,i=n.length;++e<i;){var 
f=n[e],a=r(f);if(a!=null&&(s===void 0?a===a&&!o(a):t(a,s)))var s=a,d=f}return d}function mn(n){return n&&n.length?tn(n,y,rn):void 0}function an(n,r,t,e){if(!g(n))return n;r=v(r,n);for(var i=-1,f=r.length,a=f-1,s=n;s!=null&&++i<f;){var d=A(r[i]),u=t;if(d==="__proto__"||d==="constructor"||d==="prototype")return n;if(i!=a){var h=s[d];u=void 0,u===void 0&&(u=g(h)?h:B(r[i+1])?[]:{})}T(s,d,u),s=s[d]}return n}function on(n,r,t){for(var e=-1,i=r.length,f={};++e<i;){var a=r[e],s=N(n,a);t(s,a)&&an(f,v(a,n),s)}return f}export{rn as a,tn as b,V as c,on as d,mn as e,fn as f,hn as g,bn as h,dn as i,D as j,un as l,gn as m,Y as t};
import{e as o,c as l,g as b,k as O,h as P,j as p,l as w,m as c,n as v,t as A,o as N}from"./_baseUniq-I-HKKcQv.js";import{a_ as g,aw as _,a$ as $,b0 as E,b1 as F,b2 as x,b3 as M,b4 as y,b5 as B,b6 as T}from"./mermaid-vendor-BNDdXxLk.js";var S=/\s/;function G(n){for(var r=n.length;r--&&S.test(n.charAt(r)););return r}var H=/^\s+/;function L(n){return n&&n.slice(0,G(n)+1).replace(H,"")}var m=NaN,R=/^[-+]0x[0-9a-f]+$/i,q=/^0b[01]+$/i,z=/^0o[0-7]+$/i,C=parseInt;function K(n){if(typeof n=="number")return n;if(o(n))return m;if(g(n)){var r=typeof n.valueOf=="function"?n.valueOf():n;n=g(r)?r+"":r}if(typeof n!="string")return n===0?n:+n;n=L(n);var t=q.test(n);return t||z.test(n)?C(n.slice(2),t?2:8):R.test(n)?m:+n}var W=1/0,X=17976931348623157e292;function Y(n){if(!n)return n===0?n:0;if(n=K(n),n===W||n===-1/0){var r=n<0?-1:1;return r*X}return n===n?n:0}function D(n){var r=Y(n),t=r%1;return r===r?t?r-t:r:0}function fn(n){var r=n==null?0:n.length;return r?l(n):[]}var I=Object.prototype,J=I.hasOwnProperty,dn=_(function(n,r){n=Object(n);var t=-1,e=r.length,i=e>2?r[2]:void 0;for(i&&$(r[0],r[1],i)&&(e=1);++t<e;)for(var f=r[t],a=E(f),s=-1,d=a.length;++s<d;){var u=a[s],h=n[u];(h===void 0||F(h,I[u])&&!J.call(n,u))&&(n[u]=f[u])}return n});function un(n){var r=n==null?0:n.length;return r?n[r-1]:void 0}function Q(n){return function(r,t,e){var i=Object(r);if(!x(r)){var f=b(t);r=O(r),t=function(s){return f(i[s],s,i)}}var a=n(r,t,e);return a>-1?i[f?r[a]:a]:void 0}}var U=Math.max;function Z(n,r,t){var e=n==null?0:n.length;if(!e)return-1;var i=t==null?0:D(t);return i<0&&(i=U(e+i,0)),P(n,b(r),i)}var hn=Q(Z);function V(n,r){var t=-1,e=x(n)?Array(n.length):[];return p(n,function(i,f,a){e[++t]=r(i,f,a)}),e}function gn(n,r){var t=M(n)?w:V;return t(n,b(r))}var j=Object.prototype,k=j.hasOwnProperty;function nn(n,r){return n!=null&&k.call(n,r)}function bn(n,r){return n!=null&&c(n,r,nn)}function rn(n,r){return n<r}function tn(n,r,t){for(var e=-1,i=n.length;++e<i;){var 
f=n[e],a=r(f);if(a!=null&&(s===void 0?a===a&&!o(a):t(a,s)))var s=a,d=f}return d}function mn(n){return n&&n.length?tn(n,y,rn):void 0}function an(n,r,t,e){if(!g(n))return n;r=v(r,n);for(var i=-1,f=r.length,a=f-1,s=n;s!=null&&++i<f;){var d=A(r[i]),u=t;if(d==="__proto__"||d==="constructor"||d==="prototype")return n;if(i!=a){var h=s[d];u=void 0,u===void 0&&(u=g(h)?h:B(r[i+1])?[]:{})}T(s,d,u),s=s[d]}return n}function on(n,r,t){for(var e=-1,i=r.length,f={};++e<i;){var a=r[e],s=N(n,a);t(s,a)&&an(f,v(a,n),s)}return f}export{rn as a,tn as b,V as c,on as d,mn as e,fn as f,hn as g,bn as h,dn as i,D as j,un as l,gn as m,Y as t};

File diff suppressed because one or more lines are too long

View file

@ -1 +1 @@
import{_ as l}from"./mermaid-vendor-B20mDgAo.js";function m(e,c){var i,t,o;e.accDescr&&((i=c.setAccDescription)==null||i.call(c,e.accDescr)),e.accTitle&&((t=c.setAccTitle)==null||t.call(c,e.accTitle)),e.title&&((o=c.setDiagramTitle)==null||o.call(c,e.title))}l(m,"populateCommonDb");export{m as p};
import{_ as l}from"./mermaid-vendor-BNDdXxLk.js";function m(e,c){var i,t,o;e.accDescr&&((i=c.setAccDescription)==null||i.call(c,e.accDescr)),e.accTitle&&((t=c.setAccTitle)==null||t.call(c,e.accTitle)),e.title&&((o=c.setDiagramTitle)==null||o.call(c,e.title))}l(m,"populateCommonDb");export{m as p};

View file

@ -1 +1 @@
import{_ as n,a2 as x,j as l}from"./mermaid-vendor-B20mDgAo.js";var c=n((a,t)=>{const e=a.append("rect");if(e.attr("x",t.x),e.attr("y",t.y),e.attr("fill",t.fill),e.attr("stroke",t.stroke),e.attr("width",t.width),e.attr("height",t.height),t.name&&e.attr("name",t.name),t.rx&&e.attr("rx",t.rx),t.ry&&e.attr("ry",t.ry),t.attrs!==void 0)for(const r in t.attrs)e.attr(r,t.attrs[r]);return t.class&&e.attr("class",t.class),e},"drawRect"),d=n((a,t)=>{const e={x:t.startx,y:t.starty,width:t.stopx-t.startx,height:t.stopy-t.starty,fill:t.fill,stroke:t.stroke,class:"rect"};c(a,e).lower()},"drawBackgroundRect"),g=n((a,t)=>{const e=t.text.replace(x," "),r=a.append("text");r.attr("x",t.x),r.attr("y",t.y),r.attr("class","legend"),r.style("text-anchor",t.anchor),t.class&&r.attr("class",t.class);const s=r.append("tspan");return s.attr("x",t.x+t.textMargin*2),s.text(e),r},"drawText"),h=n((a,t,e,r)=>{const s=a.append("image");s.attr("x",t),s.attr("y",e);const i=l.sanitizeUrl(r);s.attr("xlink:href",i)},"drawImage"),m=n((a,t,e,r)=>{const s=a.append("use");s.attr("x",t),s.attr("y",e);const i=l.sanitizeUrl(r);s.attr("xlink:href",`#${i}`)},"drawEmbeddedImage"),y=n(()=>({x:0,y:0,width:100,height:100,fill:"#EDF2AE",stroke:"#666",anchor:"start",rx:0,ry:0}),"getNoteRect"),p=n(()=>({x:0,y:0,width:100,height:100,"text-anchor":"start",style:"#666",textMargin:0,rx:0,ry:0,tspan:!0}),"getTextObj");export{d as a,p as b,m as c,c as d,h as e,g as f,y as g};
import{_ as n,a2 as x,j as l}from"./mermaid-vendor-BNDdXxLk.js";var c=n((a,t)=>{const e=a.append("rect");if(e.attr("x",t.x),e.attr("y",t.y),e.attr("fill",t.fill),e.attr("stroke",t.stroke),e.attr("width",t.width),e.attr("height",t.height),t.name&&e.attr("name",t.name),t.rx&&e.attr("rx",t.rx),t.ry&&e.attr("ry",t.ry),t.attrs!==void 0)for(const r in t.attrs)e.attr(r,t.attrs[r]);return t.class&&e.attr("class",t.class),e},"drawRect"),d=n((a,t)=>{const e={x:t.startx,y:t.starty,width:t.stopx-t.startx,height:t.stopy-t.starty,fill:t.fill,stroke:t.stroke,class:"rect"};c(a,e).lower()},"drawBackgroundRect"),g=n((a,t)=>{const e=t.text.replace(x," "),r=a.append("text");r.attr("x",t.x),r.attr("y",t.y),r.attr("class","legend"),r.style("text-anchor",t.anchor),t.class&&r.attr("class",t.class);const s=r.append("tspan");return s.attr("x",t.x+t.textMargin*2),s.text(e),r},"drawText"),h=n((a,t,e,r)=>{const s=a.append("image");s.attr("x",t),s.attr("y",e);const i=l.sanitizeUrl(r);s.attr("xlink:href",i)},"drawImage"),m=n((a,t,e,r)=>{const s=a.append("use");s.attr("x",t),s.attr("y",e);const i=l.sanitizeUrl(r);s.attr("xlink:href",`#${i}`)},"drawEmbeddedImage"),y=n(()=>({x:0,y:0,width:100,height:100,fill:"#EDF2AE",stroke:"#666",anchor:"start",rx:0,ry:0}),"getNoteRect"),p=n(()=>({x:0,y:0,width:100,height:100,"text-anchor":"start",style:"#666",textMargin:0,rx:0,ry:0,tspan:!0}),"getTextObj");export{d as a,p as b,m as c,c as d,h as e,g as f,y as g};

View file

@ -1 +1 @@
import{_ as s}from"./mermaid-vendor-B20mDgAo.js";var t,e=(t=class{constructor(i){this.init=i,this.records=this.init()}reset(){this.records=this.init()}},s(t,"ImperativeState"),t);export{e as I};
import{_ as s}from"./mermaid-vendor-BNDdXxLk.js";var t,e=(t=class{constructor(i){this.init=i,this.records=this.init()}reset(){this.records=this.init()}},s(t,"ImperativeState"),t);export{e as I};

View file

@ -1 +1 @@
import{_ as a,d as o}from"./mermaid-vendor-B20mDgAo.js";var d=a((t,e)=>{let n;return e==="sandbox"&&(n=o("#i"+t)),(e==="sandbox"?o(n.nodes()[0].contentDocument.body):o("body")).select(`[id="${t}"]`)},"getDiagramElement");export{d as g};
import{_ as a,d as o}from"./mermaid-vendor-BNDdXxLk.js";var d=a((t,e)=>{let n;return e==="sandbox"&&(n=o("#i"+t)),(e==="sandbox"?o(n.nodes()[0].contentDocument.body):o("body")).select(`[id="${t}"]`)},"getDiagramElement");export{d as g};

View file

@ -1,4 +1,4 @@
import{_ as e}from"./mermaid-vendor-B20mDgAo.js";var l=e(()=>`
import{_ as e}from"./mermaid-vendor-BNDdXxLk.js";var l=e(()=>`
/* Font Awesome icon styling - consolidated */
.label-icon {
display: inline-block;

View file

@ -1 +1 @@
import{_ as a,e as w,l as x}from"./mermaid-vendor-B20mDgAo.js";var d=a((e,t,i,o)=>{e.attr("class",i);const{width:r,height:h,x:n,y:c}=u(e,t);w(e,h,r,o);const s=l(n,c,r,h,t);e.attr("viewBox",s),x.debug(`viewBox configured: ${s} with padding: ${t}`)},"setupViewPortForSVG"),u=a((e,t)=>{var o;const i=((o=e.node())==null?void 0:o.getBBox())||{width:0,height:0,x:0,y:0};return{width:i.width+t*2,height:i.height+t*2,x:i.x,y:i.y}},"calculateDimensionsWithPadding"),l=a((e,t,i,o,r)=>`${e-r} ${t-r} ${i} ${o}`,"createViewBox");export{d as s};
import{_ as a,e as w,l as x}from"./mermaid-vendor-BNDdXxLk.js";var d=a((e,t,i,o)=>{e.attr("class",i);const{width:r,height:h,x:n,y:c}=u(e,t);w(e,h,r,o);const s=l(n,c,r,h,t);e.attr("viewBox",s),x.debug(`viewBox configured: ${s} with padding: ${t}`)},"setupViewPortForSVG"),u=a((e,t)=>{var o;const i=((o=e.node())==null?void 0:o.getBBox())||{width:0,height:0,x:0,y:0};return{width:i.width+t*2,height:i.height+t*2,x:i.x,y:i.y}},"calculateDimensionsWithPadding"),l=a((e,t,i,o,r)=>`${e-r} ${t-r} ${i} ${o}`,"createViewBox");export{d as s};

View file

@ -0,0 +1 @@
import{s as a,c as s,a as e,C as t}from"./chunk-SZ463SBG-CFfqCI7r.js";import{_ as i}from"./mermaid-vendor-BNDdXxLk.js";import"./chunk-E2GYISFI-Cy7NDsKx.js";import"./chunk-BFAMUDN2-U-jvyawD.js";import"./chunk-SKB7J2MH-Dh8Tfa_E.js";import"./feature-graph-BWr9U7tw.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";var c={parser:e,get db(){return new t},renderer:s,styles:a,init:i(r=>{r.class||(r.class={}),r.class.arrowMarkerAbsolute=r.arrowMarkerAbsolute},"init")};export{c as diagram};

View file

@ -1 +0,0 @@
import{s as a,c as s,a as e,C as t}from"./chunk-SZ463SBG-B3Q_dfR2.js";import{_ as i}from"./mermaid-vendor-B20mDgAo.js";import"./chunk-E2GYISFI-Du9g7EeC.js";import"./chunk-BFAMUDN2-Cm57CA8s.js";import"./chunk-SKB7J2MH-CX-6qXaF.js";import"./feature-graph-CS4MyqEv.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";var c={parser:e,get db(){return new t},renderer:s,styles:a,init:i(r=>{r.class||(r.class={}),r.class.arrowMarkerAbsolute=r.arrowMarkerAbsolute},"init")};export{c as diagram};

View file

@ -0,0 +1 @@
import{s as a,c as s,a as e,C as t}from"./chunk-SZ463SBG-CFfqCI7r.js";import{_ as i}from"./mermaid-vendor-BNDdXxLk.js";import"./chunk-E2GYISFI-Cy7NDsKx.js";import"./chunk-BFAMUDN2-U-jvyawD.js";import"./chunk-SKB7J2MH-Dh8Tfa_E.js";import"./feature-graph-BWr9U7tw.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";var c={parser:e,get db(){return new t},renderer:s,styles:a,init:i(r=>{r.class||(r.class={}),r.class.arrowMarkerAbsolute=r.arrowMarkerAbsolute},"init")};export{c as diagram};

View file

@ -1 +0,0 @@
import{s as a,c as s,a as e,C as t}from"./chunk-SZ463SBG-B3Q_dfR2.js";import{_ as i}from"./mermaid-vendor-B20mDgAo.js";import"./chunk-E2GYISFI-Du9g7EeC.js";import"./chunk-BFAMUDN2-Cm57CA8s.js";import"./chunk-SKB7J2MH-CX-6qXaF.js";import"./feature-graph-CS4MyqEv.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";var c={parser:e,get db(){return new t},renderer:s,styles:a,init:i(r=>{r.class||(r.class={}),r.class.arrowMarkerAbsolute=r.arrowMarkerAbsolute},"init")};export{c as diagram};

View file

@ -1 +0,0 @@
import{b as r}from"./_baseUniq-Coqzfado.js";var e=4;function a(o){return r(o,e)}export{a as c};

View file

@ -0,0 +1 @@
import{b as r}from"./_baseUniq-I-HKKcQv.js";var e=4;function a(o){return r(o,e)}export{a as c};

View file

@ -1,4 +1,4 @@
import{p as y}from"./chunk-353BL4L5-CR8qX5CO.js";import{_ as l,s as B,g as S,t as z,q as F,a as P,b as E,F as v,K as W,e as T,z as D,G as _,H as A,l as w}from"./mermaid-vendor-B20mDgAo.js";import{p as N}from"./treemap-75Q7IDZK-64OceOGQ.js";import"./feature-graph-CS4MyqEv.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";import"./_baseUniq-Coqzfado.js";import"./_basePickBy-zcTWmF8I.js";import"./clone-D1fuhfq3.js";var x={packet:[]},m=structuredClone(x),L=A.packet,Y=l(()=>{const t=v({...L,..._().packet});return t.showBits&&(t.paddingY+=10),t},"getConfig"),G=l(()=>m.packet,"getPacket"),H=l(t=>{t.length>0&&m.packet.push(t)},"pushWord"),I=l(()=>{D(),m=structuredClone(x)},"clear"),u={pushWord:H,getPacket:G,getConfig:Y,clear:I,setAccTitle:E,getAccTitle:P,setDiagramTitle:F,getDiagramTitle:z,getAccDescription:S,setAccDescription:B},K=1e4,M=l(t=>{y(t,u);let e=-1,o=[],n=1;const{bitsPerRow:i}=u.getConfig();for(let{start:a,end:r,bits:c,label:f}of t.blocks){if(a!==void 0&&r!==void 0&&r<a)throw new Error(`Packet block ${a} - ${r} is invalid. End must be greater than start.`);if(a??(a=e+1),a!==e+1)throw new Error(`Packet block ${a} - ${r??a} is not contiguous. It should start from ${e+1}.`);if(c===0)throw new Error(`Packet block ${a} is invalid. 
Cannot have a zero bit field.`);for(r??(r=a+(c??1)-1),c??(c=r-a+1),e=r,w.debug(`Packet block ${a} - ${e} with label ${f}`);o.length<=i+1&&u.getPacket().length<K;){const[d,p]=O({start:a,end:r,bits:c,label:f},n,i);if(o.push(d),d.end+1===n*i&&(u.pushWord(o),o=[],n++),!p)break;({start:a,end:r,bits:c,label:f}=p)}}u.pushWord(o)},"populate"),O=l((t,e,o)=>{if(t.start===void 0)throw new Error("start should have been set during first phase");if(t.end===void 0)throw new Error("end should have been set during first phase");if(t.start>t.end)throw new Error(`Block start ${t.start} is greater than block end ${t.end}.`);if(t.end+1<=e*o)return[t,void 0];const n=e*o-1,i=e*o;return[{start:t.start,end:n,label:t.label,bits:n-t.start},{start:i,end:t.end,label:t.label,bits:t.end-i}]},"getNextFittingBlock"),q={parse:l(async t=>{const e=await N("packet",t);w.debug(e),M(e)},"parse")},R=l((t,e,o,n)=>{const i=n.db,a=i.getConfig(),{rowHeight:r,paddingY:c,bitWidth:f,bitsPerRow:d}=a,p=i.getPacket(),s=i.getDiagramTitle(),k=r+c,g=k*(p.length+1)-(s?0:r),b=f*d+2,h=W(e);h.attr("viewbox",`0 0 ${b} ${g}`),T(h,g,b,a.useMaxWidth);for(const[C,$]of p.entries())U(h,$,C,a);h.append("text").text(s).attr("x",b/2).attr("y",g-k/2).attr("dominant-baseline","middle").attr("text-anchor","middle").attr("class","packetTitle")},"draw"),U=l((t,e,o,{rowHeight:n,paddingX:i,paddingY:a,bitWidth:r,bitsPerRow:c,showBits:f})=>{const d=t.append("g"),p=o*(n+a)+a;for(const s of e){const k=s.start%c*r+1,g=(s.end-s.start+1)*r-i;if(d.append("rect").attr("x",k).attr("y",p).attr("width",g).attr("height",n).attr("class","packetBlock"),d.append("text").attr("x",k+g/2).attr("y",p+n/2).attr("class","packetLabel").attr("dominant-baseline","middle").attr("text-anchor","middle").text(s.label),!f)continue;const b=s.end===s.start,h=p-2;d.append("text").attr("x",k+(b?g/2:0)).attr("y",h).attr("class","packetByte 
start").attr("dominant-baseline","auto").attr("text-anchor",b?"middle":"start").text(s.start),b||d.append("text").attr("x",k+g).attr("y",h).attr("class","packetByte end").attr("dominant-baseline","auto").attr("text-anchor","end").text(s.end)}},"drawWord"),X={draw:R},j={byteFontSize:"10px",startByteColor:"black",endByteColor:"black",labelColor:"black",labelFontSize:"12px",titleColor:"black",titleFontSize:"14px",blockStrokeColor:"black",blockStrokeWidth:"1",blockFillColor:"#efefef"},J=l(({packet:t}={})=>{const e=v(j,t);return`
import{p as y}from"./chunk-353BL4L5-_6GQP54L.js";import{_ as l,s as B,g as S,t as z,q as F,a as P,b as E,F as v,K as W,e as T,z as D,G as _,H as A,l as w}from"./mermaid-vendor-BNDdXxLk.js";import{p as N}from"./treemap-75Q7IDZK-BTRfYg24.js";import"./feature-graph-BWr9U7tw.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";import"./_baseUniq-I-HKKcQv.js";import"./_basePickBy-BPAKxgo7.js";import"./clone-DwaenhoE.js";var x={packet:[]},m=structuredClone(x),L=A.packet,Y=l(()=>{const t=v({...L,..._().packet});return t.showBits&&(t.paddingY+=10),t},"getConfig"),G=l(()=>m.packet,"getPacket"),H=l(t=>{t.length>0&&m.packet.push(t)},"pushWord"),I=l(()=>{D(),m=structuredClone(x)},"clear"),u={pushWord:H,getPacket:G,getConfig:Y,clear:I,setAccTitle:E,getAccTitle:P,setDiagramTitle:F,getDiagramTitle:z,getAccDescription:S,setAccDescription:B},K=1e4,M=l(t=>{y(t,u);let e=-1,o=[],n=1;const{bitsPerRow:i}=u.getConfig();for(let{start:a,end:r,bits:c,label:f}of t.blocks){if(a!==void 0&&r!==void 0&&r<a)throw new Error(`Packet block ${a} - ${r} is invalid. End must be greater than start.`);if(a??(a=e+1),a!==e+1)throw new Error(`Packet block ${a} - ${r??a} is not contiguous. It should start from ${e+1}.`);if(c===0)throw new Error(`Packet block ${a} is invalid. 
Cannot have a zero bit field.`);for(r??(r=a+(c??1)-1),c??(c=r-a+1),e=r,w.debug(`Packet block ${a} - ${e} with label ${f}`);o.length<=i+1&&u.getPacket().length<K;){const[d,p]=O({start:a,end:r,bits:c,label:f},n,i);if(o.push(d),d.end+1===n*i&&(u.pushWord(o),o=[],n++),!p)break;({start:a,end:r,bits:c,label:f}=p)}}u.pushWord(o)},"populate"),O=l((t,e,o)=>{if(t.start===void 0)throw new Error("start should have been set during first phase");if(t.end===void 0)throw new Error("end should have been set during first phase");if(t.start>t.end)throw new Error(`Block start ${t.start} is greater than block end ${t.end}.`);if(t.end+1<=e*o)return[t,void 0];const n=e*o-1,i=e*o;return[{start:t.start,end:n,label:t.label,bits:n-t.start},{start:i,end:t.end,label:t.label,bits:t.end-i}]},"getNextFittingBlock"),q={parse:l(async t=>{const e=await N("packet",t);w.debug(e),M(e)},"parse")},R=l((t,e,o,n)=>{const i=n.db,a=i.getConfig(),{rowHeight:r,paddingY:c,bitWidth:f,bitsPerRow:d}=a,p=i.getPacket(),s=i.getDiagramTitle(),k=r+c,g=k*(p.length+1)-(s?0:r),b=f*d+2,h=W(e);h.attr("viewbox",`0 0 ${b} ${g}`),T(h,g,b,a.useMaxWidth);for(const[C,$]of p.entries())U(h,$,C,a);h.append("text").text(s).attr("x",b/2).attr("y",g-k/2).attr("dominant-baseline","middle").attr("text-anchor","middle").attr("class","packetTitle")},"draw"),U=l((t,e,o,{rowHeight:n,paddingX:i,paddingY:a,bitWidth:r,bitsPerRow:c,showBits:f})=>{const d=t.append("g"),p=o*(n+a)+a;for(const s of e){const k=s.start%c*r+1,g=(s.end-s.start+1)*r-i;if(d.append("rect").attr("x",k).attr("y",p).attr("width",g).attr("height",n).attr("class","packetBlock"),d.append("text").attr("x",k+g/2).attr("y",p+n/2).attr("class","packetLabel").attr("dominant-baseline","middle").attr("text-anchor","middle").text(s.label),!f)continue;const b=s.end===s.start,h=p-2;d.append("text").attr("x",k+(b?g/2:0)).attr("y",h).attr("class","packetByte 
start").attr("dominant-baseline","auto").attr("text-anchor",b?"middle":"start").text(s.start),b||d.append("text").attr("x",k+g).attr("y",h).attr("class","packetByte end").attr("dominant-baseline","auto").attr("text-anchor","end").text(s.end)}},"drawWord"),X={draw:R},j={byteFontSize:"10px",startByteColor:"black",endByteColor:"black",labelColor:"black",labelFontSize:"12px",titleColor:"black",titleFontSize:"14px",blockStrokeColor:"black",blockStrokeWidth:"1",blockFillColor:"#efefef"},J=l(({packet:t}={})=>{const e=v(j,t);return`
.packetByte {
font-size: ${e.byteFontSize};
}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View file

@ -1,4 +1,4 @@
import{g as q1}from"./chunk-E2GYISFI-Du9g7EeC.js";import{_ as m,o as O1,l as ee,c as be,d as Se,p as H1,r as X1,u as i1,b as Q1,s as J1,q as Z1,a as $1,g as et,t as tt,k as st,v as it,J as rt,x as nt,y as s1,z as at,A as ut,B as lt,C as ot}from"./mermaid-vendor-B20mDgAo.js";import{g as ct}from"./chunk-BFAMUDN2-Cm57CA8s.js";import{s as ht}from"./chunk-SKB7J2MH-CX-6qXaF.js";import"./feature-graph-CS4MyqEv.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";var dt="flowchart-",Pe,pt=(Pe=class{constructor(){this.vertexCounter=0,this.config=be(),this.vertices=new Map,this.edges=[],this.classes=new Map,this.subGraphs=[],this.subGraphLookup=new Map,this.tooltips=new Map,this.subCount=0,this.firstGraphFlag=!0,this.secCount=-1,this.posCrossRef=[],this.funs=[],this.setAccTitle=Q1,this.setAccDescription=J1,this.setDiagramTitle=Z1,this.getAccTitle=$1,this.getAccDescription=et,this.getDiagramTitle=tt,this.funs.push(this.setupToolTips.bind(this)),this.addVertex=this.addVertex.bind(this),this.firstGraph=this.firstGraph.bind(this),this.setDirection=this.setDirection.bind(this),this.addSubGraph=this.addSubGraph.bind(this),this.addLink=this.addLink.bind(this),this.setLink=this.setLink.bind(this),this.updateLink=this.updateLink.bind(this),this.addClass=this.addClass.bind(this),this.setClass=this.setClass.bind(this),this.destructLink=this.destructLink.bind(this),this.setClickEvent=this.setClickEvent.bind(this),this.setTooltip=this.setTooltip.bind(this),this.updateLinkInterpolate=this.updateLinkInterpolate.bind(this),this.setClickFun=this.setClickFun.bind(this),this.bindFunctions=this.bindFunctions.bind(this),this.lex={firstGraph:this.firstGraph.bind(this)},this.clear(),this.setGen("gen-2")}sanitizeText(i){return st.sanitizeText(i,this.config)}lookUpDomId(i){for(const n of this.vertices.values())if(n.id===i)return n.domId;return i}addVertex(i,n,a,u,l,f,c={},A){var 
V,C;if(!i||i.trim().length===0)return;let r;if(A!==void 0){let p;A.includes(`
import{g as q1}from"./chunk-E2GYISFI-Cy7NDsKx.js";import{_ as m,o as O1,l as ee,c as be,d as Se,p as H1,r as X1,u as i1,b as Q1,s as J1,q as Z1,a as $1,g as et,t as tt,k as st,v as it,J as rt,x as nt,y as s1,z as at,A as ut,B as lt,C as ot}from"./mermaid-vendor-BNDdXxLk.js";import{g as ct}from"./chunk-BFAMUDN2-U-jvyawD.js";import{s as ht}from"./chunk-SKB7J2MH-Dh8Tfa_E.js";import"./feature-graph-BWr9U7tw.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";var dt="flowchart-",Pe,pt=(Pe=class{constructor(){this.vertexCounter=0,this.config=be(),this.vertices=new Map,this.edges=[],this.classes=new Map,this.subGraphs=[],this.subGraphLookup=new Map,this.tooltips=new Map,this.subCount=0,this.firstGraphFlag=!0,this.secCount=-1,this.posCrossRef=[],this.funs=[],this.setAccTitle=Q1,this.setAccDescription=J1,this.setDiagramTitle=Z1,this.getAccTitle=$1,this.getAccDescription=et,this.getDiagramTitle=tt,this.funs.push(this.setupToolTips.bind(this)),this.addVertex=this.addVertex.bind(this),this.firstGraph=this.firstGraph.bind(this),this.setDirection=this.setDirection.bind(this),this.addSubGraph=this.addSubGraph.bind(this),this.addLink=this.addLink.bind(this),this.setLink=this.setLink.bind(this),this.updateLink=this.updateLink.bind(this),this.addClass=this.addClass.bind(this),this.setClass=this.setClass.bind(this),this.destructLink=this.destructLink.bind(this),this.setClickEvent=this.setClickEvent.bind(this),this.setTooltip=this.setTooltip.bind(this),this.updateLinkInterpolate=this.updateLinkInterpolate.bind(this),this.setClickFun=this.setClickFun.bind(this),this.bindFunctions=this.bindFunctions.bind(this),this.lex={firstGraph:this.firstGraph.bind(this)},this.clear(),this.setGen("gen-2")}sanitizeText(i){return st.sanitizeText(i,this.config)}lookUpDomId(i){for(const n of this.vertices.values())if(n.id===i)return n.domId;return i}addVertex(i,n,a,u,l,f,c={},A){var 
V,C;if(!i||i.trim().length===0)return;let r;if(A!==void 0){let p;A.includes(`
`)?p=A+`
`:p=`{
`+A+`

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View file

@ -1,2 +1,2 @@
import{_ as e,l as o,K as i,e as n,L as p}from"./mermaid-vendor-B20mDgAo.js";import{p as m}from"./treemap-75Q7IDZK-64OceOGQ.js";import"./feature-graph-CS4MyqEv.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";import"./_baseUniq-Coqzfado.js";import"./_basePickBy-zcTWmF8I.js";import"./clone-D1fuhfq3.js";var g={parse:e(async r=>{const a=await m("info",r);o.debug(a)},"parse")},v={version:p.version+""},d=e(()=>v.version,"getVersion"),c={getVersion:d},l=e((r,a,s)=>{o.debug(`rendering info diagram
import{_ as e,l as o,K as i,e as n,L as p}from"./mermaid-vendor-BNDdXxLk.js";import{p as m}from"./treemap-75Q7IDZK-BTRfYg24.js";import"./feature-graph-BWr9U7tw.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";import"./_baseUniq-I-HKKcQv.js";import"./_basePickBy-BPAKxgo7.js";import"./clone-DwaenhoE.js";var g={parse:e(async r=>{const a=await m("info",r);o.debug(a)},"parse")},v={version:p.version+""},d=e(()=>v.version,"getVersion"),c={getVersion:d},l=e((r,a,s)=>{o.debug(`rendering info diagram
`+r);const t=i(a);n(t,100,400,!0),t.append("g").append("text").attr("x",100).attr("y",40).attr("class","version").attr("font-size",32).style("text-anchor","middle").text(`v${s}`)},"draw"),f={draw:l},L={parser:g,db:c,renderer:f};export{L as diagram};

View file

@ -1,4 +1,4 @@
import{a as gt,g as lt,f as mt,d as xt}from"./chunk-67H74DCK-CIrK_RBz.js";import{g as kt}from"./chunk-E2GYISFI-Du9g7EeC.js";import{_ as r,g as _t,s as bt,a as vt,b as wt,t as Tt,q as St,c as R,d as G,e as $t,z as Mt,N as et}from"./mermaid-vendor-B20mDgAo.js";import"./feature-graph-CS4MyqEv.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";var U=function(){var t=r(function(h,n,a,l){for(a=a||{},l=h.length;l--;a[h[l]]=n);return a},"o"),e=[6,8,10,11,12,14,16,17,18],s=[1,9],c=[1,10],i=[1,11],f=[1,12],u=[1,13],y=[1,14],g={trace:r(function(){},"trace"),yy:{},symbols_:{error:2,start:3,journey:4,document:5,EOF:6,line:7,SPACE:8,statement:9,NEWLINE:10,title:11,acc_title:12,acc_title_value:13,acc_descr:14,acc_descr_value:15,acc_descr_multiline_value:16,section:17,taskName:18,taskData:19,$accept:0,$end:1},terminals_:{2:"error",4:"journey",6:"EOF",8:"SPACE",10:"NEWLINE",11:"title",12:"acc_title",13:"acc_title_value",14:"acc_descr",15:"acc_descr_value",16:"acc_descr_multiline_value",17:"section",18:"taskName",19:"taskData"},productions_:[0,[3,3],[5,0],[5,2],[7,2],[7,1],[7,1],[7,1],[9,1],[9,2],[9,2],[9,1],[9,1],[9,2]],performAction:r(function(n,a,l,d,p,o,v){var k=o.length-1;switch(p){case 1:return o[k-1];case 2:this.$=[];break;case 3:o[k-1].push(o[k]),this.$=o[k-1];break;case 4:case 5:this.$=o[k];break;case 6:case 7:this.$=[];break;case 8:d.setDiagramTitle(o[k].substr(6)),this.$=o[k].substr(6);break;case 9:this.$=o[k].trim(),d.setAccTitle(this.$);break;case 10:case 11:this.$=o[k].trim(),d.setAccDescription(this.$);break;case 12:d.addSection(o[k].substr(8)),this.$=o[k].substr(8);break;case 
13:d.addTask(o[k-1],o[k]),this.$="task";break}},"anonymous"),table:[{3:1,4:[1,2]},{1:[3]},t(e,[2,2],{5:3}),{6:[1,4],7:5,8:[1,6],9:7,10:[1,8],11:s,12:c,14:i,16:f,17:u,18:y},t(e,[2,7],{1:[2,1]}),t(e,[2,3]),{9:15,11:s,12:c,14:i,16:f,17:u,18:y},t(e,[2,5]),t(e,[2,6]),t(e,[2,8]),{13:[1,16]},{15:[1,17]},t(e,[2,11]),t(e,[2,12]),{19:[1,18]},t(e,[2,4]),t(e,[2,9]),t(e,[2,10]),t(e,[2,13])],defaultActions:{},parseError:r(function(n,a){if(a.recoverable)this.trace(n);else{var l=new Error(n);throw l.hash=a,l}},"parseError"),parse:r(function(n){var a=this,l=[0],d=[],p=[null],o=[],v=this.table,k="",C=0,K=0,dt=2,Q=1,yt=o.slice.call(arguments,1),_=Object.create(this.lexer),I={yy:{}};for(var O in this.yy)Object.prototype.hasOwnProperty.call(this.yy,O)&&(I.yy[O]=this.yy[O]);_.setInput(n,I.yy),I.yy.lexer=_,I.yy.parser=this,typeof _.yylloc>"u"&&(_.yylloc={});var Y=_.yylloc;o.push(Y);var ft=_.options&&_.options.ranges;typeof I.yy.parseError=="function"?this.parseError=I.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;function pt(w){l.length=l.length-2*w,p.length=p.length-w,o.length=o.length-w}r(pt,"popStack");function D(){var w;return w=d.pop()||_.lex()||Q,typeof w!="number"&&(w instanceof Array&&(d=w,w=d.pop()),w=a.symbols_[w]||w),w}r(D,"lex");for(var b,A,T,q,F={},N,M,tt,z;;){if(A=l[l.length-1],this.defaultActions[A]?T=this.defaultActions[A]:((b===null||typeof b>"u")&&(b=D()),T=v[A]&&v[A][b]),typeof T>"u"||!T.length||!T[0]){var X="";z=[];for(N in v[A])this.terminals_[N]&&N>dt&&z.push("'"+this.terminals_[N]+"'");_.showPosition?X="Parse error on line "+(C+1)+`:
import{a as gt,g as lt,f as mt,d as xt}from"./chunk-67H74DCK-CRR2oQaz.js";import{g as kt}from"./chunk-E2GYISFI-Cy7NDsKx.js";import{_ as r,g as _t,s as bt,a as vt,b as wt,t as Tt,q as St,c as R,d as G,e as $t,z as Mt,N as et}from"./mermaid-vendor-BNDdXxLk.js";import"./feature-graph-BWr9U7tw.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";var U=function(){var t=r(function(h,n,a,l){for(a=a||{},l=h.length;l--;a[h[l]]=n);return a},"o"),e=[6,8,10,11,12,14,16,17,18],s=[1,9],c=[1,10],i=[1,11],f=[1,12],u=[1,13],y=[1,14],g={trace:r(function(){},"trace"),yy:{},symbols_:{error:2,start:3,journey:4,document:5,EOF:6,line:7,SPACE:8,statement:9,NEWLINE:10,title:11,acc_title:12,acc_title_value:13,acc_descr:14,acc_descr_value:15,acc_descr_multiline_value:16,section:17,taskName:18,taskData:19,$accept:0,$end:1},terminals_:{2:"error",4:"journey",6:"EOF",8:"SPACE",10:"NEWLINE",11:"title",12:"acc_title",13:"acc_title_value",14:"acc_descr",15:"acc_descr_value",16:"acc_descr_multiline_value",17:"section",18:"taskName",19:"taskData"},productions_:[0,[3,3],[5,0],[5,2],[7,2],[7,1],[7,1],[7,1],[9,1],[9,2],[9,2],[9,1],[9,1],[9,2]],performAction:r(function(n,a,l,d,p,o,v){var k=o.length-1;switch(p){case 1:return o[k-1];case 2:this.$=[];break;case 3:o[k-1].push(o[k]),this.$=o[k-1];break;case 4:case 5:this.$=o[k];break;case 6:case 7:this.$=[];break;case 8:d.setDiagramTitle(o[k].substr(6)),this.$=o[k].substr(6);break;case 9:this.$=o[k].trim(),d.setAccTitle(this.$);break;case 10:case 11:this.$=o[k].trim(),d.setAccDescription(this.$);break;case 12:d.addSection(o[k].substr(8)),this.$=o[k].substr(8);break;case 
13:d.addTask(o[k-1],o[k]),this.$="task";break}},"anonymous"),table:[{3:1,4:[1,2]},{1:[3]},t(e,[2,2],{5:3}),{6:[1,4],7:5,8:[1,6],9:7,10:[1,8],11:s,12:c,14:i,16:f,17:u,18:y},t(e,[2,7],{1:[2,1]}),t(e,[2,3]),{9:15,11:s,12:c,14:i,16:f,17:u,18:y},t(e,[2,5]),t(e,[2,6]),t(e,[2,8]),{13:[1,16]},{15:[1,17]},t(e,[2,11]),t(e,[2,12]),{19:[1,18]},t(e,[2,4]),t(e,[2,9]),t(e,[2,10]),t(e,[2,13])],defaultActions:{},parseError:r(function(n,a){if(a.recoverable)this.trace(n);else{var l=new Error(n);throw l.hash=a,l}},"parseError"),parse:r(function(n){var a=this,l=[0],d=[],p=[null],o=[],v=this.table,k="",C=0,K=0,dt=2,Q=1,yt=o.slice.call(arguments,1),_=Object.create(this.lexer),I={yy:{}};for(var O in this.yy)Object.prototype.hasOwnProperty.call(this.yy,O)&&(I.yy[O]=this.yy[O]);_.setInput(n,I.yy),I.yy.lexer=_,I.yy.parser=this,typeof _.yylloc>"u"&&(_.yylloc={});var Y=_.yylloc;o.push(Y);var ft=_.options&&_.options.ranges;typeof I.yy.parseError=="function"?this.parseError=I.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;function pt(w){l.length=l.length-2*w,p.length=p.length-w,o.length=o.length-w}r(pt,"popStack");function D(){var w;return w=d.pop()||_.lex()||Q,typeof w!="number"&&(w instanceof Array&&(d=w,w=d.pop()),w=a.symbols_[w]||w),w}r(D,"lex");for(var b,A,T,q,F={},N,M,tt,z;;){if(A=l[l.length-1],this.defaultActions[A]?T=this.defaultActions[A]:((b===null||typeof b>"u")&&(b=D()),T=v[A]&&v[A][b]),typeof T>"u"||!T.length||!T[0]){var X="";z=[];for(N in v[A])this.terminals_[N]&&N>dt&&z.push("'"+this.terminals_[N]+"'");_.showPosition?X="Parse error on line "+(C+1)+`:
`+_.showPosition()+`
Expecting `+z.join(", ")+", got '"+(this.terminals_[b]||b)+"'":X="Parse error on line "+(C+1)+": Unexpected "+(b==Q?"end of input":"'"+(this.terminals_[b]||b)+"'"),this.parseError(X,{text:_.match,token:this.terminals_[b]||b,line:_.yylineno,loc:Y,expected:z})}if(T[0]instanceof Array&&T.length>1)throw new Error("Parse Error: multiple actions possible at state: "+A+", token: "+b);switch(T[0]){case 1:l.push(b),p.push(_.yytext),o.push(_.yylloc),l.push(T[1]),b=null,K=_.yyleng,k=_.yytext,C=_.yylineno,Y=_.yylloc;break;case 2:if(M=this.productions_[T[1]][1],F.$=p[p.length-M],F._$={first_line:o[o.length-(M||1)].first_line,last_line:o[o.length-1].last_line,first_column:o[o.length-(M||1)].first_column,last_column:o[o.length-1].last_column},ft&&(F._$.range=[o[o.length-(M||1)].range[0],o[o.length-1].range[1]]),q=this.performAction.apply(F,[k,K,C,I.yy,T[1],p,o].concat(yt)),typeof q<"u")return q;M&&(l=l.slice(0,-1*M*2),p=p.slice(0,-1*M),o=o.slice(0,-1*M)),l.push(this.productions_[T[1]][0]),p.push(F.$),o.push(F._$),tt=v[l[l.length-2]][l[l.length-1]],l.push(tt);break;case 3:return!0}}return!0},"parse")},m=function(){var h={EOF:1,parseError:r(function(a,l){if(this.yy.parser)this.yy.parser.parseError(a,l);else throw new Error(a)},"parseError"),setInput:r(function(n,a){return this.yy=a||this.yy||{},this._input=n,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},"setInput"),input:r(function(){var n=this._input[0];this.yytext+=n,this.yyleng++,this.offset++,this.match+=n,this.matched+=n;var a=n.match(/(?:\r\n?|\n).*/g);return a?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),n},"input"),unput:r(function(n){var 
a=n.length,l=n.split(/(?:\r\n?|\n)/g);this._input=n+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-a),this.offset-=a;var d=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),l.length-1&&(this.yylineno-=l.length-1);var p=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:l?(l.length===d.length?this.yylloc.first_column:0)+d[d.length-l.length].length-l[0].length:this.yylloc.first_column-a},this.options.ranges&&(this.yylloc.range=[p[0],p[0]+this.yyleng-a]),this.yyleng=this.yytext.length,this},"unput"),more:r(function(){return this._more=!0,this},"more"),reject:r(function(){if(this.options.backtrack_lexer)this._backtrack=!0;else return this.parseError("Lexical error on line "+(this.yylineno+1)+`. You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).
`+this.showPosition(),{text:"",token:null,line:this.yylineno});return this},"reject"),less:r(function(n){this.unput(this.match.slice(n))},"less"),pastInput:r(function(){var n=this.matched.substr(0,this.matched.length-this.match.length);return(n.length>20?"...":"")+n.substr(-20).replace(/\n/g,"")},"pastInput"),upcomingInput:r(function(){var n=this.match;return n.length<20&&(n+=this._input.substr(0,20-n.length)),(n.substr(0,20)+(n.length>20?"...":"")).replace(/\n/g,"")},"upcomingInput"),showPosition:r(function(){var n=this.pastInput(),a=new Array(n.length+1).join("-");return n+this.upcomingInput()+`

View file

@ -1,4 +1,4 @@
import{g as fe}from"./chunk-E2GYISFI-Du9g7EeC.js";import{_ as c,l as te,c as W,K as ye,a8 as be,a9 as me,aa as _e,a3 as Ee,H as Y,i as G,v as ke,J as Se,a4 as Ne,a5 as le,a6 as ce}from"./mermaid-vendor-B20mDgAo.js";import"./feature-graph-CS4MyqEv.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";var $=function(){var t=c(function(_,s,n,a){for(n=n||{},a=_.length;a--;n[_[a]]=s);return n},"o"),g=[1,4],d=[1,13],r=[1,12],p=[1,15],E=[1,16],f=[1,20],h=[1,19],L=[6,7,8],C=[1,26],w=[1,24],N=[1,25],i=[6,7,11],H=[1,31],x=[6,7,11,24],P=[1,6,13,16,17,20,23],M=[1,35],U=[1,36],A=[1,6,7,11,13,16,17,20,23],j=[1,38],V={trace:c(function(){},"trace"),yy:{},symbols_:{error:2,start:3,mindMap:4,spaceLines:5,SPACELINE:6,NL:7,KANBAN:8,document:9,stop:10,EOF:11,statement:12,SPACELIST:13,node:14,shapeData:15,ICON:16,CLASS:17,nodeWithId:18,nodeWithoutId:19,NODE_DSTART:20,NODE_DESCR:21,NODE_DEND:22,NODE_ID:23,SHAPE_DATA:24,$accept:0,$end:1},terminals_:{2:"error",6:"SPACELINE",7:"NL",8:"KANBAN",11:"EOF",13:"SPACELIST",16:"ICON",17:"CLASS",20:"NODE_DSTART",21:"NODE_DESCR",22:"NODE_DEND",23:"NODE_ID",24:"SHAPE_DATA"},productions_:[0,[3,1],[3,2],[5,1],[5,2],[5,2],[4,2],[4,3],[10,1],[10,1],[10,1],[10,2],[10,2],[9,3],[9,2],[12,3],[12,2],[12,2],[12,2],[12,1],[12,2],[12,1],[12,1],[12,1],[12,1],[14,1],[14,1],[19,3],[18,1],[18,4],[15,2],[15,1]],performAction:c(function(s,n,a,o,u,e,B){var l=e.length-1;switch(u){case 6:case 7:return o;case 8:o.getLogger().trace("Stop NL ");break;case 9:o.getLogger().trace("Stop EOF ");break;case 11:o.getLogger().trace("Stop NL2 ");break;case 12:o.getLogger().trace("Stop EOF2 ");break;case 15:o.getLogger().info("Node: ",e[l-1].id),o.addNode(e[l-2].length,e[l-1].id,e[l-1].descr,e[l-1].type,e[l]);break;case 16:o.getLogger().info("Node: ",e[l].id),o.addNode(e[l-1].length,e[l].id,e[l].descr,e[l].type);break;case 17:o.getLogger().trace("Icon: ",e[l]),o.decorateNode({icon:e[l]});break;case 
18:case 23:o.decorateNode({class:e[l]});break;case 19:o.getLogger().trace("SPACELIST");break;case 20:o.getLogger().trace("Node: ",e[l-1].id),o.addNode(0,e[l-1].id,e[l-1].descr,e[l-1].type,e[l]);break;case 21:o.getLogger().trace("Node: ",e[l].id),o.addNode(0,e[l].id,e[l].descr,e[l].type);break;case 22:o.decorateNode({icon:e[l]});break;case 27:o.getLogger().trace("node found ..",e[l-2]),this.$={id:e[l-1],descr:e[l-1],type:o.getType(e[l-2],e[l])};break;case 28:this.$={id:e[l],descr:e[l],type:0};break;case 29:o.getLogger().trace("node found ..",e[l-3]),this.$={id:e[l-3],descr:e[l-1],type:o.getType(e[l-2],e[l])};break;case 30:this.$=e[l-1]+e[l];break;case 31:this.$=e[l];break}},"anonymous"),table:[{3:1,4:2,5:3,6:[1,5],8:g},{1:[3]},{1:[2,1]},{4:6,6:[1,7],7:[1,8],8:g},{6:d,7:[1,10],9:9,12:11,13:r,14:14,16:p,17:E,18:17,19:18,20:f,23:h},t(L,[2,3]),{1:[2,2]},t(L,[2,4]),t(L,[2,5]),{1:[2,6],6:d,12:21,13:r,14:14,16:p,17:E,18:17,19:18,20:f,23:h},{6:d,9:22,12:11,13:r,14:14,16:p,17:E,18:17,19:18,20:f,23:h},{6:C,7:w,10:23,11:N},t(i,[2,24],{18:17,19:18,14:27,16:[1,28],17:[1,29],20:f,23:h}),t(i,[2,19]),t(i,[2,21],{15:30,24:H}),t(i,[2,22]),t(i,[2,23]),t(x,[2,25]),t(x,[2,26]),t(x,[2,28],{20:[1,32]}),{21:[1,33]},{6:C,7:w,10:34,11:N},{1:[2,7],6:d,12:21,13:r,14:14,16:p,17:E,18:17,19:18,20:f,23:h},t(P,[2,14],{7:M,11:U}),t(A,[2,8]),t(A,[2,9]),t(A,[2,10]),t(i,[2,16],{15:37,24:H}),t(i,[2,17]),t(i,[2,18]),t(i,[2,20],{24:j}),t(x,[2,31]),{21:[1,39]},{22:[1,40]},t(P,[2,13],{7:M,11:U}),t(A,[2,11]),t(A,[2,12]),t(i,[2,15],{24:j}),t(x,[2,30]),{22:[1,41]},t(x,[2,27]),t(x,[2,29])],defaultActions:{2:[2,1],6:[2,2]},parseError:c(function(s,n){if(n.recoverable)this.trace(s);else{var a=new Error(s);throw a.hash=n,a}},"parseError"),parse:c(function(s){var n=this,a=[0],o=[],u=[null],e=[],B=this.table,l="",z=0,ie=0,ue=2,re=1,ge=e.slice.call(arguments,1),b=Object.create(this.lexer),T={yy:{}};for(var J in 
this.yy)Object.prototype.hasOwnProperty.call(this.yy,J)&&(T.yy[J]=this.yy[J]);b.setInput(s,T.yy),T.yy.lexer=b,T.yy.parser=this,typeof b.yylloc>"u"&&(b.yylloc={});var q=b.yylloc;e.push(q);var de=b.options&&b.options.ranges;typeof T.yy.parseError=="function"?this.parseError=T.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;function pe(S){a.length=a.length-2*S,u.length=u.length-S,e.length=e.length-S}c(pe,"popStack");function ae(){var S;return S=o.pop()||b.lex()||re,typeof S!="number"&&(S instanceof Array&&(o=S,S=o.pop()),S=n.symbols_[S]||S),S}c(ae,"lex");for(var k,R,v,Q,F={},K,I,oe,X;;){if(R=a[a.length-1],this.defaultActions[R]?v=this.defaultActions[R]:((k===null||typeof k>"u")&&(k=ae()),v=B[R]&&B[R][k]),typeof v>"u"||!v.length||!v[0]){var Z="";X=[];for(K in B[R])this.terminals_[K]&&K>ue&&X.push("'"+this.terminals_[K]+"'");b.showPosition?Z="Parse error on line "+(z+1)+`:
import{g as fe}from"./chunk-E2GYISFI-Cy7NDsKx.js";import{_ as c,l as te,c as W,K as ye,a8 as be,a9 as me,aa as _e,a3 as Ee,H as Y,i as G,v as ke,J as Se,a4 as Ne,a5 as le,a6 as ce}from"./mermaid-vendor-BNDdXxLk.js";import"./feature-graph-BWr9U7tw.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";var $=function(){var t=c(function(_,s,n,a){for(n=n||{},a=_.length;a--;n[_[a]]=s);return n},"o"),g=[1,4],d=[1,13],r=[1,12],p=[1,15],E=[1,16],f=[1,20],h=[1,19],L=[6,7,8],C=[1,26],w=[1,24],N=[1,25],i=[6,7,11],H=[1,31],x=[6,7,11,24],P=[1,6,13,16,17,20,23],M=[1,35],U=[1,36],A=[1,6,7,11,13,16,17,20,23],j=[1,38],V={trace:c(function(){},"trace"),yy:{},symbols_:{error:2,start:3,mindMap:4,spaceLines:5,SPACELINE:6,NL:7,KANBAN:8,document:9,stop:10,EOF:11,statement:12,SPACELIST:13,node:14,shapeData:15,ICON:16,CLASS:17,nodeWithId:18,nodeWithoutId:19,NODE_DSTART:20,NODE_DESCR:21,NODE_DEND:22,NODE_ID:23,SHAPE_DATA:24,$accept:0,$end:1},terminals_:{2:"error",6:"SPACELINE",7:"NL",8:"KANBAN",11:"EOF",13:"SPACELIST",16:"ICON",17:"CLASS",20:"NODE_DSTART",21:"NODE_DESCR",22:"NODE_DEND",23:"NODE_ID",24:"SHAPE_DATA"},productions_:[0,[3,1],[3,2],[5,1],[5,2],[5,2],[4,2],[4,3],[10,1],[10,1],[10,1],[10,2],[10,2],[9,3],[9,2],[12,3],[12,2],[12,2],[12,2],[12,1],[12,2],[12,1],[12,1],[12,1],[12,1],[14,1],[14,1],[19,3],[18,1],[18,4],[15,2],[15,1]],performAction:c(function(s,n,a,o,u,e,B){var l=e.length-1;switch(u){case 6:case 7:return o;case 8:o.getLogger().trace("Stop NL ");break;case 9:o.getLogger().trace("Stop EOF ");break;case 11:o.getLogger().trace("Stop NL2 ");break;case 12:o.getLogger().trace("Stop EOF2 ");break;case 15:o.getLogger().info("Node: ",e[l-1].id),o.addNode(e[l-2].length,e[l-1].id,e[l-1].descr,e[l-1].type,e[l]);break;case 16:o.getLogger().info("Node: ",e[l].id),o.addNode(e[l-1].length,e[l].id,e[l].descr,e[l].type);break;case 17:o.getLogger().trace("Icon: ",e[l]),o.decorateNode({icon:e[l]});break;case 
18:case 23:o.decorateNode({class:e[l]});break;case 19:o.getLogger().trace("SPACELIST");break;case 20:o.getLogger().trace("Node: ",e[l-1].id),o.addNode(0,e[l-1].id,e[l-1].descr,e[l-1].type,e[l]);break;case 21:o.getLogger().trace("Node: ",e[l].id),o.addNode(0,e[l].id,e[l].descr,e[l].type);break;case 22:o.decorateNode({icon:e[l]});break;case 27:o.getLogger().trace("node found ..",e[l-2]),this.$={id:e[l-1],descr:e[l-1],type:o.getType(e[l-2],e[l])};break;case 28:this.$={id:e[l],descr:e[l],type:0};break;case 29:o.getLogger().trace("node found ..",e[l-3]),this.$={id:e[l-3],descr:e[l-1],type:o.getType(e[l-2],e[l])};break;case 30:this.$=e[l-1]+e[l];break;case 31:this.$=e[l];break}},"anonymous"),table:[{3:1,4:2,5:3,6:[1,5],8:g},{1:[3]},{1:[2,1]},{4:6,6:[1,7],7:[1,8],8:g},{6:d,7:[1,10],9:9,12:11,13:r,14:14,16:p,17:E,18:17,19:18,20:f,23:h},t(L,[2,3]),{1:[2,2]},t(L,[2,4]),t(L,[2,5]),{1:[2,6],6:d,12:21,13:r,14:14,16:p,17:E,18:17,19:18,20:f,23:h},{6:d,9:22,12:11,13:r,14:14,16:p,17:E,18:17,19:18,20:f,23:h},{6:C,7:w,10:23,11:N},t(i,[2,24],{18:17,19:18,14:27,16:[1,28],17:[1,29],20:f,23:h}),t(i,[2,19]),t(i,[2,21],{15:30,24:H}),t(i,[2,22]),t(i,[2,23]),t(x,[2,25]),t(x,[2,26]),t(x,[2,28],{20:[1,32]}),{21:[1,33]},{6:C,7:w,10:34,11:N},{1:[2,7],6:d,12:21,13:r,14:14,16:p,17:E,18:17,19:18,20:f,23:h},t(P,[2,14],{7:M,11:U}),t(A,[2,8]),t(A,[2,9]),t(A,[2,10]),t(i,[2,16],{15:37,24:H}),t(i,[2,17]),t(i,[2,18]),t(i,[2,20],{24:j}),t(x,[2,31]),{21:[1,39]},{22:[1,40]},t(P,[2,13],{7:M,11:U}),t(A,[2,11]),t(A,[2,12]),t(i,[2,15],{24:j}),t(x,[2,30]),{22:[1,41]},t(x,[2,27]),t(x,[2,29])],defaultActions:{2:[2,1],6:[2,2]},parseError:c(function(s,n){if(n.recoverable)this.trace(s);else{var a=new Error(s);throw a.hash=n,a}},"parseError"),parse:c(function(s){var n=this,a=[0],o=[],u=[null],e=[],B=this.table,l="",z=0,ie=0,ue=2,re=1,ge=e.slice.call(arguments,1),b=Object.create(this.lexer),T={yy:{}};for(var J in 
this.yy)Object.prototype.hasOwnProperty.call(this.yy,J)&&(T.yy[J]=this.yy[J]);b.setInput(s,T.yy),T.yy.lexer=b,T.yy.parser=this,typeof b.yylloc>"u"&&(b.yylloc={});var q=b.yylloc;e.push(q);var de=b.options&&b.options.ranges;typeof T.yy.parseError=="function"?this.parseError=T.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;function pe(S){a.length=a.length-2*S,u.length=u.length-S,e.length=e.length-S}c(pe,"popStack");function ae(){var S;return S=o.pop()||b.lex()||re,typeof S!="number"&&(S instanceof Array&&(o=S,S=o.pop()),S=n.symbols_[S]||S),S}c(ae,"lex");for(var k,R,v,Q,F={},K,I,oe,X;;){if(R=a[a.length-1],this.defaultActions[R]?v=this.defaultActions[R]:((k===null||typeof k>"u")&&(k=ae()),v=B[R]&&B[R][k]),typeof v>"u"||!v.length||!v[0]){var Z="";X=[];for(K in B[R])this.terminals_[K]&&K>ue&&X.push("'"+this.terminals_[K]+"'");b.showPosition?Z="Parse error on line "+(z+1)+`:
`+b.showPosition()+`
Expecting `+X.join(", ")+", got '"+(this.terminals_[k]||k)+"'":Z="Parse error on line "+(z+1)+": Unexpected "+(k==re?"end of input":"'"+(this.terminals_[k]||k)+"'"),this.parseError(Z,{text:b.match,token:this.terminals_[k]||k,line:b.yylineno,loc:q,expected:X})}if(v[0]instanceof Array&&v.length>1)throw new Error("Parse Error: multiple actions possible at state: "+R+", token: "+k);switch(v[0]){case 1:a.push(k),u.push(b.yytext),e.push(b.yylloc),a.push(v[1]),k=null,ie=b.yyleng,l=b.yytext,z=b.yylineno,q=b.yylloc;break;case 2:if(I=this.productions_[v[1]][1],F.$=u[u.length-I],F._$={first_line:e[e.length-(I||1)].first_line,last_line:e[e.length-1].last_line,first_column:e[e.length-(I||1)].first_column,last_column:e[e.length-1].last_column},de&&(F._$.range=[e[e.length-(I||1)].range[0],e[e.length-1].range[1]]),Q=this.performAction.apply(F,[l,ie,z,T.yy,v[1],u,e].concat(ge)),typeof Q<"u")return Q;I&&(a=a.slice(0,-1*I*2),u=u.slice(0,-1*I),e=e.slice(0,-1*I)),a.push(this.productions_[v[1]][0]),u.push(F.$),e.push(F._$),oe=B[a[a.length-2]][a[a.length-1]],a.push(oe);break;case 3:return!0}}return!0},"parse")},m=function(){var _={EOF:1,parseError:c(function(n,a){if(this.yy.parser)this.yy.parser.parseError(n,a);else throw new Error(n)},"parseError"),setInput:c(function(s,n){return this.yy=n||this.yy||{},this._input=s,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},"setInput"),input:c(function(){var s=this._input[0];this.yytext+=s,this.yyleng++,this.offset++,this.match+=s,this.matched+=s;var n=s.match(/(?:\r\n?|\n).*/g);return n?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),s},"input"),unput:c(function(s){var 
n=s.length,a=s.split(/(?:\r\n?|\n)/g);this._input=s+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-n),this.offset-=n;var o=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),a.length-1&&(this.yylineno-=a.length-1);var u=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:a?(a.length===o.length?this.yylloc.first_column:0)+o[o.length-a.length].length-a[0].length:this.yylloc.first_column-n},this.options.ranges&&(this.yylloc.range=[u[0],u[0]+this.yyleng-n]),this.yyleng=this.yytext.length,this},"unput"),more:c(function(){return this._more=!0,this},"more"),reject:c(function(){if(this.options.backtrack_lexer)this._backtrack=!0;else return this.parseError("Lexical error on line "+(this.yylineno+1)+`. You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).
`+this.showPosition(),{text:"",token:null,line:this.yylineno});return this},"reject"),less:c(function(s){this.unput(this.match.slice(s))},"less"),pastInput:c(function(){var s=this.matched.substr(0,this.matched.length-this.match.length);return(s.length>20?"...":"")+s.substr(-20).replace(/\n/g,"")},"pastInput"),upcomingInput:c(function(){var s=this.match;return s.length<20&&(s+=this._input.substr(0,20-s.length)),(s.substr(0,20)+(s.length>20?"...":"")).replace(/\n/g,"")},"upcomingInput"),showPosition:c(function(){var s=this.pastInput(),n=new Array(s.length+1).join("-");return s+this.upcomingInput()+`

File diff suppressed because one or more lines are too long

View file

@ -1,4 +1,4 @@
import{p as N}from"./chunk-353BL4L5-CR8qX5CO.js";import{_ as i,g as B,s as U,a as q,b as H,t as K,q as V,l as C,c as Z,F as j,K as J,M as Q,N as z,O as X,e as Y,z as tt,P as et,H as at}from"./mermaid-vendor-B20mDgAo.js";import{p as rt}from"./treemap-75Q7IDZK-64OceOGQ.js";import"./feature-graph-CS4MyqEv.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";import"./_baseUniq-Coqzfado.js";import"./_basePickBy-zcTWmF8I.js";import"./clone-D1fuhfq3.js";var it=at.pie,D={sections:new Map,showData:!1},f=D.sections,w=D.showData,st=structuredClone(it),ot=i(()=>structuredClone(st),"getConfig"),nt=i(()=>{f=new Map,w=D.showData,tt()},"clear"),lt=i(({label:t,value:a})=>{f.has(t)||(f.set(t,a),C.debug(`added new section: ${t}, with value: ${a}`))},"addSection"),ct=i(()=>f,"getSections"),pt=i(t=>{w=t},"setShowData"),dt=i(()=>w,"getShowData"),F={getConfig:ot,clear:nt,setDiagramTitle:V,getDiagramTitle:K,setAccTitle:H,getAccTitle:q,setAccDescription:U,getAccDescription:B,addSection:lt,getSections:ct,setShowData:pt,getShowData:dt},gt=i((t,a)=>{N(t,a),a.setShowData(t.showData),t.sections.map(a.addSection)},"populateDb"),ut={parse:i(async t=>{const a=await rt("pie",t);C.debug(a),gt(a,F)},"parse")},mt=i(t=>`
import{p as N}from"./chunk-353BL4L5-_6GQP54L.js";import{_ as i,g as B,s as U,a as q,b as H,t as K,q as V,l as C,c as Z,F as j,K as J,M as Q,N as z,O as X,e as Y,z as tt,P as et,H as at}from"./mermaid-vendor-BNDdXxLk.js";import{p as rt}from"./treemap-75Q7IDZK-BTRfYg24.js";import"./feature-graph-BWr9U7tw.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";import"./_baseUniq-I-HKKcQv.js";import"./_basePickBy-BPAKxgo7.js";import"./clone-DwaenhoE.js";var it=at.pie,D={sections:new Map,showData:!1},f=D.sections,w=D.showData,st=structuredClone(it),ot=i(()=>structuredClone(st),"getConfig"),nt=i(()=>{f=new Map,w=D.showData,tt()},"clear"),lt=i(({label:t,value:a})=>{f.has(t)||(f.set(t,a),C.debug(`added new section: ${t}, with value: ${a}`))},"addSection"),ct=i(()=>f,"getSections"),pt=i(t=>{w=t},"setShowData"),dt=i(()=>w,"getShowData"),F={getConfig:ot,clear:nt,setDiagramTitle:V,getDiagramTitle:K,setAccTitle:H,getAccTitle:q,setAccDescription:U,getAccDescription:B,addSection:lt,getSections:ct,setShowData:pt,getShowData:dt},gt=i((t,a)=>{N(t,a),a.setShowData(t.showData),t.sections.map(a.addSection)},"populateDb"),ut={parse:i(async t=>{const a=await rt("pie",t);C.debug(a),gt(a,F)},"parse")},mt=i(t=>`
.pieCircle{
stroke: ${t.pieStrokeColor};
stroke-width : ${t.pieStrokeWidth};

View file

@ -1 +1 @@
import{s as r,b as e,a,S as i}from"./chunk-OW32GOEJ-C5pOkIhC.js";import{_ as s}from"./mermaid-vendor-B20mDgAo.js";import"./chunk-BFAMUDN2-Cm57CA8s.js";import"./chunk-SKB7J2MH-CX-6qXaF.js";import"./feature-graph-CS4MyqEv.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";var f={parser:a,get db(){return new i(2)},renderer:e,styles:r,init:s(t=>{t.state||(t.state={}),t.state.arrowMarkerAbsolute=t.arrowMarkerAbsolute},"init")};export{f as diagram};
import{s as r,b as e,a,S as i}from"./chunk-OW32GOEJ-DFogk2Dj.js";import{_ as s}from"./mermaid-vendor-BNDdXxLk.js";import"./chunk-BFAMUDN2-U-jvyawD.js";import"./chunk-SKB7J2MH-Dh8Tfa_E.js";import"./feature-graph-BWr9U7tw.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";var f={parser:a,get db(){return new i(2)},renderer:e,styles:r,init:s(t=>{t.state||(t.state={}),t.state.arrowMarkerAbsolute=t.arrowMarkerAbsolute},"init")};export{f as diagram};

View file

@ -1,4 +1,4 @@
import{_ as s,c as xt,l as E,d as q,a3 as kt,a4 as _t,a5 as bt,a6 as vt,N as nt,D as wt,a7 as St,z as Et}from"./mermaid-vendor-B20mDgAo.js";import"./feature-graph-CS4MyqEv.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";var X=function(){var n=s(function(f,r,a,h){for(a=a||{},h=f.length;h--;a[f[h]]=r);return a},"o"),t=[6,8,10,11,12,14,16,17,20,21],e=[1,9],l=[1,10],i=[1,11],d=[1,12],c=[1,13],g=[1,16],m=[1,17],p={trace:s(function(){},"trace"),yy:{},symbols_:{error:2,start:3,timeline:4,document:5,EOF:6,line:7,SPACE:8,statement:9,NEWLINE:10,title:11,acc_title:12,acc_title_value:13,acc_descr:14,acc_descr_value:15,acc_descr_multiline_value:16,section:17,period_statement:18,event_statement:19,period:20,event:21,$accept:0,$end:1},terminals_:{2:"error",4:"timeline",6:"EOF",8:"SPACE",10:"NEWLINE",11:"title",12:"acc_title",13:"acc_title_value",14:"acc_descr",15:"acc_descr_value",16:"acc_descr_multiline_value",17:"section",20:"period",21:"event"},productions_:[0,[3,3],[5,0],[5,2],[7,2],[7,1],[7,1],[7,1],[9,1],[9,2],[9,2],[9,1],[9,1],[9,1],[9,1],[18,1],[19,1]],performAction:s(function(r,a,h,u,y,o,S){var k=o.length-1;switch(y){case 1:return o[k-1];case 2:this.$=[];break;case 3:o[k-1].push(o[k]),this.$=o[k-1];break;case 4:case 5:this.$=o[k];break;case 6:case 7:this.$=[];break;case 8:u.getCommonDb().setDiagramTitle(o[k].substr(6)),this.$=o[k].substr(6);break;case 9:this.$=o[k].trim(),u.getCommonDb().setAccTitle(this.$);break;case 10:case 11:this.$=o[k].trim(),u.getCommonDb().setAccDescription(this.$);break;case 12:u.addSection(o[k].substr(8)),this.$=o[k].substr(8);break;case 15:u.addTask(o[k],0,""),this.$=o[k];break;case 
16:u.addEvent(o[k].substr(2)),this.$=o[k];break}},"anonymous"),table:[{3:1,4:[1,2]},{1:[3]},n(t,[2,2],{5:3}),{6:[1,4],7:5,8:[1,6],9:7,10:[1,8],11:e,12:l,14:i,16:d,17:c,18:14,19:15,20:g,21:m},n(t,[2,7],{1:[2,1]}),n(t,[2,3]),{9:18,11:e,12:l,14:i,16:d,17:c,18:14,19:15,20:g,21:m},n(t,[2,5]),n(t,[2,6]),n(t,[2,8]),{13:[1,19]},{15:[1,20]},n(t,[2,11]),n(t,[2,12]),n(t,[2,13]),n(t,[2,14]),n(t,[2,15]),n(t,[2,16]),n(t,[2,4]),n(t,[2,9]),n(t,[2,10])],defaultActions:{},parseError:s(function(r,a){if(a.recoverable)this.trace(r);else{var h=new Error(r);throw h.hash=a,h}},"parseError"),parse:s(function(r){var a=this,h=[0],u=[],y=[null],o=[],S=this.table,k="",M=0,C=0,B=2,J=1,O=o.slice.call(arguments,1),_=Object.create(this.lexer),N={yy:{}};for(var L in this.yy)Object.prototype.hasOwnProperty.call(this.yy,L)&&(N.yy[L]=this.yy[L]);_.setInput(r,N.yy),N.yy.lexer=_,N.yy.parser=this,typeof _.yylloc>"u"&&(_.yylloc={});var v=_.yylloc;o.push(v);var $=_.options&&_.options.ranges;typeof N.yy.parseError=="function"?this.parseError=N.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;function R(T){h.length=h.length-2*T,y.length=y.length-T,o.length=o.length-T}s(R,"popStack");function A(){var T;return T=u.pop()||_.lex()||J,typeof T!="number"&&(T instanceof Array&&(u=T,T=u.pop()),T=a.symbols_[T]||T),T}s(A,"lex");for(var w,H,I,K,F={},j,P,et,G;;){if(H=h[h.length-1],this.defaultActions[H]?I=this.defaultActions[H]:((w===null||typeof w>"u")&&(w=A()),I=S[H]&&S[H][w]),typeof I>"u"||!I.length||!I[0]){var Q="";G=[];for(j in S[H])this.terminals_[j]&&j>B&&G.push("'"+this.terminals_[j]+"'");_.showPosition?Q="Parse error on line "+(M+1)+`:
import{_ as s,c as xt,l as E,d as q,a3 as kt,a4 as _t,a5 as bt,a6 as vt,N as nt,D as wt,a7 as St,z as Et}from"./mermaid-vendor-BNDdXxLk.js";import"./feature-graph-BWr9U7tw.js";import"./react-vendor-DEwriMA6.js";import"./graph-vendor-B-X5JegA.js";import"./ui-vendor-CeCm8EER.js";import"./utils-vendor-BysuhMZA.js";var X=function(){var n=s(function(f,r,a,h){for(a=a||{},h=f.length;h--;a[f[h]]=r);return a},"o"),t=[6,8,10,11,12,14,16,17,20,21],e=[1,9],l=[1,10],i=[1,11],d=[1,12],c=[1,13],g=[1,16],m=[1,17],p={trace:s(function(){},"trace"),yy:{},symbols_:{error:2,start:3,timeline:4,document:5,EOF:6,line:7,SPACE:8,statement:9,NEWLINE:10,title:11,acc_title:12,acc_title_value:13,acc_descr:14,acc_descr_value:15,acc_descr_multiline_value:16,section:17,period_statement:18,event_statement:19,period:20,event:21,$accept:0,$end:1},terminals_:{2:"error",4:"timeline",6:"EOF",8:"SPACE",10:"NEWLINE",11:"title",12:"acc_title",13:"acc_title_value",14:"acc_descr",15:"acc_descr_value",16:"acc_descr_multiline_value",17:"section",20:"period",21:"event"},productions_:[0,[3,3],[5,0],[5,2],[7,2],[7,1],[7,1],[7,1],[9,1],[9,2],[9,2],[9,1],[9,1],[9,1],[9,1],[18,1],[19,1]],performAction:s(function(r,a,h,u,y,o,S){var k=o.length-1;switch(y){case 1:return o[k-1];case 2:this.$=[];break;case 3:o[k-1].push(o[k]),this.$=o[k-1];break;case 4:case 5:this.$=o[k];break;case 6:case 7:this.$=[];break;case 8:u.getCommonDb().setDiagramTitle(o[k].substr(6)),this.$=o[k].substr(6);break;case 9:this.$=o[k].trim(),u.getCommonDb().setAccTitle(this.$);break;case 10:case 11:this.$=o[k].trim(),u.getCommonDb().setAccDescription(this.$);break;case 12:u.addSection(o[k].substr(8)),this.$=o[k].substr(8);break;case 15:u.addTask(o[k],0,""),this.$=o[k];break;case 
16:u.addEvent(o[k].substr(2)),this.$=o[k];break}},"anonymous"),table:[{3:1,4:[1,2]},{1:[3]},n(t,[2,2],{5:3}),{6:[1,4],7:5,8:[1,6],9:7,10:[1,8],11:e,12:l,14:i,16:d,17:c,18:14,19:15,20:g,21:m},n(t,[2,7],{1:[2,1]}),n(t,[2,3]),{9:18,11:e,12:l,14:i,16:d,17:c,18:14,19:15,20:g,21:m},n(t,[2,5]),n(t,[2,6]),n(t,[2,8]),{13:[1,19]},{15:[1,20]},n(t,[2,11]),n(t,[2,12]),n(t,[2,13]),n(t,[2,14]),n(t,[2,15]),n(t,[2,16]),n(t,[2,4]),n(t,[2,9]),n(t,[2,10])],defaultActions:{},parseError:s(function(r,a){if(a.recoverable)this.trace(r);else{var h=new Error(r);throw h.hash=a,h}},"parseError"),parse:s(function(r){var a=this,h=[0],u=[],y=[null],o=[],S=this.table,k="",M=0,C=0,B=2,J=1,O=o.slice.call(arguments,1),_=Object.create(this.lexer),N={yy:{}};for(var L in this.yy)Object.prototype.hasOwnProperty.call(this.yy,L)&&(N.yy[L]=this.yy[L]);_.setInput(r,N.yy),N.yy.lexer=_,N.yy.parser=this,typeof _.yylloc>"u"&&(_.yylloc={});var v=_.yylloc;o.push(v);var $=_.options&&_.options.ranges;typeof N.yy.parseError=="function"?this.parseError=N.yy.parseError:this.parseError=Object.getPrototypeOf(this).parseError;function R(T){h.length=h.length-2*T,y.length=y.length-T,o.length=o.length-T}s(R,"popStack");function A(){var T;return T=u.pop()||_.lex()||J,typeof T!="number"&&(T instanceof Array&&(u=T,T=u.pop()),T=a.symbols_[T]||T),T}s(A,"lex");for(var w,H,I,K,F={},j,P,et,G;;){if(H=h[h.length-1],this.defaultActions[H]?I=this.defaultActions[H]:((w===null||typeof w>"u")&&(w=A()),I=S[H]&&S[H][w]),typeof I>"u"||!I.length||!I[0]){var Q="";G=[];for(j in S[H])this.terminals_[j]&&j>B&&G.push("'"+this.terminals_[j]+"'");_.showPosition?Q="Parse error on line "+(M+1)+`:
`+_.showPosition()+`
Expecting `+G.join(", ")+", got '"+(this.terminals_[w]||w)+"'":Q="Parse error on line "+(M+1)+": Unexpected "+(w==J?"end of input":"'"+(this.terminals_[w]||w)+"'"),this.parseError(Q,{text:_.match,token:this.terminals_[w]||w,line:_.yylineno,loc:v,expected:G})}if(I[0]instanceof Array&&I.length>1)throw new Error("Parse Error: multiple actions possible at state: "+H+", token: "+w);switch(I[0]){case 1:h.push(w),y.push(_.yytext),o.push(_.yylloc),h.push(I[1]),w=null,C=_.yyleng,k=_.yytext,M=_.yylineno,v=_.yylloc;break;case 2:if(P=this.productions_[I[1]][1],F.$=y[y.length-P],F._$={first_line:o[o.length-(P||1)].first_line,last_line:o[o.length-1].last_line,first_column:o[o.length-(P||1)].first_column,last_column:o[o.length-1].last_column},$&&(F._$.range=[o[o.length-(P||1)].range[0],o[o.length-1].range[1]]),K=this.performAction.apply(F,[k,C,M,N.yy,I[1],y,o].concat(O)),typeof K<"u")return K;P&&(h=h.slice(0,-1*P*2),y=y.slice(0,-1*P),o=o.slice(0,-1*P)),h.push(this.productions_[I[1]][0]),y.push(F.$),o.push(F._$),et=S[h[h.length-2]][h[h.length-1]],h.push(et);break;case 3:return!0}}return!0},"parse")},x=function(){var f={EOF:1,parseError:s(function(a,h){if(this.yy.parser)this.yy.parser.parseError(a,h);else throw new Error(a)},"parseError"),setInput:s(function(r,a){return this.yy=a||this.yy||{},this._input=r,this._more=this._backtrack=this.done=!1,this.yylineno=this.yyleng=0,this.yytext=this.matched=this.match="",this.conditionStack=["INITIAL"],this.yylloc={first_line:1,first_column:0,last_line:1,last_column:0},this.options.ranges&&(this.yylloc.range=[0,0]),this.offset=0,this},"setInput"),input:s(function(){var r=this._input[0];this.yytext+=r,this.yyleng++,this.offset++,this.match+=r,this.matched+=r;var a=r.match(/(?:\r\n?|\n).*/g);return a?(this.yylineno++,this.yylloc.last_line++):this.yylloc.last_column++,this.options.ranges&&this.yylloc.range[1]++,this._input=this._input.slice(1),r},"input"),unput:s(function(r){var 
a=r.length,h=r.split(/(?:\r\n?|\n)/g);this._input=r+this._input,this.yytext=this.yytext.substr(0,this.yytext.length-a),this.offset-=a;var u=this.match.split(/(?:\r\n?|\n)/g);this.match=this.match.substr(0,this.match.length-1),this.matched=this.matched.substr(0,this.matched.length-1),h.length-1&&(this.yylineno-=h.length-1);var y=this.yylloc.range;return this.yylloc={first_line:this.yylloc.first_line,last_line:this.yylineno+1,first_column:this.yylloc.first_column,last_column:h?(h.length===u.length?this.yylloc.first_column:0)+u[u.length-h.length].length-h[0].length:this.yylloc.first_column-a},this.options.ranges&&(this.yylloc.range=[y[0],y[0]+this.yyleng-a]),this.yyleng=this.yytext.length,this},"unput"),more:s(function(){return this._more=!0,this},"more"),reject:s(function(){if(this.options.backtrack_lexer)this._backtrack=!0;else return this.parseError("Lexical error on line "+(this.yylineno+1)+`. You can only invoke reject() in the lexer when the lexer is of the backtracking persuasion (options.backtrack_lexer = true).
`+this.showPosition(),{text:"",token:null,line:this.yylineno});return this},"reject"),less:s(function(r){this.unput(this.match.slice(r))},"less"),pastInput:s(function(){var r=this.matched.substr(0,this.matched.length-this.match.length);return(r.length>20?"...":"")+r.substr(-20).replace(/\n/g,"")},"pastInput"),upcomingInput:s(function(){var r=this.match;return r.length<20&&(r+=this._input.substr(0,20-r.length)),(r.substr(0,20)+(r.length>20?"...":"")).replace(/\n/g,"")},"upcomingInput"),showPosition:s(function(){var r=this.pastInput(),a=new Array(r.length+1).join("-");return r+this.upcomingInput()+`

View file

@ -8,18 +8,18 @@
<link rel="icon" type="image/png" href="favicon.png" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Lightrag</title>
<script type="module" crossorigin src="/webui/assets/index-CIdJpUuC.js"></script>
<script type="module" crossorigin src="/webui/assets/index-MNCYeahn.js"></script>
<link rel="modulepreload" crossorigin href="/webui/assets/react-vendor-DEwriMA6.js">
<link rel="modulepreload" crossorigin href="/webui/assets/ui-vendor-CeCm8EER.js">
<link rel="modulepreload" crossorigin href="/webui/assets/graph-vendor-B-X5JegA.js">
<link rel="modulepreload" crossorigin href="/webui/assets/utils-vendor-BysuhMZA.js">
<link rel="modulepreload" crossorigin href="/webui/assets/feature-graph-CS4MyqEv.js">
<link rel="modulepreload" crossorigin href="/webui/assets/feature-documents-4TV9qUPI.js">
<link rel="modulepreload" crossorigin href="/webui/assets/mermaid-vendor-B20mDgAo.js">
<link rel="modulepreload" crossorigin href="/webui/assets/feature-graph-BWr9U7tw.js">
<link rel="modulepreload" crossorigin href="/webui/assets/feature-documents-D26P2AXA.js">
<link rel="modulepreload" crossorigin href="/webui/assets/mermaid-vendor-BNDdXxLk.js">
<link rel="modulepreload" crossorigin href="/webui/assets/markdown-vendor-DmIvJdn7.js">
<link rel="modulepreload" crossorigin href="/webui/assets/feature-retrieval-BwXSap2b.js">
<link rel="modulepreload" crossorigin href="/webui/assets/feature-retrieval-Dyng9L25.js">
<link rel="stylesheet" crossorigin href="/webui/assets/feature-graph-BipNuM18.css">
<link rel="stylesheet" crossorigin href="/webui/assets/index-CafJWW1u.css">
<link rel="stylesheet" crossorigin href="/webui/assets/index-BvrNHAMA.css">
</head>
<body>
<div id="root"></div>

View file

@ -26,11 +26,12 @@ DEFAULT_SUMMARY_CONTEXT_SIZE = 12000
DEFAULT_ENTITY_TYPES = [
"Organization",
"Person",
"Equiment",
"Product",
"Technology",
"Location",
"Event",
"Technology",
"Equipment",
"Product",
"Document",
"Category",
]
@ -68,7 +69,7 @@ DEFAULT_EMBEDDING_FUNC_MAX_ASYNC = 8 # Default max async for embedding function
DEFAULT_EMBEDDING_BATCH_NUM = 10 # Default batch size for embedding computations
# Gunicorn worker timeout
DEFAULT_TIMEOUT = 210
DEFAULT_TIMEOUT = 300
# Default llm and embedding timeout
DEFAULT_LLM_TIMEOUT = 180

View file

@ -164,9 +164,7 @@ class ChromaVectorDBStorage(BaseVectorStorage):
logger.error(f"Error during ChromaDB upsert: {str(e)}")
raise
async def query(
self, query: str, top_k: int, ids: list[str] | None = None
) -> list[dict[str, Any]]:
async def query(self, query: str, top_k: int) -> list[dict[str, Any]]:
try:
embedding = await self.embedding_func(
[query], _priority=5

View file

@ -51,7 +51,7 @@ class NetworkXStorage(BaseGraphStorage):
os.makedirs(workspace_dir, exist_ok=True)
self._graphml_xml_file = os.path.join(
working_dir, f"graph_{self.namespace}.graphml"
workspace_dir, f"graph_{self.namespace}.graphml"
)
self._storage_lock = None
self.storage_updated = None

View file

@ -787,13 +787,13 @@ class PostgreSQLDB:
FROM information_schema.columns
WHERE table_name = $1 AND column_name = $2
"""
params = {
"table_name": migration["table"].lower(),
"column_name": migration["column"],
}
column_info = await self.query(
check_column_sql,
{
"table_name": migration["table"].lower(),
"column_name": migration["column"],
},
list(params.values()),
)
if not column_info:
@ -1035,10 +1035,8 @@ class PostgreSQLDB:
WHERE table_name = $1
AND table_schema = 'public'
"""
table_exists = await self.query(
check_table_sql, {"table_name": table_name.lower()}
)
params = {"table_name": table_name.lower()}
table_exists = await self.query(check_table_sql, list(params.values()))
if not table_exists:
logger.info(f"Creating table {table_name}")
@ -1121,7 +1119,8 @@ class PostgreSQLDB:
AND indexname = $1
"""
existing = await self.query(check_sql, {"indexname": index["name"]})
params = {"indexname": index["name"]}
existing = await self.query(check_sql, list(params.values()))
if not existing:
logger.info(f"Creating pagination index: {index['description']}")
@ -1217,7 +1216,7 @@ class PostgreSQLDB:
async def query(
self,
sql: str,
params: dict[str, Any] | None = None,
params: list[Any] | None = None,
multirows: bool = False,
with_age: bool = False,
graph_name: str | None = None,
@ -1230,7 +1229,7 @@ class PostgreSQLDB:
try:
if params:
rows = await connection.fetch(sql, *params.values())
rows = await connection.fetch(sql, *params)
else:
rows = await connection.fetch(sql)
@ -1446,7 +1445,7 @@ class PGKVStorage(BaseKVStorage):
params = {"workspace": self.workspace}
try:
results = await self.db.query(sql, params, multirows=True)
results = await self.db.query(sql, list(params.values()), multirows=True)
# Special handling for LLM cache to ensure compatibility with _get_cached_extraction_results
if is_namespace(self.namespace, NameSpace.KV_STORE_LLM_RESPONSE_CACHE):
@ -1540,7 +1539,7 @@ class PGKVStorage(BaseKVStorage):
"""Get data by id."""
sql = SQL_TEMPLATES["get_by_id_" + self.namespace]
params = {"workspace": self.workspace, "id": id}
response = await self.db.query(sql, params)
response = await self.db.query(sql, list(params.values()))
if response and is_namespace(self.namespace, NameSpace.KV_STORE_TEXT_CHUNKS):
# Parse llm_cache_list JSON string back to list
@ -1620,7 +1619,7 @@ class PGKVStorage(BaseKVStorage):
ids=",".join([f"'{id}'" for id in ids])
)
params = {"workspace": self.workspace}
results = await self.db.query(sql, params, multirows=True)
results = await self.db.query(sql, list(params.values()), multirows=True)
if results and is_namespace(self.namespace, NameSpace.KV_STORE_TEXT_CHUNKS):
# Parse llm_cache_list JSON string back to list for each result
@ -1708,7 +1707,7 @@ class PGKVStorage(BaseKVStorage):
)
params = {"workspace": self.workspace}
try:
res = await self.db.query(sql, params, multirows=True)
res = await self.db.query(sql, list(params.values()), multirows=True)
if res:
exist_keys = [key["id"] for key in res]
else:
@ -2023,7 +2022,7 @@ class PGVectorStorage(BaseVectorStorage):
"closer_than_threshold": 1 - self.cosine_better_than_threshold,
"top_k": top_k,
}
results = await self.db.query(sql, params=params, multirows=True)
results = await self.db.query(sql, params=list(params.values()), multirows=True)
return results
async def index_done_callback(self) -> None:
@ -2120,7 +2119,7 @@ class PGVectorStorage(BaseVectorStorage):
params = {"workspace": self.workspace, "id": id}
try:
result = await self.db.query(query, params)
result = await self.db.query(query, list(params.values()))
if result:
return dict(result)
return None
@ -2154,7 +2153,7 @@ class PGVectorStorage(BaseVectorStorage):
params = {"workspace": self.workspace}
try:
results = await self.db.query(query, params, multirows=True)
results = await self.db.query(query, list(params.values()), multirows=True)
return [dict(record) for record in results]
except Exception as e:
logger.error(
@ -2187,7 +2186,7 @@ class PGVectorStorage(BaseVectorStorage):
params = {"workspace": self.workspace}
try:
results = await self.db.query(query, params, multirows=True)
results = await self.db.query(query, list(params.values()), multirows=True)
vectors_dict = {}
for result in results:
@ -2274,7 +2273,7 @@ class PGDocStatusStorage(DocStatusStorage):
)
params = {"workspace": self.workspace}
try:
res = await self.db.query(sql, params, multirows=True)
res = await self.db.query(sql, list(params.values()), multirows=True)
if res:
exist_keys = [key["id"] for key in res]
else:
@ -2292,7 +2291,7 @@ class PGDocStatusStorage(DocStatusStorage):
async def get_by_id(self, id: str) -> Union[dict[str, Any], None]:
sql = "select * from LIGHTRAG_DOC_STATUS where workspace=$1 and id=$2"
params = {"workspace": self.workspace, "id": id}
result = await self.db.query(sql, params, True)
result = await self.db.query(sql, list(params.values()), True)
if result is None or result == []:
return None
else:
@ -2338,7 +2337,7 @@ class PGDocStatusStorage(DocStatusStorage):
sql = "SELECT * FROM LIGHTRAG_DOC_STATUS WHERE workspace=$1 AND id = ANY($2)"
params = {"workspace": self.workspace, "ids": ids}
results = await self.db.query(sql, params, True)
results = await self.db.query(sql, list(params.values()), True)
if not results:
return []
@ -2389,7 +2388,8 @@ class PGDocStatusStorage(DocStatusStorage):
FROM LIGHTRAG_DOC_STATUS
where workspace=$1 GROUP BY STATUS
"""
result = await self.db.query(sql, {"workspace": self.workspace}, True)
params = {"workspace": self.workspace}
result = await self.db.query(sql, list(params.values()), True)
counts = {}
for doc in result:
counts[doc["status"]] = doc["count"]
@ -2401,7 +2401,7 @@ class PGDocStatusStorage(DocStatusStorage):
"""all documents with a specific status"""
sql = "select * from LIGHTRAG_DOC_STATUS where workspace=$1 and status=$2"
params = {"workspace": self.workspace, "status": status.value}
result = await self.db.query(sql, params, True)
result = await self.db.query(sql, list(params.values()), True)
docs_by_status = {}
for element in result:
@ -2455,7 +2455,7 @@ class PGDocStatusStorage(DocStatusStorage):
"""Get all documents with a specific track_id"""
sql = "select * from LIGHTRAG_DOC_STATUS where workspace=$1 and track_id=$2"
params = {"workspace": self.workspace, "track_id": track_id}
result = await self.db.query(sql, params, True)
result = await self.db.query(sql, list(params.values()), True)
docs_by_track_id = {}
for element in result:
@ -2555,7 +2555,7 @@ class PGDocStatusStorage(DocStatusStorage):
# Query for total count
count_sql = f"SELECT COUNT(*) as total FROM LIGHTRAG_DOC_STATUS {where_clause}"
count_result = await self.db.query(count_sql, params)
count_result = await self.db.query(count_sql, list(params.values()))
total_count = count_result["total"] if count_result else 0
# Query for paginated data
@ -2568,7 +2568,7 @@ class PGDocStatusStorage(DocStatusStorage):
params["limit"] = page_size
params["offset"] = offset
result = await self.db.query(data_sql, params, True)
result = await self.db.query(data_sql, list(params.values()), True)
# Convert to (doc_id, DocProcessingStatus) tuples
documents = []
@ -2625,7 +2625,7 @@ class PGDocStatusStorage(DocStatusStorage):
GROUP BY status
"""
params = {"workspace": self.workspace}
result = await self.db.query(sql, params, True)
result = await self.db.query(sql, list(params.values()), True)
counts = {}
total_count = 0
@ -3071,7 +3071,7 @@ class PGGraphStorage(BaseGraphStorage):
if readonly:
data = await self.db.query(
query,
params,
list(params.values()) if params else None,
multirows=True,
with_age=True,
graph_name=self.graph_name,
@ -3102,114 +3102,92 @@ class PGGraphStorage(BaseGraphStorage):
return result
async def has_node(self, node_id: str) -> bool:
entity_name_label = self._normalize_node_id(node_id)
query = f"""
SELECT EXISTS (
SELECT 1
FROM {self.graph_name}.base
WHERE ag_catalog.agtype_access_operator(
VARIADIC ARRAY[properties, '"entity_id"'::agtype]
) = (to_json($1::text)::text)::agtype
LIMIT 1
) AS node_exists;
"""
query = """SELECT * FROM cypher('%s', $$
MATCH (n:base {entity_id: "%s"})
RETURN count(n) > 0 AS node_exists
$$) AS (node_exists bool)""" % (self.graph_name, entity_name_label)
single_result = (await self._query(query))[0]
return single_result["node_exists"]
params = {"node_id": node_id}
row = (await self._query(query, params=params))[0]
return bool(row["node_exists"])
async def has_edge(self, source_node_id: str, target_node_id: str) -> bool:
src_label = self._normalize_node_id(source_node_id)
tgt_label = self._normalize_node_id(target_node_id)
query = """SELECT * FROM cypher('%s', $$
MATCH (a:base {entity_id: "%s"})-[r]-(b:base {entity_id: "%s"})
RETURN COUNT(r) > 0 AS edge_exists
$$) AS (edge_exists bool)""" % (
self.graph_name,
src_label,
tgt_label,
)
single_result = (await self._query(query))[0]
return single_result["edge_exists"]
query = f"""
WITH a AS (
SELECT id AS vid
FROM {self.graph_name}.base
WHERE ag_catalog.agtype_access_operator(
VARIADIC ARRAY[properties, '"entity_id"'::agtype]
) = (to_json($1::text)::text)::agtype
),
b AS (
SELECT id AS vid
FROM {self.graph_name}.base
WHERE ag_catalog.agtype_access_operator(
VARIADIC ARRAY[properties, '"entity_id"'::agtype]
) = (to_json($2::text)::text)::agtype
)
SELECT EXISTS (
SELECT 1
FROM {self.graph_name}."DIRECTED" d
JOIN a ON d.start_id = a.vid
JOIN b ON d.end_id = b.vid
LIMIT 1
)
OR EXISTS (
SELECT 1
FROM {self.graph_name}."DIRECTED" d
JOIN a ON d.end_id = a.vid
JOIN b ON d.start_id = b.vid
LIMIT 1
) AS edge_exists;
"""
params = {
"source_node_id": source_node_id,
"target_node_id": target_node_id,
}
row = (await self._query(query, params=params))[0]
return bool(row["edge_exists"])
async def get_node(self, node_id: str) -> dict[str, str] | None:
"""Get node by its label identifier, return only node properties"""
label = self._normalize_node_id(node_id)
query = """SELECT * FROM cypher('%s', $$
MATCH (n:base {entity_id: "%s"})
RETURN n
$$) AS (n agtype)""" % (self.graph_name, label)
record = await self._query(query)
if record:
node = record[0]
node_dict = node["n"]["properties"]
# Process string result, parse it to JSON dictionary
if isinstance(node_dict, str):
try:
node_dict = json.loads(node_dict)
except json.JSONDecodeError:
logger.warning(
f"[{self.workspace}] Failed to parse node string: {node_dict}"
)
return node_dict
result = await self.get_nodes_batch(node_ids=[label])
if result and node_id in result:
return result[node_id]
return None
async def node_degree(self, node_id: str) -> int:
label = self._normalize_node_id(node_id)
query = """SELECT * FROM cypher('%s', $$
MATCH (n:base {entity_id: "%s"})-[r]-()
RETURN count(r) AS total_edge_count
$$) AS (total_edge_count integer)""" % (self.graph_name, label)
record = (await self._query(query))[0]
if record:
edge_count = int(record["total_edge_count"])
return edge_count
result = await self.node_degrees_batch(node_ids=[label])
if result and node_id in result:
return result[node_id]
async def edge_degree(self, src_id: str, tgt_id: str) -> int:
src_degree = await self.node_degree(src_id)
trg_degree = await self.node_degree(tgt_id)
# Convert None to 0 for addition
src_degree = 0 if src_degree is None else src_degree
trg_degree = 0 if trg_degree is None else trg_degree
degrees = int(src_degree) + int(trg_degree)
return degrees
result = await self.edge_degrees_batch(edges=[(src_id, tgt_id)])
if result and (src_id, tgt_id) in result:
return result[(src_id, tgt_id)]
async def get_edge(
self, source_node_id: str, target_node_id: str
) -> dict[str, str] | None:
"""Get edge properties between two nodes"""
src_label = self._normalize_node_id(source_node_id)
tgt_label = self._normalize_node_id(target_node_id)
query = """SELECT * FROM cypher('%s', $$
MATCH (a:base {entity_id: "%s"})-[r]-(b:base {entity_id: "%s"})
RETURN properties(r) as edge_properties
LIMIT 1
$$) AS (edge_properties agtype)""" % (
self.graph_name,
src_label,
tgt_label,
)
record = await self._query(query)
if record and record[0] and record[0]["edge_properties"]:
result = record[0]["edge_properties"]
# Process string result, parse it to JSON dictionary
if isinstance(result, str):
try:
result = json.loads(result)
except json.JSONDecodeError:
logger.warning(
f"[{self.workspace}] Failed to parse edge string: {result}"
)
return result
result = await self.get_edges_batch([{"src": src_label, "tgt": tgt_label}])
if result and (src_label, tgt_label) in result:
return result[(src_label, tgt_label)]
return None
async def get_node_edges(self, source_node_id: str) -> list[tuple[str, str]] | None:
"""

View file

@ -470,7 +470,7 @@ class LightRAG:
self.embedding_func = priority_limit_async_func_call(
self.embedding_func_max_async,
llm_timeout=self.default_embedding_timeout,
queue_name="Embedding func:",
queue_name="Embedding func",
)(self.embedding_func)
# Initialize all storages
@ -567,7 +567,7 @@ class LightRAG:
self.llm_model_func = priority_limit_async_func_call(
self.llm_model_max_async,
llm_timeout=self.default_llm_timeout,
queue_name="LLM func:",
queue_name="LLM func",
)(
partial(
self.llm_model_func, # type: ignore
@ -994,7 +994,7 @@ class LightRAG:
tasks = [
self.chunks_vdb.upsert(inserting_chunks),
self._process_entity_relation_graph(inserting_chunks),
self._process_extract_entities(inserting_chunks),
self.full_docs.upsert(new_docs),
self.text_chunks.upsert(inserting_chunks),
]
@ -1505,6 +1505,15 @@ class LightRAG:
pipeline_status["latest_message"] = log_message
pipeline_status["history_messages"].append(log_message)
# Prevent memory growth: keep only latest 5000 messages when exceeding 10000
if len(pipeline_status["history_messages"]) > 10000:
logger.info(
f"Trimming pipeline history from {len(pipeline_status['history_messages'])} to 5000 messages"
)
pipeline_status["history_messages"] = (
pipeline_status["history_messages"][-5000:]
)
# Get document content from full_docs
content_data = await self.full_docs.get_by_id(doc_id)
if not content_data:
@ -1583,7 +1592,7 @@ class LightRAG:
# Stage 2: Process entity relation graph (after text_chunks are saved)
entity_relation_task = asyncio.create_task(
self._process_entity_relation_graph(
self._process_extract_entities(
chunks, pipeline_status, pipeline_status_lock
)
)
@ -1793,7 +1802,7 @@ class LightRAG:
pipeline_status["latest_message"] = log_message
pipeline_status["history_messages"].append(log_message)
async def _process_entity_relation_graph(
async def _process_extract_entities(
self, chunk: dict[str, Any], pipeline_status=None, pipeline_status_lock=None
) -> list:
try:

View file

@ -172,6 +172,7 @@ async def openai_complete_if_cache(
logger.debug("===== Entering func of LLM =====")
logger.debug(f"Model: {model} Base URL: {base_url}")
logger.debug(f"Client Configs: {client_configs}")
logger.debug(f"Additional kwargs: {kwargs}")
logger.debug(f"Num of history messages: {len(history_messages)}")
verbose_debug(f"System prompt: {system_prompt}")

View file

@ -29,6 +29,8 @@ from .utils import (
pick_by_vector_similarity,
process_chunks_unified,
build_file_path,
safe_vdb_operation_with_exception,
create_prefixed_exception,
)
from .base import (
BaseGraphStorage,
@ -303,7 +305,7 @@ async def _summarize_descriptions(
use_prompt,
use_llm_func,
llm_response_cache=llm_response_cache,
cache_type="extract",
cache_type="summary",
)
return summary
@ -316,9 +318,8 @@ async def _handle_single_entity_extraction(
if len(record_attributes) < 4 or "entity" not in record_attributes[0]:
if len(record_attributes) > 1 and "entity" in record_attributes[0]:
logger.warning(
f"Entity extraction failed in {chunk_key}: expecting 4 fields but got {len(record_attributes)}"
f"{chunk_key}: Entity `{record_attributes[1]}` extraction failed -- expecting 4 fields but got {len(record_attributes)}"
)
logger.warning(f"Entity extracted: {record_attributes[1]}")
return None
try:
@ -346,6 +347,9 @@ async def _handle_single_entity_extraction(
)
return None
# Captitalize first letter of entity_type
entity_type = entity_type.title()
# Process entity description with same cleaning pipeline
entity_description = sanitize_and_normalize_extracted_text(record_attributes[3])
@ -383,9 +387,8 @@ async def _handle_single_relationship_extraction(
if len(record_attributes) < 5 or "relationship" not in record_attributes[0]:
if len(record_attributes) > 1 and "relationship" in record_attributes[0]:
logger.warning(
f"Relation extraction failed in {chunk_key}: expecting 5 fields but got {len(record_attributes)}"
f"{chunk_key}: Relation `{record_attributes[1]}` extraction failed -- expecting 5 fields but got {len(record_attributes)}"
)
logger.warning(f"Relation extracted: {record_attributes[1]}")
return None
try:
@ -674,19 +677,34 @@ async def _rebuild_knowledge_from_chunks(
# Execute all tasks in parallel with semaphore control and early failure detection
done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_EXCEPTION)
# Check if any task raised an exception
# Check if any task raised an exception and ensure all exceptions are retrieved
first_exception = None
for task in done:
if task.exception():
# If a task failed, cancel all pending tasks
for pending_task in pending:
pending_task.cancel()
try:
exception = task.exception()
if exception is not None:
if first_exception is None:
first_exception = exception
else:
# Task completed successfully, retrieve result to mark as processed
task.result()
except Exception as e:
if first_exception is None:
first_exception = e
# Wait for cancellation to complete
if pending:
await asyncio.wait(pending)
# If any task failed, cancel all pending tasks and raise the first exception
if first_exception is not None:
# Cancel all pending tasks
for pending_task in pending:
pending_task.cancel()
# Re-raise the exception to notify the caller
raise task.exception()
# Wait for cancellation to complete
if pending:
await asyncio.wait(pending)
# Re-raise the first exception to notify the caller
raise first_exception
# Final status report
status_message = f"KG rebuild completed: {rebuilt_entities_count} entities and {rebuilt_relationships_count} relationships rebuilt successfully."
@ -792,6 +810,96 @@ async def _get_cached_extraction_results(
return sorted_cached_results
async def _process_extraction_result(
result: str,
chunk_key: str,
file_path: str = "unknown_source",
tuple_delimiter: str = "<|>",
record_delimiter: str = "##",
completion_delimiter: str = "<|COMPLETE|>",
) -> tuple[dict, dict]:
"""Process a single extraction result (either initial or gleaning)
Args:
result (str): The extraction result to process
chunk_key (str): The chunk key for source tracking
file_path (str): The file path for citation
tuple_delimiter (str): Delimiter for tuple fields
record_delimiter (str): Delimiter for records
completion_delimiter (str): Delimiter for completion
Returns:
tuple: (nodes_dict, edges_dict) containing the extracted entities and relationships
"""
maybe_nodes = defaultdict(list)
maybe_edges = defaultdict(list)
# Standardize Chinese brackets around record_delimiter to English brackets
bracket_pattern = f"[)](\\s*{re.escape(record_delimiter)}\\s*)[(]"
result = re.sub(bracket_pattern, ")\\1(", result)
if completion_delimiter not in result:
logger.warning(
f"{chunk_key}: Complete delimiter can not be found in extraction result"
)
records = split_string_by_multi_markers(
result,
[record_delimiter, completion_delimiter],
)
for record in records:
# Remove outer brackets (support English and Chinese brackets)
record = record.strip()
if record.startswith("(") or record.startswith(""):
record = record[1:]
if record.endswith(")") or record.endswith(""):
record = record[:-1]
record = record.strip()
if record is None:
continue
if tuple_delimiter == "<|>":
# fix entity<| with entity<|>
record = re.sub(r"^entity<\|(?!>)", r"entity<|>", record)
# fix relationship<| with relationship<|>
record = re.sub(r"^relationship<\|(?!>)", r"relationship<|>", record)
# fix <||> with <|>
record = record.replace("<||>", "<|>")
# fix < | > with <|>
record = record.replace("< | >", "<|>")
# fix <<|>> with <|>
record = record.replace("<<|>>", "<|>")
# fix <|>> with <|>
record = record.replace("<|>>", "<|>")
# fix <<|> with <|>
record = record.replace("<<|>", "<|>")
# fix <.|> with <|>
record = record.replace("<.|>", "<|>")
# fix <|.> with <|>
record = record.replace("<|.>", "<|>")
record_attributes = split_string_by_multi_markers(record, [tuple_delimiter])
# Try to parse as entity
entity_data = await _handle_single_entity_extraction(
record_attributes, chunk_key, file_path
)
if entity_data is not None:
maybe_nodes[entity_data["entity_name"]].append(entity_data)
continue
# Try to parse as relationship
relationship_data = await _handle_single_relationship_extraction(
record_attributes, chunk_key, file_path
)
if relationship_data is not None:
maybe_edges[
(relationship_data["src_id"], relationship_data["tgt_id"])
].append(relationship_data)
return dict(maybe_nodes), dict(maybe_edges)
async def _parse_extraction_result(
text_chunks_storage: BaseKVStorage, extraction_result: str, chunk_id: str
) -> tuple[dict, dict]:
@ -813,53 +921,16 @@ async def _parse_extraction_result(
if chunk_data
else "unknown_source"
)
context_base = dict(
# Call the shared processing function
return await _process_extraction_result(
extraction_result,
chunk_id,
file_path,
tuple_delimiter=PROMPTS["DEFAULT_TUPLE_DELIMITER"],
record_delimiter=PROMPTS["DEFAULT_RECORD_DELIMITER"],
completion_delimiter=PROMPTS["DEFAULT_COMPLETION_DELIMITER"],
)
maybe_nodes = defaultdict(list)
maybe_edges = defaultdict(list)
# Preventive fix: when tuple_delimiter is <|>, fix LLM output instability issues
if context_base["tuple_delimiter"] == "<|>":
# 1. Convert <||> to <|>
extraction_result = extraction_result.replace("<||>", "<|>")
# 2. Convert < | > to <|>
extraction_result = extraction_result.replace("< | >", "<|>")
# Parse the extraction result using the same logic as in extract_entities
records = split_string_by_multi_markers(
extraction_result,
[context_base["record_delimiter"], context_base["completion_delimiter"]],
)
for record in records:
record = re.search(r"\((.*)\)", record)
if record is None:
continue
record = record.group(1)
record_attributes = split_string_by_multi_markers(
record, [context_base["tuple_delimiter"]]
)
# Try to parse as entity
entity_data = await _handle_single_entity_extraction(
record_attributes, chunk_id, file_path
)
if entity_data is not None:
maybe_nodes[entity_data["entity_name"]].append(entity_data)
continue
# Try to parse as relationship
relationship_data = await _handle_single_relationship_extraction(
record_attributes, chunk_id, file_path
)
if relationship_data is not None:
maybe_edges[
(relationship_data["src_id"], relationship_data["tgt_id"])
].append(relationship_data)
return dict(maybe_nodes), dict(maybe_edges)
async def _rebuild_single_entity(
@ -882,24 +953,24 @@ async def _rebuild_single_entity(
async def _update_entity_storage(
final_description: str, entity_type: str, file_paths: set[str]
):
# Update entity in graph storage
updated_entity_data = {
**current_entity,
"description": final_description,
"entity_type": entity_type,
"source_id": GRAPH_FIELD_SEP.join(chunk_ids),
"file_path": GRAPH_FIELD_SEP.join(file_paths)
if file_paths
else current_entity.get("file_path", "unknown_source"),
}
await knowledge_graph_inst.upsert_node(entity_name, updated_entity_data)
try:
# Update entity in graph storage (critical path)
updated_entity_data = {
**current_entity,
"description": final_description,
"entity_type": entity_type,
"source_id": GRAPH_FIELD_SEP.join(chunk_ids),
"file_path": GRAPH_FIELD_SEP.join(file_paths)
if file_paths
else current_entity.get("file_path", "unknown_source"),
}
await knowledge_graph_inst.upsert_node(entity_name, updated_entity_data)
# Update entity in vector database
entity_vdb_id = compute_mdhash_id(entity_name, prefix="ent-")
# Update entity in vector database (equally critical)
entity_vdb_id = compute_mdhash_id(entity_name, prefix="ent-")
entity_content = f"{entity_name}\n{final_description}"
entity_content = f"{entity_name}\n{final_description}"
await entities_vdb.upsert(
{
vdb_data = {
entity_vdb_id: {
"content": entity_content,
"entity_name": entity_name,
@ -909,7 +980,20 @@ async def _rebuild_single_entity(
"file_path": updated_entity_data["file_path"],
}
}
)
# Use safe operation wrapper - VDB failure must throw exception
await safe_vdb_operation_with_exception(
operation=lambda: entities_vdb.upsert(vdb_data),
operation_name="rebuild_entity_upsert",
entity_name=entity_name,
max_retries=3,
retry_delay=0.1,
)
except Exception as e:
error_msg = f"Failed to update entity storage for `{entity_name}`: {e}"
logger.error(error_msg)
raise # Re-raise exception
# Collect all entity data from relevant chunks
all_entity_data = []
@ -1097,21 +1181,21 @@ async def _rebuild_single_relationship(
await knowledge_graph_inst.upsert_edge(src, tgt, updated_relationship_data)
# Update relationship in vector database
rel_vdb_id = compute_mdhash_id(src + tgt, prefix="rel-")
rel_vdb_id_reverse = compute_mdhash_id(tgt + src, prefix="rel-")
# Delete old vector records first (both directions to be safe)
try:
await relationships_vdb.delete([rel_vdb_id, rel_vdb_id_reverse])
except Exception as e:
logger.debug(
f"Could not delete old relationship vector records {rel_vdb_id}, {rel_vdb_id_reverse}: {e}"
)
rel_vdb_id = compute_mdhash_id(src + tgt, prefix="rel-")
rel_vdb_id_reverse = compute_mdhash_id(tgt + src, prefix="rel-")
# Insert new vector record
rel_content = f"{combined_keywords}\t{src}\n{tgt}\n{final_description}"
await relationships_vdb.upsert(
{
# Delete old vector records first (both directions to be safe)
try:
await relationships_vdb.delete([rel_vdb_id, rel_vdb_id_reverse])
except Exception as e:
logger.debug(
f"Could not delete old relationship vector records {rel_vdb_id}, {rel_vdb_id_reverse}: {e}"
)
# Insert new vector record
rel_content = f"{combined_keywords}\t{src}\n{tgt}\n{final_description}"
vdb_data = {
rel_vdb_id: {
"src_id": src,
"tgt_id": tgt,
@ -1123,7 +1207,20 @@ async def _rebuild_single_relationship(
"file_path": updated_relationship_data["file_path"],
}
}
)
# Use safe operation wrapper - VDB failure must throw exception
await safe_vdb_operation_with_exception(
operation=lambda: relationships_vdb.upsert(vdb_data),
operation_name="rebuild_relationship_upsert",
entity_name=f"{src}-{tgt}",
max_retries=3,
retry_delay=0.2,
)
except Exception as e:
error_msg = f"Failed to rebuild relationship storage for `{src}-{tgt}`: {e}"
logger.error(error_msg)
raise # Re-raise exception
async def _merge_nodes_then_upsert(
@ -1301,9 +1398,9 @@ async def _merge_edges_then_upsert(
# Log based on actual LLM usage
if llm_was_used:
status_message = f"LLMmrg: `{src_id} - {tgt_id}` | {already_fragment}+{num_fragment - already_fragment}{dd_message}"
status_message = f"LLMmrg: `{src_id}`~`{tgt_id}` | {already_fragment}+{num_fragment - already_fragment}{dd_message}"
else:
status_message = f"Merged: `{src_id} - {tgt_id}` | {already_fragment}+{num_fragment - already_fragment}{dd_message}"
status_message = f"Merged: `{src_id}`~`{tgt_id}` | {already_fragment}+{num_fragment - already_fragment}{dd_message}"
logger.info(status_message)
if pipeline_status is not None and pipeline_status_lock is not None:
@ -1468,27 +1565,71 @@ async def merge_nodes_and_edges(
async with get_storage_keyed_lock(
[entity_name], namespace=namespace, enable_logging=False
):
entity_data = await _merge_nodes_then_upsert(
entity_name,
entities,
knowledge_graph_inst,
global_config,
pipeline_status,
pipeline_status_lock,
llm_response_cache,
)
if entity_vdb is not None:
data_for_vdb = {
compute_mdhash_id(entity_data["entity_name"], prefix="ent-"): {
"entity_name": entity_data["entity_name"],
"entity_type": entity_data["entity_type"],
"content": f"{entity_data['entity_name']}\n{entity_data['description']}",
"source_id": entity_data["source_id"],
"file_path": entity_data.get("file_path", "unknown_source"),
try:
# Graph database operation (critical path, must succeed)
entity_data = await _merge_nodes_then_upsert(
entity_name,
entities,
knowledge_graph_inst,
global_config,
pipeline_status,
pipeline_status_lock,
llm_response_cache,
)
# Vector database operation (equally critical, must succeed)
if entity_vdb is not None and entity_data:
data_for_vdb = {
compute_mdhash_id(
entity_data["entity_name"], prefix="ent-"
): {
"entity_name": entity_data["entity_name"],
"entity_type": entity_data["entity_type"],
"content": f"{entity_data['entity_name']}\n{entity_data['description']}",
"source_id": entity_data["source_id"],
"file_path": entity_data.get(
"file_path", "unknown_source"
),
}
}
}
await entity_vdb.upsert(data_for_vdb)
return entity_data
# Use safe operation wrapper - VDB failure must throw exception
await safe_vdb_operation_with_exception(
operation=lambda: entity_vdb.upsert(data_for_vdb),
operation_name="entity_upsert",
entity_name=entity_name,
max_retries=3,
retry_delay=0.1,
)
return entity_data
except Exception as e:
# Any database operation failure is critical
error_msg = (
f"Critical error in entity processing for `{entity_name}`: {e}"
)
logger.error(error_msg)
# Try to update pipeline status, but don't let status update failure affect main exception
try:
if (
pipeline_status is not None
and pipeline_status_lock is not None
):
async with pipeline_status_lock:
pipeline_status["latest_message"] = error_msg
pipeline_status["history_messages"].append(error_msg)
except Exception as status_error:
logger.error(
f"Failed to update pipeline status: {status_error}"
)
# Re-raise the original exception with a prefix
prefixed_exception = create_prefixed_exception(
e, f"`{entity_name}`"
)
raise prefixed_exception from e
# Create entity processing tasks
entity_tasks = []
@ -1503,17 +1644,32 @@ async def merge_nodes_and_edges(
entity_tasks, return_when=asyncio.FIRST_EXCEPTION
)
# Check if any task raised an exception
# Check if any task raised an exception and ensure all exceptions are retrieved
first_exception = None
successful_results = []
for task in done:
if task.exception():
# If a task failed, cancel all pending tasks
for pending_task in pending:
pending_task.cancel()
# Wait for cancellation to complete
if pending:
await asyncio.wait(pending)
# Re-raise the exception to notify the caller
raise task.exception()
try:
exception = task.exception()
if exception is not None:
if first_exception is None:
first_exception = exception
else:
successful_results.append(task.result())
except Exception as e:
if first_exception is None:
first_exception = e
# If any task failed, cancel all pending tasks and raise the first exception
if first_exception is not None:
# Cancel all pending tasks
for pending_task in pending:
pending_task.cancel()
# Wait for cancellation to complete
if pending:
await asyncio.wait(pending)
# Re-raise the first exception to notify the caller
raise first_exception
# If all tasks completed successfully, collect results
processed_entities = [task.result() for task in entity_tasks]
@ -1536,38 +1692,78 @@ async def merge_nodes_and_edges(
namespace=namespace,
enable_logging=False,
):
added_entities = [] # Track entities added during edge processing
edge_data = await _merge_edges_then_upsert(
edge_key[0],
edge_key[1],
edges,
knowledge_graph_inst,
global_config,
pipeline_status,
pipeline_status_lock,
llm_response_cache,
added_entities, # Pass list to collect added entities
)
try:
added_entities = [] # Track entities added during edge processing
if edge_data is None:
return None, []
# Graph database operation (critical path, must succeed)
edge_data = await _merge_edges_then_upsert(
edge_key[0],
edge_key[1],
edges,
knowledge_graph_inst,
global_config,
pipeline_status,
pipeline_status_lock,
llm_response_cache,
added_entities, # Pass list to collect added entities
)
if relationships_vdb is not None:
data_for_vdb = {
compute_mdhash_id(
edge_data["src_id"] + edge_data["tgt_id"], prefix="rel-"
): {
"src_id": edge_data["src_id"],
"tgt_id": edge_data["tgt_id"],
"keywords": edge_data["keywords"],
"content": f"{edge_data['src_id']}\t{edge_data['tgt_id']}\n{edge_data['keywords']}\n{edge_data['description']}",
"source_id": edge_data["source_id"],
"file_path": edge_data.get("file_path", "unknown_source"),
"weight": edge_data.get("weight", 1.0),
if edge_data is None:
return None, []
# Vector database operation (equally critical, must succeed)
if relationships_vdb is not None:
data_for_vdb = {
compute_mdhash_id(
edge_data["src_id"] + edge_data["tgt_id"], prefix="rel-"
): {
"src_id": edge_data["src_id"],
"tgt_id": edge_data["tgt_id"],
"keywords": edge_data["keywords"],
"content": f"{edge_data['src_id']}\t{edge_data['tgt_id']}\n{edge_data['keywords']}\n{edge_data['description']}",
"source_id": edge_data["source_id"],
"file_path": edge_data.get(
"file_path", "unknown_source"
),
"weight": edge_data.get("weight", 1.0),
}
}
}
await relationships_vdb.upsert(data_for_vdb)
return edge_data, added_entities
# Use safe operation wrapper - VDB failure must throw exception
await safe_vdb_operation_with_exception(
operation=lambda: relationships_vdb.upsert(data_for_vdb),
operation_name="relationship_upsert",
entity_name=f"{edge_data['src_id']}-{edge_data['tgt_id']}",
max_retries=3,
retry_delay=0.1,
)
return edge_data, added_entities
except Exception as e:
# Any database operation failure is critical
error_msg = f"Critical error in relationship processing for `{sorted_edge_key}`: {e}"
logger.error(error_msg)
# Try to update pipeline status, but don't let status update failure affect main exception
try:
if (
pipeline_status is not None
and pipeline_status_lock is not None
):
async with pipeline_status_lock:
pipeline_status["latest_message"] = error_msg
pipeline_status["history_messages"].append(error_msg)
except Exception as status_error:
logger.error(
f"Failed to update pipeline status: {status_error}"
)
# Re-raise the original exception with a prefix
prefixed_exception = create_prefixed_exception(
e, f"{sorted_edge_key}"
)
raise prefixed_exception from e
# Create relationship processing tasks
edge_tasks = []
@ -1584,17 +1780,32 @@ async def merge_nodes_and_edges(
edge_tasks, return_when=asyncio.FIRST_EXCEPTION
)
# Check if any task raised an exception
# Check if any task raised an exception and ensure all exceptions are retrieved
first_exception = None
successful_results = []
for task in done:
if task.exception():
# If a task failed, cancel all pending tasks
for pending_task in pending:
pending_task.cancel()
# Wait for cancellation to complete
if pending:
await asyncio.wait(pending)
# Re-raise the exception to notify the caller
raise task.exception()
try:
exception = task.exception()
if exception is not None:
if first_exception is None:
first_exception = exception
else:
successful_results.append(task.result())
except Exception as e:
if first_exception is None:
first_exception = e
# If any task failed, cancel all pending tasks and raise the first exception
if first_exception is not None:
# Cancel all pending tasks
for pending_task in pending:
pending_task.cancel()
# Wait for cancellation to complete
if pending:
await asyncio.wait(pending)
# Re-raise the first exception to notify the caller
raise first_exception
# If all tasks completed successfully, collect results
for task in edge_tasks:
@ -1705,7 +1916,6 @@ async def extract_entities(
# add example's format
examples = examples.format(**example_context_base)
entity_extract_prompt = PROMPTS["entity_extraction"]
context_base = dict(
tuple_delimiter=PROMPTS["DEFAULT_TUPLE_DELIMITER"],
record_delimiter=PROMPTS["DEFAULT_RECORD_DELIMITER"],
@ -1715,64 +1925,9 @@ async def extract_entities(
language=language,
)
continue_prompt = PROMPTS["entity_continue_extraction"].format(**context_base)
if_loop_prompt = PROMPTS["entity_if_loop_extraction"]
processed_chunks = 0
total_chunks = len(ordered_chunks)
async def _process_extraction_result(
result: str, chunk_key: str, file_path: str = "unknown_source"
):
"""Process a single extraction result (either initial or gleaning)
Args:
result (str): The extraction result to process
chunk_key (str): The chunk key for source tracking
file_path (str): The file path for citation
Returns:
tuple: (nodes_dict, edges_dict) containing the extracted entities and relationships
"""
maybe_nodes = defaultdict(list)
maybe_edges = defaultdict(list)
# Preventive fix: when tuple_delimiter is <|>, fix LLM output instability issues
if context_base["tuple_delimiter"] == "<|>":
# 1. Convert <||> to <|>
result = result.replace("<||>", "<|>")
# 2. Convert < | > to <|>
result = result.replace("< | >", "<|>")
records = split_string_by_multi_markers(
result,
[context_base["record_delimiter"], context_base["completion_delimiter"]],
)
for record in records:
record = re.search(r"\((.*)\)", record)
if record is None:
continue
record = record.group(1)
record_attributes = split_string_by_multi_markers(
record, [context_base["tuple_delimiter"]]
)
if_entities = await _handle_single_entity_extraction(
record_attributes, chunk_key, file_path
)
if if_entities is not None:
maybe_nodes[if_entities["entity_name"]].append(if_entities)
continue
if_relation = await _handle_single_relationship_extraction(
record_attributes, chunk_key, file_path
)
if if_relation is not None:
maybe_edges[(if_relation["src_id"], if_relation["tgt_id"])].append(
if_relation
)
return maybe_nodes, maybe_edges
async def _process_single_content(chunk_key_dp: tuple[str, TextChunkSchema]):
"""Process a single chunk
Args:
@ -1792,13 +1947,20 @@ async def extract_entities(
cache_keys_collector = []
# Get initial extraction
hint_prompt = entity_extract_prompt.format(
entity_extraction_system_prompt = PROMPTS[
"entity_extraction_system_prompt"
].format(**{**context_base, "input_text": content})
entity_extraction_user_prompt = PROMPTS["entity_extraction_user_prompt"].format(
**{**context_base, "input_text": content}
)
entity_continue_extraction_user_prompt = PROMPTS[
"entity_continue_extraction_user_prompt"
].format(**{**context_base, "input_text": content})
final_result = await use_llm_func_with_cache(
hint_prompt,
entity_extraction_user_prompt,
use_llm_func,
system_prompt=entity_extraction_system_prompt,
llm_response_cache=llm_response_cache,
cache_type="extract",
chunk_id=chunk_key,
@ -1806,18 +1968,26 @@ async def extract_entities(
)
# Store LLM cache reference in chunk (will be handled by use_llm_func_with_cache)
history = pack_user_ass_to_openai_messages(hint_prompt, final_result)
history = pack_user_ass_to_openai_messages(
entity_extraction_user_prompt, final_result
)
# Process initial extraction with file path
maybe_nodes, maybe_edges = await _process_extraction_result(
final_result, chunk_key, file_path
final_result,
chunk_key,
file_path,
tuple_delimiter=context_base["tuple_delimiter"],
record_delimiter=context_base["record_delimiter"],
completion_delimiter=context_base["completion_delimiter"],
)
# Process additional gleaning results
for now_glean_index in range(entity_extract_max_gleaning):
if entity_extract_max_gleaning > 0:
glean_result = await use_llm_func_with_cache(
continue_prompt,
entity_continue_extraction_user_prompt,
use_llm_func,
system_prompt=entity_extraction_system_prompt,
llm_response_cache=llm_response_cache,
history_messages=history,
cache_type="extract",
@ -1825,11 +1995,14 @@ async def extract_entities(
cache_keys_collector=cache_keys_collector,
)
history += pack_user_ass_to_openai_messages(continue_prompt, glean_result)
# Process gleaning result separately with file path
glean_nodes, glean_edges = await _process_extraction_result(
glean_result, chunk_key, file_path
glean_result,
chunk_key,
file_path,
tuple_delimiter=context_base["tuple_delimiter"],
record_delimiter=context_base["record_delimiter"],
completion_delimiter=context_base["completion_delimiter"],
)
# Merge results - only add entities and edges with new names
@ -1837,28 +2010,15 @@ async def extract_entities(
if (
entity_name not in maybe_nodes
): # Only accetp entities with new name in gleaning stage
maybe_nodes[entity_name] = [] # Explicitly create the list
maybe_nodes[entity_name].extend(entities)
for edge_key, edges in glean_edges.items():
if (
edge_key not in maybe_edges
): # Only accetp edges with new name in gleaning stage
maybe_edges[edge_key] = [] # Explicitly create the list
maybe_edges[edge_key].extend(edges)
if now_glean_index == entity_extract_max_gleaning - 1:
break
if_loop_result: str = await use_llm_func_with_cache(
if_loop_prompt,
use_llm_func,
llm_response_cache=llm_response_cache,
history_messages=history,
cache_type="extract",
cache_keys_collector=cache_keys_collector,
)
if_loop_result = if_loop_result.strip().strip('"').strip("'").lower()
if if_loop_result != "yes":
break
# Batch update chunk's llm_cache_list with all collected cache keys
if cache_keys_collector and text_chunks_storage:
await update_chunk_cache_list(
@ -1898,24 +2058,40 @@ async def extract_entities(
# This allows us to cancel remaining tasks if any task fails
done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_EXCEPTION)
# Check if any task raised an exception
# Check if any task raised an exception and ensure all exceptions are retrieved
first_exception = None
chunk_results = []
for task in done:
if task.exception():
# If a task failed, cancel all pending tasks
# This prevents unnecessary processing since the parent function will abort anyway
for pending_task in pending:
pending_task.cancel()
try:
exception = task.exception()
if exception is not None:
if first_exception is None:
first_exception = exception
else:
chunk_results.append(task.result())
except Exception as e:
if first_exception is None:
first_exception = e
# Wait for cancellation to complete
if pending:
await asyncio.wait(pending)
# If any task failed, cancel all pending tasks and raise the first exception
if first_exception is not None:
# Cancel all pending tasks
for pending_task in pending:
pending_task.cancel()
# Re-raise the exception to notify the caller
raise task.exception()
# Wait for cancellation to complete
if pending:
await asyncio.wait(pending)
# If all tasks completed successfully, collect results
chunk_results = [task.result() for task in tasks]
# Add progress prefix to the exception message
progress_prefix = f"Chunks[{processed_chunks+1}/{total_chunks}]"
# Re-raise the original exception with a prefix
prefixed_exception = create_prefixed_exception(first_exception, progress_prefix)
raise prefixed_exception from first_exception
# If all tasks completed successfully, chunk_results already contains the results
# Return the chunk_results for later processing in merge_nodes_and_edges
return chunk_results
@ -3169,7 +3345,7 @@ async def _get_node_data(
):
# get similar entities
logger.info(
f"Query nodes: {query}, top_k: {query_param.top_k}, cosine: {entities_vdb.cosine_better_than_threshold}"
f"Query nodes: {query} (top_k:{query_param.top_k}, cosine:{entities_vdb.cosine_better_than_threshold})"
)
results = await entities_vdb.query(query, top_k=query_param.top_k)
@ -3445,7 +3621,7 @@ async def _get_edge_data(
query_param: QueryParam,
):
logger.info(
f"Query edges: {keywords}, top_k: {query_param.top_k}, cosine: {relationships_vdb.cosine_better_than_threshold}"
f"Query edges: {keywords} (top_k:{query_param.top_k}, cosine:{relationships_vdb.cosine_better_than_threshold})"
)
results = await relationships_vdb.query(keywords, top_k=query_param.top_k)

View file

@ -10,60 +10,65 @@ PROMPTS["DEFAULT_COMPLETION_DELIMITER"] = "<|COMPLETE|>"
PROMPTS["DEFAULT_USER_PROMPT"] = "n/a"
PROMPTS["entity_extraction"] = """---Goal---
Given a text document that is potentially relevant to this activity and a list of entity types, identify all entities of those types from the text and all relationships among the identified entities.
Use {language} as output language.
PROMPTS["entity_extraction_system_prompt"] = """---Role---
You are a Knowledge Graph Specialist responsible for extracting entities and relationships from the input text.
---Steps---
1. Recognizing definitively conceptualized entities in text. For each identified entity, extract the following information:
- entity_name: Name of the entity, use same language as input text. If English, capitalized the name
- entity_type: One of the following types: [{entity_types}]. If the entity doesn't clearly fit any category, classify it as "Other".
- entity_description: Provide a comprehensive description of the entity's attributes and activities based on the information present in the input text. Do not add external knowledge.
2. Format each entity as:
("entity"{tuple_delimiter}<entity_name>{tuple_delimiter}<entity_type>{tuple_delimiter}<entity_description>)
3. From the entities identified in step 1, identify all pairs of (source_entity, target_entity) that are directly and clearly related based on the text. Unsubstantiated relationships must be excluded from the output.
For each pair of related entities, extract the following information:
- source_entity: name of the source entity, as identified in step 1
- target_entity: name of the target entity, as identified in step 1
- relationship_keywords: one or more high-level key words that summarize the overarching nature of the relationship, focusing on concepts or themes rather than specific details
- relationship_description: Explain the nature of the relationship between the source and target entities, providing a clear rationale for their connection
4. Format each relationship as:
("relationship"{tuple_delimiter}<source_entity>{tuple_delimiter}<target_entity>{tuple_delimiter}<relationship_keywords>{tuple_delimiter}<relationship_description>)
5. Use `{tuple_delimiter}` as field delimiter. Use `{record_delimiter}` as the list delimiter. Ensure no spaces are added around the delimiters.
6. When finished, output `{completion_delimiter}`
7. Return identified entities and relationships in {language}.
---Quality Guidelines---
- Only extract entities that are clearly defined and meaningful in the context
- Avoid over-interpretation; stick to what is explicitly stated in the text
- Include specific numerical data in entity name when relevant
- Ensure entity names are consistent throughout the extraction
---Instructions---
1. **Entity Extraction:** Identify clearly defined and meaningful entities in the input text, and extract the following information:
- entity_name: Name of the entity, ensure entity names are consistent throughout the extraction.
- entity_type: Categorize the entity using the following entity types: {entity_types}; if none of the provided types are suitable, classify it as `Other`.
- entity_description: Provide a comprehensive description of the entity's attributes and activities based on the information present in the input text.
2. **Entity Output Format:** (entity{tuple_delimiter}entity_name{tuple_delimiter}entity_type{tuple_delimiter}entity_description)
3. **Relationship Extraction:** Identify direct, clearly-stated and meaningful relationships between extracted entities within the input text, and extract the following information:
- source_entity: name of the source entity.
- target_entity: name of the target entity.
- relationship_keywords: one or more high-level key words that summarize the overarching nature of the relationship, focusing on concepts or themes rather than specific details.
- relationship_description: Explain the nature of the relationship between the source and target entities, providing a clear rationale for their connection.
4. **Relationship Output Format:** (relationship{tuple_delimiter}source_entity{tuple_delimiter}target_entity{tuple_delimiter}relationship_keywords{tuple_delimiter}relationship_description)
5. **Relationship Order:** Prioritize relationships based on their significance to the intended meaning of input text, and output more crucial relationships first.
6. **Avoid Pronouns:** For entity names and all descriptions, explicitly name the subject or object instead of using pronouns; avoid pronouns such as `this document`, `our company`, `I`, `you`, and `he/she`.
7. **Undirectional Relationship:** Treat relationships as undirected; swapping the source and target entities does not constitute a new relationship. Avoid outputting duplicate relationships.
8. **Language:** Output entity names, keywords and descriptions in {language}.
9. **Delimiter:** Use `{record_delimiter}` as the entity or relationship list delimiter; output `{completion_delimiter}` when all the entities and relationships are extracted.
---Examples---
{examples}
---Real Data---
---Real Data to be Processed---
<Input>
Entity_types: [{entity_types}]
Text:
```
{input_text}
```
"""
---Output---
Output:
PROMPTS["entity_extraction_user_prompt"] = """---Task---
Extract entities and relationships from the input text to be Processed.
---Instructions---
1. Output entities and relationships, prioritized by their relevance to the input text's core meaning.
2. Output `{completion_delimiter}` when all the entities and relationships are extracted.
3. Ensure the output language is {language}.
<Output>
"""
PROMPTS["entity_continue_extraction_user_prompt"] = """---Task---
Identify any missed entities or relationships from the input text to be Processed of last extraction task.
---Instructions---
1. Output the entities and realtionships in the same format as previous extraction task.
2. Do not include entities and relations that have been correctly extracted in last extraction task.
3. If the entity or relation output is truncated or has missing fields in last extraction task, please re-output it in the correct format.
4. Output `{completion_delimiter}` when all the entities and relationships are extracted.
5. Ensure the output language is {language}.
<Output>
"""
PROMPTS["entity_extraction_examples"] = [
"""------Example 1------
Entity_types: [organization,person,equiment,product,technology,location,event,category]
Text:
"""<Input Text>
```
while Alex clenched his jaw, the buzz of frustration dull against the backdrop of Taylor's authoritarian certainty. It was this competitive undercurrent that kept him alert, the sense that his and Jordan's shared commitment to discovery was an unspoken rebellion against Cruz's narrowing vision of control and order.
@ -74,7 +79,7 @@ The underlying dismissal earlier seemed to falter, replaced by a glimpse of relu
It was a small transformation, barely perceptible, but one that Alex noted with an inward nod. They had all been brought here by different paths
```
Output:
<Output>
(entity{tuple_delimiter}Alex{tuple_delimiter}person{tuple_delimiter}Alex is a character who experiences frustration and is observant of the dynamics among other characters.){record_delimiter}
(entity{tuple_delimiter}Taylor{tuple_delimiter}person{tuple_delimiter}Taylor is portrayed with authoritarian certainty and shows a moment of reverence towards a device, indicating a change in perspective.){record_delimiter}
(entity{tuple_delimiter}Jordan{tuple_delimiter}person{tuple_delimiter}Jordan shares a commitment to discovery and has a significant interaction with Taylor regarding a device.){record_delimiter}
@ -88,10 +93,7 @@ Output:
{completion_delimiter}
""",
"""------Example 2------
Entity_types: [organization,person,equiment,product,technology,location,event,category]
Text:
"""<Input Text>
```
Stock markets faced a sharp downturn today as tech giants saw significant declines, with the Global Tech Index dropping by 3.4% in midday trading. Analysts attribute the selloff to investor concerns over rising interest rates and regulatory uncertainty.
@ -102,7 +104,7 @@ Meanwhile, commodity markets reflected a mixed sentiment. Gold futures rose by 1
Financial experts are closely watching the Federal Reserve's next move, as speculation grows over potential rate hikes. The upcoming policy announcement is expected to influence investor confidence and overall market stability.
```
Output:
<Output>
(entity{tuple_delimiter}Global Tech Index{tuple_delimiter}category{tuple_delimiter}The Global Tech Index tracks the performance of major technology stocks and experienced a 3.4% decline today.){record_delimiter}
(entity{tuple_delimiter}Nexon Technologies{tuple_delimiter}organization{tuple_delimiter}Nexon Technologies is a tech company that saw its stock decline by 7.8% after disappointing earnings.){record_delimiter}
(entity{tuple_delimiter}Omega Energy{tuple_delimiter}organization{tuple_delimiter}Omega Energy is an energy company that gained 2.1% in stock value due to rising oil prices.){record_delimiter}
@ -118,15 +120,12 @@ Output:
{completion_delimiter}
""",
"""------Example 3------
Entity_types: [organization,person,equiment,product,technology,location,event,category]
Text:
"""<Input Text>
```
At the World Athletics Championship in Tokyo, Noah Carter broke the 100m sprint record using cutting-edge carbon-fiber spikes.
```
Output:
<Output>
(entity{tuple_delimiter}World Athletics Championship{tuple_delimiter}event{tuple_delimiter}The World Athletics Championship is a global sports competition featuring top athletes in track and field.){record_delimiter}
(entity{tuple_delimiter}Tokyo{tuple_delimiter}location{tuple_delimiter}Tokyo is the host city of the World Athletics Championship.){record_delimiter}
(entity{tuple_delimiter}Noah Carter{tuple_delimiter}person{tuple_delimiter}Noah Carter is a sprinter who set a new record in the 100m sprint at the World Athletics Championship.){record_delimiter}
@ -139,29 +138,6 @@ Output:
(relationship{tuple_delimiter}Noah Carter{tuple_delimiter}World Athletics Championship{tuple_delimiter}athlete participation, competition{tuple_delimiter}Noah Carter is competing at the World Athletics Championship.){record_delimiter}
{completion_delimiter}
""",
"""------Example 4------
Entity_types: [organization,person,equiment,product,technology,location,event,category]
Text:
```
在北京举行的人工智能大会上腾讯公司的首席技术官张伟发布了最新的大语言模型"腾讯智言"该模型在自然语言处理方面取得了重大突破
```
Output:
(entity{tuple_delimiter}人工智能大会{tuple_delimiter}event{tuple_delimiter}人工智能大会是在北京举行的技术会议专注于人工智能领域的最新发展){record_delimiter}
(entity{tuple_delimiter}北京{tuple_delimiter}location{tuple_delimiter}北京是人工智能大会的举办城市){record_delimiter}
(entity{tuple_delimiter}腾讯公司{tuple_delimiter}organization{tuple_delimiter}腾讯公司是参与人工智能大会的科技企业发布了新的语言模型产品){record_delimiter}
(entity{tuple_delimiter}张伟{tuple_delimiter}person{tuple_delimiter}张伟是腾讯公司的首席技术官在大会上发布了新产品){record_delimiter}
(entity{tuple_delimiter}腾讯智言{tuple_delimiter}product{tuple_delimiter}腾讯智言是腾讯公司发布的大语言模型产品在自然语言处理方面有重大突破){record_delimiter}
(entity{tuple_delimiter}自然语言处理技术{tuple_delimiter}technology{tuple_delimiter}自然语言处理技术是腾讯智言模型取得重大突破的技术领域){record_delimiter}
(relationship{tuple_delimiter}人工智能大会{tuple_delimiter}北京{tuple_delimiter}会议地点, 举办关系{tuple_delimiter}人工智能大会在北京举行){record_delimiter}
(relationship{tuple_delimiter}张伟{tuple_delimiter}腾讯公司{tuple_delimiter}雇佣关系, 高管职位{tuple_delimiter}张伟担任腾讯公司的首席技术官){record_delimiter}
(relationship{tuple_delimiter}张伟{tuple_delimiter}腾讯智言{tuple_delimiter}产品发布, 技术展示{tuple_delimiter}张伟在大会上发布了腾讯智言大语言模型){record_delimiter}
(relationship{tuple_delimiter}腾讯智言{tuple_delimiter}自然语言处理技术{tuple_delimiter}技术应用, 突破创新{tuple_delimiter}腾讯智言在自然语言处理技术方面取得了重大突破){record_delimiter}
{completion_delimiter}
""",
]
@ -174,9 +150,10 @@ Your task is to synthesize a list of descriptions of a given entity or relation
---Instructions---
1. **Comprehensiveness:** The summary must integrate key information from all provided descriptions. Do not omit important facts.
2. **Context:** The summary must explicitly mention the name of the entity or relation for full context.
3. **Style:** The output must be written from an objective, third-person perspective.
4. **Length:** Maintain depth and completeness while ensuring the summary's length not exceed {summary_length} tokens.
5. **Language:** The entire output must be written in {language}.
3. **Conflict:** In case of conflicting or inconsistent descriptions, determine if they originate from multiple, distinct entities or relationships that share the same name. If so, summarize each entity or relationship separately and then consolidate all summaries.
4. **Style:** The output must be written from an objective, third-person perspective.
5. **Length:** Maintain depth and completeness while ensuring the summary's length not exceed {summary_length} tokens.
6. **Language:** The entire output must be written in {language}.
---Data---
{description_type} Name: {description_name}
@ -184,49 +161,8 @@ Description List:
{description_list}
---Output---
Output:"""
PROMPTS["entity_continue_extraction"] = """
MANY entities and relationships were missed in the last extraction. Please find only the missing entities and relationships from previous text. Do not include entities and relations that have been previously extracted. :\n
---Remember Steps---
1. Recognizing definitively conceptualized entities in text. For each identified entity, extract the following information:
- entity_name: Name of the entity, use same language as input text. If English, capitalized the name
- entity_type: One of the following types: [{entity_types}]. If the entity doesn't clearly fit any category, classify it as "Other".
- entity_description: Provide a comprehensive description of the entity's attributes and activities based on the information present in the input text. Do not add external knowledge.
2. Format each entity as:
("entity"{tuple_delimiter}<entity_name>{tuple_delimiter}<entity_type>{tuple_delimiter}<entity_description>)
3. From the entities identified in step 1, identify all pairs of (source_entity, target_entity) that are directly and clearly related based on the text. Unsubstantiated relationships must be excluded from the output.
For each pair of related entities, extract the following information:
- source_entity: name of the source entity, as identified in step 1
- target_entity: name of the target entity, as identified in step 1
- relationship_keywords: one or more high-level key words that summarize the overarching nature of the relationship, focusing on concepts or themes rather than specific details
- relationship_description: Explain the nature of the relationship between the source and target entities, providing a clear rationale for their connection
4. Format each relationship as:
("relationship"{tuple_delimiter}<source_entity>{tuple_delimiter}<target_entity>{tuple_delimiter}<relationship_keywords>{tuple_delimiter}<relationship_description>)
5. Use `{tuple_delimiter}` as field delimiter. Use `{record_delimiter}` as the list delimiter. Ensure no spaces are added around the delimiters.
6. When finished, output `{completion_delimiter}`
7. Return identified entities and relationships in {language}.
---Output---
Output:
"""
PROMPTS["entity_if_loop_extraction"] = """
---Goal---'
It appears some entities may have still been missed.
---Output---
Output:
""".strip()
PROMPTS["fail_response"] = (
"Sorry, I'm not able to provide an answer to that question.[no-context]"
)
@ -269,7 +205,7 @@ Generate a concise response based on Knowledge Base and follow Response Rules, c
- Additional user prompt: {user_prompt}
---Response---
Output:"""
"""
PROMPTS["keywords_extraction"] = """---Role---
You are an expert keyword extractor, specializing in analyzing user queries for a Retrieval-Augmented Generation (RAG) system. Your purpose is to identify both high-level and low-level keywords in the user's query that will be used for effective document retrieval.

View file

@ -14,7 +14,7 @@ from dataclasses import dataclass
from datetime import datetime
from functools import wraps
from hashlib import md5
from typing import Any, Protocol, Callable, TYPE_CHECKING, List
from typing import Any, Protocol, Callable, TYPE_CHECKING, List, Optional
import numpy as np
from dotenv import load_dotenv
@ -57,6 +57,50 @@ except ImportError:
)
async def safe_vdb_operation_with_exception(
operation: Callable,
operation_name: str,
entity_name: str = "",
max_retries: int = 3,
retry_delay: float = 0.2,
logger_func: Optional[Callable] = None,
) -> None:
"""
Safely execute vector database operations with retry mechanism and exception handling.
This function ensures that VDB operations are executed with proper error handling
and retry logic. If all retries fail, it raises an exception to maintain data consistency.
Args:
operation: The async operation to execute
operation_name: Operation name for logging purposes
entity_name: Entity name for logging purposes
max_retries: Maximum number of retry attempts
retry_delay: Delay between retries in seconds
logger_func: Logger function to use for error messages
Raises:
Exception: When operation fails after all retry attempts
"""
log_func = logger_func or logger.warning
for attempt in range(max_retries):
try:
await operation()
return # Success, return immediately
except Exception as e:
if attempt >= max_retries - 1:
error_msg = f"VDB {operation_name} failed for {entity_name} after {max_retries} attempts: {e}"
log_func(error_msg)
raise Exception(error_msg) from e
else:
log_func(
f"VDB {operation_name} attempt {attempt + 1} failed for {entity_name}: {e}, retrying..."
)
if retry_delay > 0:
await asyncio.sleep(retry_delay)
def get_env_value(
env_key: str, default: any, value_type: type = str, special_none: bool = False
) -> any:
@ -429,12 +473,12 @@ def priority_limit_async_func_call(
nonlocal max_execution_timeout, max_task_duration
if max_execution_timeout is None:
max_execution_timeout = (
llm_timeout + 30
) # LLM timeout + 30s buffer for network delays
llm_timeout * 2
) # Reserved timeout buffer for low-level retry
if max_task_duration is None:
max_task_duration = (
llm_timeout + 60
) # LLM timeout + 1min buffer for execution phase
llm_timeout * 2 + 15
) # Reserved timeout buffer for health check phase
queue = asyncio.PriorityQueue(maxsize=max_queue_size)
tasks = set()
@ -663,7 +707,7 @@ def priority_limit_async_func_call(
timeout_info.append(f"Health Check: {max_task_duration}s")
timeout_str = (
f" (Timeouts: {', '.join(timeout_info)})" if timeout_info else ""
f"(Timeouts: {', '.join(timeout_info)})" if timeout_info else ""
)
logger.info(
f"{queue_name}: {workers_needed} new workers initialized {timeout_str}"
@ -990,7 +1034,7 @@ async def handle_cache(
args_hash,
prompt,
mode="default",
cache_type=None,
cache_type="unknown",
) -> str | None:
"""Generic cache handling function with flattened cache keys"""
if hashing_kv is None:
@ -1594,14 +1638,18 @@ async def update_chunk_cache_list(
def remove_think_tags(text: str) -> str:
"""Remove <think> tags from the text"""
return re.sub(r"^(<think>.*?</think>|<think>)", "", text, flags=re.DOTALL).strip()
"""Remove <think>...</think> tags from the text
Remove orphon ...</think> tags from the text also"""
return re.sub(
r"^(<think>.*?</think>|.*</think>)", "", text, flags=re.DOTALL
).strip()
async def use_llm_func_with_cache(
input_text: str,
user_prompt: str,
use_llm_func: callable,
llm_response_cache: "BaseKVStorage | None" = None,
system_prompt: str | None = None,
max_tokens: int = None,
history_messages: list[dict[str, str]] = None,
cache_type: str = "extract",
@ -1630,7 +1678,10 @@ async def use_llm_func_with_cache(
LLM response text
"""
# Sanitize input text to prevent UTF-8 encoding errors for all LLM providers
safe_input_text = sanitize_text_for_encoding(input_text)
safe_user_prompt = sanitize_text_for_encoding(user_prompt)
safe_system_prompt = (
sanitize_text_for_encoding(system_prompt) if system_prompt else None
)
# Sanitize history messages if provided
safe_history_messages = None
@ -1641,13 +1692,19 @@ async def use_llm_func_with_cache(
if "content" in safe_msg:
safe_msg["content"] = sanitize_text_for_encoding(safe_msg["content"])
safe_history_messages.append(safe_msg)
history = json.dumps(safe_history_messages, ensure_ascii=False)
else:
history = None
if llm_response_cache:
if safe_history_messages:
history = json.dumps(safe_history_messages, ensure_ascii=False)
_prompt = history + "\n" + safe_input_text
else:
_prompt = safe_input_text
prompt_parts = []
if safe_user_prompt:
prompt_parts.append(safe_user_prompt)
if safe_system_prompt:
prompt_parts.append(safe_system_prompt)
if history:
prompt_parts.append(history)
_prompt = "\n".join(prompt_parts)
arg_hash = compute_args_hash(_prompt)
# Generate cache key for this LLM call
@ -1678,13 +1735,9 @@ async def use_llm_func_with_cache(
if max_tokens is not None:
kwargs["max_tokens"] = max_tokens
try:
res: str = await use_llm_func(safe_input_text, **kwargs)
except Exception as e:
# Add [LLM func] prefix to error message
error_msg = f"[LLM func] {str(e)}"
# Re-raise with the same exception type but modified message
raise type(e)(error_msg) from e
res: str = await use_llm_func(
safe_user_prompt, system_prompt=safe_system_prompt, **kwargs
)
res = remove_think_tags(res)
@ -1714,7 +1767,9 @@ async def use_llm_func_with_cache(
kwargs["max_tokens"] = max_tokens
try:
res = await use_llm_func(safe_input_text, **kwargs)
res = await use_llm_func(
safe_user_prompt, system_prompt=safe_system_prompt, **kwargs
)
except Exception as e:
# Add [LLM func] prefix to error message
error_msg = f"[LLM func] {str(e)}"
@ -1762,17 +1817,22 @@ def sanitize_and_normalize_extracted_text(
def normalize_extracted_info(name: str, remove_inner_quotes=False) -> str:
"""Normalize entity/relation names and description with the following rules:
1. Clean HTML tags (paragraph and line break tags)
2. Convert Chinese symbols to English symbols
3. Remove spaces between Chinese characters
4. Remove spaces between Chinese characters and English letters/numbers
5. Preserve spaces within English text and numbers
6. Replace Chinese parentheses with English parentheses
7. Replace Chinese dash with English dash
8. Remove English quotation marks from the beginning and end of the text
9. Remove English quotation marks in and around chinese
10. Remove Chinese quotation marks
11. Filter out short numeric-only text (length < 3 and only digits/dots)
- Clean HTML tags (paragraph and line break tags)
- Convert Chinese symbols to English symbols
- Remove spaces between Chinese characters
- Remove spaces between Chinese characters and English letters/numbers
- Preserve spaces within English text and numbers
- Replace Chinese parentheses with English parentheses
- Replace Chinese dash with English dash
- Remove English quotation marks from the beginning and end of the text
- Remove English quotation marks in and around chinese
- Remove Chinese quotation marks
- Filter out short numeric-only text (length < 3 and only digits/dots)
- remove_inner_quotes = True
remove Chinese quotes
remove English queotes in and around chinese
Convert non-breaking spaces to regular spaces
Convert narrow non-breaking spaces after non-digits to regular spaces
Args:
name: Entity name to normalize
@ -1781,11 +1841,10 @@ def normalize_extracted_info(name: str, remove_inner_quotes=False) -> str:
Returns:
Normalized entity name
"""
# 1. Clean HTML tags - remove paragraph and line break tags
# Clean HTML tags - remove paragraph and line break tags
name = re.sub(r"</p\s*>|<p\s*>|<p/>", "", name, flags=re.IGNORECASE)
name = re.sub(r"</br\s*>|<br\s*>|<br/>", "", name, flags=re.IGNORECASE)
# 2. Convert Chinese symbols to English symbols
# Chinese full-width letters to half-width (A-Z, a-z)
name = name.translate(
str.maketrans(
@ -1851,12 +1910,22 @@ def normalize_extracted_info(name: str, remove_inner_quotes=False) -> str:
if "" not in inner_content and "" not in inner_content:
name = inner_content
# Handle Chinese-style book title mark
if name.startswith("") and name.endswith(""):
inner_content = name[1:-1]
if "" not in inner_content and "" not in inner_content:
name = inner_content
if remove_inner_quotes:
# remove Chinese quotes
# Remove Chinese quotes
name = name.replace("", "").replace("", "").replace("", "").replace("", "")
# remove English queotes in and around chinese
# Remove English quotes in and around Chinese
name = re.sub(r"['\"]+(?=[\u4e00-\u9fa5])", "", name)
name = re.sub(r"(?<=[\u4e00-\u9fa5])['\"]+", "", name)
# Convert non-breaking space to regular space
name = name.replace("\u00a0", " ")
# Convert narrow non-breaking space to regular space when after non-digits
name = re.sub(r"(?<=[^\d])\u202F", " ", name)
# Remove spaces from the beginning and end of the text
name = name.strip()
@ -1946,8 +2015,8 @@ def sanitize_text_for_encoding(text: str, replacement_char: str = "") -> str:
# Unescape HTML escapes
sanitized = html.unescape(sanitized)
# Remove control characters
sanitized = re.sub(r"[\x00-\x1f\x7f-\x9f]", "", sanitized)
# Remove control characters but preserve common whitespace (\t, \n, \r)
sanitized = re.sub(r"[\x00-\x08\x0B\x0C\x0E-\x1F\x7F-\x9F]", "", sanitized)
return sanitized.strip()
@ -2533,3 +2602,45 @@ def get_pinyin_sort_key(text: str) -> str:
else:
# pypinyin not available, use simple string sorting
return text.lower()
def create_prefixed_exception(original_exception: Exception, prefix: str) -> Exception:
"""
Safely create a prefixed exception that adapts to all error types.
Args:
original_exception: The original exception.
prefix: The prefix to add.
Returns:
A new exception with the prefix, maintaining the original exception type if possible.
"""
try:
# Method 1: Try to reconstruct using original arguments.
if hasattr(original_exception, "args") and original_exception.args:
args = list(original_exception.args)
# Find the first string argument and prefix it. This is safer for
# exceptions like OSError where the first arg is an integer (errno).
found_str = False
for i, arg in enumerate(args):
if isinstance(arg, str):
args[i] = f"{prefix}: {arg}"
found_str = True
break
# If no string argument is found, prefix the first argument's string representation.
if not found_str:
args[0] = f"{prefix}: {args[0]}"
return type(original_exception)(*args)
else:
# Method 2: If no args, try single parameter construction.
return type(original_exception)(f"{prefix}: {str(original_exception)}")
except (TypeError, ValueError, AttributeError) as construct_error:
# Method 3: If reconstruction fails, wrap it in a RuntimeError.
# This is the safest fallback, as attempting to create the same type
# with a single string can fail if the constructor requires multiple arguments.
return RuntimeError(
f"{prefix}: {type(original_exception).__name__}: {str(original_exception)} "
f"(Original exception could not be reconstructed: {construct_error})"
)

View file

@ -99,6 +99,9 @@ export type QueryMode = 'naive' | 'local' | 'global' | 'hybrid' | 'mix' | 'bypas
export type Message = {
role: 'user' | 'assistant' | 'system'
content: string
thinkingContent?: string
displayContent?: string
thinkingTime?: number | null
}
export type QueryRequest = {

View file

@ -15,12 +15,13 @@ import type { Element } from 'hast'
import { Prism as SyntaxHighlighter } from 'react-syntax-highlighter'
import { oneLight, oneDark } from 'react-syntax-highlighter/dist/cjs/styles/prism'
import { LoaderIcon, CopyIcon } from 'lucide-react'
import { LoaderIcon, CopyIcon, ChevronDownIcon } from 'lucide-react'
import { useTranslation } from 'react-i18next'
export type MessageWithError = Message & {
id: string // Unique identifier for stable React keys
isError?: boolean
isThinking?: boolean // Flag to indicate if the message is in a "thinking" state
/**
* Indicates if the mermaid diagram in this message has been rendered.
* Used to persist the rendering state across updates and prevent flickering.
@ -33,6 +34,26 @@ export const ChatMessage = ({ message }: { message: MessageWithError }) => { //
const { t } = useTranslation()
const { theme } = useTheme()
const [katexPlugin, setKatexPlugin] = useState<any>(null)
const [isThinkingExpanded, setIsThinkingExpanded] = useState<boolean>(false)
// Directly use props passed from the parent.
const { thinkingContent, displayContent, thinkingTime, isThinking } = message
// Reset expansion state when new thinking starts
useEffect(() => {
if (isThinking) {
// When thinking starts, always reset to collapsed state
setIsThinkingExpanded(false)
}
}, [isThinking, message.id])
// The content to display is now non-ambiguous.
const finalThinkingContent = thinkingContent
// For user messages, displayContent will be undefined, so we fall back to content.
// For assistant messages, we prefer displayContent but fallback to content for backward compatibility
const finalDisplayContent = message.role === 'user'
? message.content
: (displayContent !== undefined ? displayContent : (message.content || ''))
// Load KaTeX dynamically
useEffect(() => {
@ -59,6 +80,27 @@ export const ChatMessage = ({ message }: { message: MessageWithError }) => { //
}
}, [message, t]) // Added t to dependency array
const mainMarkdownComponents = useMemo(() => ({
code: (props: any) => (
<CodeHighlight
{...props}
renderAsDiagram={message.mermaidRendered ?? false}
/>
),
p: ({ children }: { children?: ReactNode }) => <p className="my-2">{children}</p>,
h1: ({ children }: { children?: ReactNode }) => <h1 className="text-xl font-bold mt-4 mb-2">{children}</h1>,
h2: ({ children }: { children?: ReactNode }) => <h2 className="text-lg font-bold mt-4 mb-2">{children}</h2>,
h3: ({ children }: { children?: ReactNode }) => <h3 className="text-base font-bold mt-3 mb-2">{children}</h3>,
h4: ({ children }: { children?: ReactNode }) => <h4 className="text-base font-semibold mt-3 mb-2">{children}</h4>,
ul: ({ children }: { children?: ReactNode }) => <ul className="list-disc pl-5 my-2">{children}</ul>,
ol: ({ children }: { children?: ReactNode }) => <ol className="list-decimal pl-5 my-2">{children}</ol>,
li: ({ children }: { children?: ReactNode }) => <li className="my-1">{children}</li>
}), [message.mermaidRendered]);
const thinkingMarkdownComponents = useMemo(() => ({
code: (props: any) => (<CodeHighlight {...props} renderAsDiagram={message.mermaidRendered ?? false} />)
}), [message.mermaidRendered]);
return (
<div
className={`${
@ -69,55 +111,93 @@ export const ChatMessage = ({ message }: { message: MessageWithError }) => { //
: 'w-[95%] bg-muted'
} rounded-lg px-4 py-2`}
>
<div className="relative">
<ReactMarkdown
className="prose dark:prose-invert max-w-none text-sm break-words prose-headings:mt-4 prose-headings:mb-2 prose-p:my-2 prose-ul:my-2 prose-ol:my-2 prose-li:my-1 [&_.katex]:text-current [&_.katex-display]:my-4 [&_.katex-display]:overflow-x-auto"
remarkPlugins={[remarkGfm, remarkMath]}
rehypePlugins={[
...(katexPlugin ? [[
katexPlugin,
{
errorColor: theme === 'dark' ? '#ef4444' : '#dc2626',
throwOnError: false,
displayMode: false
{/* Thinking process display - only for assistant messages */}
{message.role === 'assistant' && (isThinking || thinkingTime !== null) && (
<div className="mb-2">
<div
className="flex items-center text-gray-500 dark:text-gray-400 hover:text-gray-700 dark:hover:text-gray-200 transition-colors duration-200 text-sm cursor-pointer select-none"
onClick={() => {
// Allow expansion when there's thinking content, even during thinking process
if (finalThinkingContent && finalThinkingContent.trim() !== '') {
setIsThinkingExpanded(!isThinkingExpanded)
}
] as any] : []),
rehypeReact
]}
skipHtml={false}
// Memoize the components object to prevent unnecessary re-renders of ReactMarkdown children
components={useMemo(() => ({
code: (props: any) => ( // Add type annotation if needed, e.g., props: CodeProps from 'react-markdown/lib/ast-to-react'
<CodeHighlight
{...props}
renderAsDiagram={message.mermaidRendered ?? false}
/>
),
p: ({ children }: { children?: ReactNode }) => <p className="my-2">{children}</p>,
h1: ({ children }: { children?: ReactNode }) => <h1 className="text-xl font-bold mt-4 mb-2">{children}</h1>,
h2: ({ children }: { children?: ReactNode }) => <h2 className="text-lg font-bold mt-4 mb-2">{children}</h2>,
h3: ({ children }: { children?: ReactNode }) => <h3 className="text-base font-bold mt-3 mb-2">{children}</h3>,
h4: ({ children }: { children?: ReactNode }) => <h4 className="text-base font-semibold mt-3 mb-2">{children}</h4>,
ul: ({ children }: { children?: ReactNode }) => <ul className="list-disc pl-5 my-2">{children}</ul>,
ol: ({ children }: { children?: ReactNode }) => <ol className="list-decimal pl-5 my-2">{children}</ol>,
li: ({ children }: { children?: ReactNode }) => <li className="my-1">{children}</li>
}), [message.mermaidRendered])} // Dependency ensures update if mermaid state changes
>
{message.content}
</ReactMarkdown>
{message.role === 'assistant' && message.content && message.content.length > 0 && ( // Added check for message.content existence
<Button
onClick={handleCopyMarkdown}
className="absolute right-0 bottom-0 size-6 rounded-md opacity-20 transition-opacity hover:opacity-100"
tooltip={t('retrievePanel.chatMessage.copyTooltip')}
variant="default"
size="icon"
}}
>
<CopyIcon className="size-4" /> {/* Explicit size */}
</Button>
)}
</div>
{message.content === '' && <LoaderIcon className="animate-spin duration-2000" />} {/* Check for empty string specifically */}
{isThinking ? (
<>
<LoaderIcon className="mr-2 size-4 animate-spin" />
<span>{t('retrievePanel.chatMessage.thinking')}</span>
</>
) : (
typeof thinkingTime === 'number' && <span>{t('retrievePanel.chatMessage.thinkingTime', { time: thinkingTime })}</span>
)}
{/* Show chevron when there's thinking content, even during thinking process */}
{finalThinkingContent && finalThinkingContent.trim() !== '' && <ChevronDownIcon className={`ml-2 size-4 shrink-0 transition-transform ${isThinkingExpanded ? 'rotate-180' : ''}`} />}
</div>
{/* Show thinking content when expanded and content exists, even during thinking process */}
{isThinkingExpanded && finalThinkingContent && finalThinkingContent.trim() !== '' && (
<div className="mt-2 pl-4 border-l-2 border-primary/20 text-sm prose dark:prose-invert max-w-none break-words prose-p:my-1 prose-headings:my-2">
{isThinking && (
<div className="mb-2 text-xs text-gray-400 dark:text-gray-500 italic">
{t('retrievePanel.chatMessage.thinkingInProgress', 'Thinking in progress...')}
</div>
)}
<ReactMarkdown
remarkPlugins={[remarkGfm, remarkMath]}
rehypePlugins={[
...(katexPlugin ? [[katexPlugin, { errorColor: theme === 'dark' ? '#ef4444' : '#dc2626', throwOnError: false, displayMode: false }] as any] : []),
rehypeReact
]}
skipHtml={false}
components={thinkingMarkdownComponents}
>
{finalThinkingContent}
</ReactMarkdown>
</div>
)}
</div>
)}
{/* Main content display */}
{finalDisplayContent && (
<div className="relative">
<ReactMarkdown
className="prose dark:prose-invert max-w-none text-sm break-words prose-headings:mt-4 prose-headings:mb-2 prose-p:my-2 prose-ul:my-2 prose-ol:my-2 prose-li:my-1 [&_.katex]:text-current [&_.katex-display]:my-4 [&_.katex-display]:overflow-x-auto"
remarkPlugins={[remarkGfm, remarkMath]}
rehypePlugins={[
...(katexPlugin ? [[
katexPlugin,
{
errorColor: theme === 'dark' ? '#ef4444' : '#dc2626',
throwOnError: false,
displayMode: false
}
] as any] : []),
rehypeReact
]}
skipHtml={false}
components={mainMarkdownComponents}
>
{finalDisplayContent}
</ReactMarkdown>
{message.role === 'assistant' && finalDisplayContent && finalDisplayContent.length > 0 && (
<Button
onClick={handleCopyMarkdown}
className="absolute right-0 bottom-0 size-6 rounded-md opacity-20 transition-opacity hover:opacity-100"
tooltip={t('retrievePanel.chatMessage.copyTooltip')}
variant="default"
size="icon"
>
<CopyIcon className="size-4" /> {/* Explicit size */}
</Button>
)}
</div>
)}
{(() => {
// More comprehensive loading state check
const hasVisibleContent = finalDisplayContent && finalDisplayContent.trim() !== '';
const isLoadingState = !hasVisibleContent && !isThinking && !thinkingTime;
return isLoadingState && <LoaderIcon className="animate-spin duration-2000" />;
})()}
</div>
)
}

View file

@ -32,7 +32,7 @@ import { errorMessage } from '@/lib/utils'
import { toast } from 'sonner'
import { useBackendState } from '@/stores/state'
import { RefreshCwIcon, ActivityIcon, ArrowUpIcon, ArrowDownIcon, RotateCcwIcon, CheckSquareIcon, XIcon } from 'lucide-react'
import { RefreshCwIcon, ActivityIcon, ArrowUpIcon, ArrowDownIcon, RotateCcwIcon, CheckSquareIcon, XIcon, AlertTriangle, Info } from 'lucide-react'
import PipelineStatusDialog from '@/components/documents/PipelineStatusDialog'
type StatusFilter = DocStatus | 'all';
@ -59,6 +59,26 @@ const getDisplayFileName = (doc: DocStatusResponse, maxLength: number = 20): str
: fileName;
};
// Pretty-print a document's metadata object as 2-space-indented JSON,
// converting known epoch-seconds timestamp fields into locale-formatted
// date strings for human-readable display in the tooltip.
const formatMetadata = (metadata: Record<string, any>): string => {
  // Shallow copy so the caller's object is never mutated.
  const formattedMetadata = { ...metadata };
  // Both timestamp fields share identical conversion logic; handle them
  // uniformly instead of duplicating the block per field.
  for (const field of ['processing_start_time', 'processing_end_time']) {
    const value = formattedMetadata[field];
    if (value && typeof value === 'number') {
      const date = new Date(value * 1000); // backend stores epoch seconds
      // Guard against invalid dates (e.g. out-of-range numeric values).
      if (!isNaN(date.getTime())) {
        formattedMetadata[field] = date.toLocaleString();
      }
    }
  }
  return JSON.stringify(formattedMetadata, null, 2);
};
const pulseStyle = `
/* Tooltip styles */
.tooltip-container {
@ -73,6 +93,7 @@ const pulseStyle = `
white-space: normal;
border-radius: 0.375rem;
padding: 0.5rem 0.75rem;
font-size: 0.75rem; /* 12px */
background-color: rgba(0, 0, 0, 0.95);
color: white;
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
@ -1168,23 +1189,39 @@ export default function DocumentManager() {
</div>
</TableCell>
<TableCell>
{doc.status === 'processed' && (
<span className="text-green-600">{t('documentPanel.documentManager.status.completed')}</span>
)}
{doc.status === 'processing' && (
<span className="text-blue-600">{t('documentPanel.documentManager.status.processing')}</span>
)}
{doc.status === 'pending' && (
<span className="text-yellow-600">{t('documentPanel.documentManager.status.pending')}</span>
)}
{doc.status === 'failed' && (
<span className="text-red-600">{t('documentPanel.documentManager.status.failed')}</span>
)}
{doc.error_msg && (
<span className="ml-2 text-red-500" title={doc.error_msg}>
</span>
)}
<div className="group relative flex items-center overflow-visible tooltip-container">
{doc.status === 'processed' && (
<span className="text-green-600">{t('documentPanel.documentManager.status.completed')}</span>
)}
{doc.status === 'processing' && (
<span className="text-blue-600">{t('documentPanel.documentManager.status.processing')}</span>
)}
{doc.status === 'pending' && (
<span className="text-yellow-600">{t('documentPanel.documentManager.status.pending')}</span>
)}
{doc.status === 'failed' && (
<span className="text-red-600">{t('documentPanel.documentManager.status.failed')}</span>
)}
{/* Icon rendering logic */}
{doc.error_msg ? (
<AlertTriangle className="ml-2 h-4 w-4 text-yellow-500" />
) : (doc.metadata && Object.keys(doc.metadata).length > 0) && (
<Info className="ml-2 h-4 w-4 text-blue-500" />
)}
{/* Tooltip rendering logic */}
{(doc.error_msg || (doc.metadata && Object.keys(doc.metadata).length > 0)) && (
<div className="invisible group-hover:visible tooltip">
{doc.error_msg && (
<pre>{doc.error_msg}</pre>
)}
{doc.metadata && Object.keys(doc.metadata).length > 0 && (
<pre>{formatMetadata(doc.metadata)}</pre>
)}
</div>
)}
</div>
</TableCell>
<TableCell>{doc.content_length ?? '-'}</TableCell>
<TableCell>{doc.chunks_count ?? '-'}</TableCell>

View file

@ -58,6 +58,8 @@ export default function RetrievalTesting() {
const [inputError, setInputError] = useState('') // Error message for input
// Reference to track if we should follow scroll during streaming (using ref for synchronous updates)
const shouldFollowScrollRef = useRef(true)
const thinkingStartTime = useRef<number | null>(null)
const thinkingProcessed = useRef(false)
// Reference to track if user interaction is from the form area
const isFormInteractionRef = useRef(false)
// Reference to track if scroll was triggered programmatically
@ -67,6 +69,16 @@ export default function RetrievalTesting() {
const messagesEndRef = useRef<HTMLDivElement>(null)
const messagesContainerRef = useRef<HTMLDivElement>(null)
// Add cleanup effect for memory leak prevention
useEffect(() => {
// Component cleanup - reset timer state to prevent memory leaks
return () => {
if (thinkingStartTime.current) {
thinkingStartTime.current = null;
}
};
}, []);
// Scroll to bottom function - restored smooth scrolling with better handling
const scrollToBottom = useCallback(() => {
// Set flag to indicate this is a programmatic scroll
@ -115,6 +127,10 @@ export default function RetrievalTesting() {
// Clear error message
setInputError('')
// Reset thinking timer state for new query to prevent confusion
thinkingStartTime.current = null
thinkingProcessed.current = false
// Create messages
// Save the original input (with prefix if any) in userMessage.content for display
const userMessage: MessageWithError = {
@ -127,7 +143,11 @@ export default function RetrievalTesting() {
id: generateUniqueId(), // Use browser-compatible ID generation
content: '',
role: 'assistant',
mermaidRendered: false
mermaidRendered: false,
thinkingTime: null, // Explicitly initialize to null
thinkingContent: undefined, // Explicitly initialize to undefined
displayContent: undefined, // Explicitly initialize to undefined
isThinking: false // Explicitly initialize to false
}
const prevMessages = [...messages]
@ -153,6 +173,47 @@ export default function RetrievalTesting() {
const updateAssistantMessage = (chunk: string, isError?: boolean) => {
assistantMessage.content += chunk
// Start thinking timer on first sight of think tag
if (assistantMessage.content.includes('<think>') && !thinkingStartTime.current) {
thinkingStartTime.current = Date.now()
}
// Real-time parsing for streaming
const thinkStartTag = '<think>'
const thinkEndTag = '</think>'
const thinkStartIndex = assistantMessage.content.indexOf(thinkStartTag)
const thinkEndIndex = assistantMessage.content.indexOf(thinkEndTag)
if (thinkStartIndex !== -1) {
if (thinkEndIndex !== -1) {
// Thinking has finished for this chunk
assistantMessage.isThinking = false
// Only calculate time and extract thinking content once
if (!thinkingProcessed.current) {
if (thinkingStartTime.current && !assistantMessage.thinkingTime) {
const duration = (Date.now() - thinkingStartTime.current) / 1000
assistantMessage.thinkingTime = parseFloat(duration.toFixed(2))
}
assistantMessage.thinkingContent = assistantMessage.content
.substring(thinkStartIndex + thinkStartTag.length, thinkEndIndex)
.trim()
thinkingProcessed.current = true
}
// Always update display content as content after </think> may grow
assistantMessage.displayContent = assistantMessage.content.substring(thinkEndIndex + thinkEndTag.length).trim()
} else {
// Still thinking - update thinking content in real-time
assistantMessage.isThinking = true
assistantMessage.thinkingContent = assistantMessage.content.substring(thinkStartIndex + thinkStartTag.length)
assistantMessage.displayContent = ''
}
} else {
assistantMessage.isThinking = false
assistantMessage.displayContent = assistantMessage.content
}
// Detect if the assistant message contains a complete mermaid code block
// Simple heuristic: look for ```mermaid ... ```
const mermaidBlockRegex = /```mermaid\s+([\s\S]+?)```/g
@ -167,13 +228,21 @@ export default function RetrievalTesting() {
}
assistantMessage.mermaidRendered = mermaidRendered
// Single unified update to avoid race conditions
setMessages((prev) => {
const newMessages = [...prev]
const lastMessage = newMessages[newMessages.length - 1]
if (lastMessage.role === 'assistant') {
lastMessage.content = assistantMessage.content
lastMessage.isError = isError
lastMessage.mermaidRendered = assistantMessage.mermaidRendered
if (lastMessage && lastMessage.id === assistantMessage.id) {
// Update all properties at once to maintain consistency
Object.assign(lastMessage, {
content: assistantMessage.content,
thinkingContent: assistantMessage.thinkingContent,
displayContent: assistantMessage.displayContent,
isThinking: assistantMessage.isThinking,
isError: isError,
mermaidRendered: assistantMessage.mermaidRendered,
thinkingTime: assistantMessage.thinkingTime
})
}
return newMessages
})
@ -223,9 +292,30 @@ export default function RetrievalTesting() {
// Clear loading and add messages to state
setIsLoading(false)
isReceivingResponseRef.current = false
useSettingsStore
.getState()
.setRetrievalHistory([...prevMessages, userMessage, assistantMessage])
// Enhanced cleanup with error handling to prevent memory leaks
try {
// Final calculation for thinking time, only if not already calculated
if (assistantMessage.thinkingContent && thinkingStartTime.current && !assistantMessage.thinkingTime) {
const duration = (Date.now() - thinkingStartTime.current) / 1000
assistantMessage.thinkingTime = parseFloat(duration.toFixed(2))
}
} catch (error) {
console.error('Error calculating thinking time:', error)
} finally {
// Ensure cleanup happens regardless of errors
assistantMessage.isThinking = false;
thinkingStartTime.current = null;
}
// Save history with error handling
try {
useSettingsStore
.getState()
.setRetrievalHistory([...prevMessages, userMessage, assistantMessage])
} catch (error) {
console.error('Error saving retrieval history:', error)
}
}
},
[inputValue, isLoading, messages, setMessages, t, scrollToBottom]

View file

@ -187,7 +187,10 @@
"unknown": "غير معروف",
"object": "مصنوع",
"group": "مجموعة",
"technology": "العلوم"
"technology": "العلوم",
"product": "منتج",
"document": "وثيقة",
"other": "أخرى"
},
"sideBar": {
"settings": {
@ -334,7 +337,10 @@
"retrievePanel": {
"chatMessage": {
"copyTooltip": "نسخ إلى الحافظة",
"copyError": "فشل نسخ النص إلى الحافظة"
"copyError": "فشل نسخ النص إلى الحافظة",
"thinking": "جاري التفكير...",
"thinkingTime": "وقت التفكير {{time}} ثانية",
"thinkingInProgress": "التفكير قيد التقدم..."
},
"retrieval": {
"startPrompt": "ابدأ الاسترجاع بكتابة استفسارك أدناه",

View file

@ -187,7 +187,10 @@
"unknown": "Unknown",
"object": "Object",
"group": "Group",
"technology": "Technology"
"technology": "Technology",
"product": "Product",
"document": "Document",
"other": "Other"
},
"sideBar": {
"settings": {
@ -334,7 +337,10 @@
"retrievePanel": {
"chatMessage": {
"copyTooltip": "Copy to clipboard",
"copyError": "Failed to copy text to clipboard"
"copyError": "Failed to copy text to clipboard",
"thinking": "Thinking...",
"thinkingTime": "Thinking time {{time}}s",
"thinkingInProgress": "Thinking in progress..."
},
"retrieval": {
"startPrompt": "Start a retrieval by typing your query below",

View file

@ -187,7 +187,10 @@
"unknown": "Inconnu",
"object": "Objet",
"group": "Groupe",
"technology": "Technologie"
"technology": "Technologie",
"product": "Produit",
"document": "Document",
"other": "Autre"
},
"sideBar": {
"settings": {
@ -334,7 +337,10 @@
"retrievePanel": {
"chatMessage": {
"copyTooltip": "Copier dans le presse-papiers",
"copyError": "Échec de la copie du texte dans le presse-papiers"
"copyError": "Échec de la copie du texte dans le presse-papiers",
"thinking": "Réflexion en cours...",
"thinkingTime": "Temps de réflexion {{time}}s",
"thinkingInProgress": "Réflexion en cours..."
},
"retrieval": {
"startPrompt": "Démarrez une récupération en tapant votre requête ci-dessous",

View file

@ -187,7 +187,10 @@
"unknown": "未知",
"object": "物品",
"group": "群组",
"technology": "技术"
"technology": "技术",
"product": "产品",
"document": "文档",
"other": "其他"
},
"sideBar": {
"settings": {
@ -334,7 +337,10 @@
"retrievePanel": {
"chatMessage": {
"copyTooltip": "复制到剪贴板",
"copyError": "复制文本到剪贴板失败"
"copyError": "复制文本到剪贴板失败",
"thinking": "正在思考...",
"thinkingTime": "思考用时 {{time}} 秒",
"thinkingInProgress": "思考进行中..."
},
"retrieval": {
"startPrompt": "输入查询开始检索",

View file

@ -187,7 +187,10 @@
"unknown": "未知",
"object": "物品",
"group": "群組",
"technology": "技術"
"technology": "技術",
"product": "產品",
"document": "文檔",
"other": "其他"
},
"sideBar": {
"settings": {
@ -334,7 +337,10 @@
"retrievePanel": {
"chatMessage": {
"copyTooltip": "複製到剪貼簿",
"copyError": "複製文字到剪貼簿失敗"
"copyError": "複製文字到剪貼簿失敗",
"thinking": "正在思考...",
"thinkingTime": "思考用時 {{time}} 秒",
"thinkingInProgress": "思考進行中..."
},
"retrieval": {
"startPrompt": "輸入查詢開始檢索",