from __future__ import annotations
import weakref
import sys
import asyncio
import html
import csv
import json
import logging
import logging.handlers
import os
import re
import time
import uuid
from dataclasses import dataclass
from datetime import datetime
from functools import wraps
from hashlib import md5
from typing import (
Any,
Protocol,
Callable,
TYPE_CHECKING,
List,
Optional,
Iterable,
Sequence,
Collection,
)
import numpy as np
from dotenv import load_dotenv
from lightrag.constants import (
DEFAULT_LOG_MAX_BYTES,
DEFAULT_LOG_BACKUP_COUNT,
DEFAULT_LOG_FILENAME,
GRAPH_FIELD_SEP,
DEFAULT_MAX_TOTAL_TOKENS,
DEFAULT_SOURCE_IDS_LIMIT_METHOD,
VALID_SOURCE_IDS_LIMIT_METHODS,
SOURCE_IDS_LIMIT_METHOD_FIFO,
)
# Precompile regex pattern for JSON sanitization (module-level, compiled once)
_SURROGATE_PATTERN = re.compile(r"[\uD800-\uDFFF\uFFFE\uFFFF]")
class SafeStreamHandler(logging.StreamHandler):
"""StreamHandler that gracefully handles closed streams during shutdown.
This handler prevents "ValueError: I/O operation on closed file" errors
that can occur when pytest or other test frameworks close stdout/stderr
before Python's logging cleanup runs.
"""
def flush(self):
"""Flush the stream, ignoring errors if the stream is closed."""
try:
super().flush()
except (ValueError, OSError):
# Stream is closed or otherwise unavailable, silently ignore
pass
def close(self):
"""Close the handler, ignoring errors if the stream is already closed."""
try:
super().close()
except (ValueError, OSError):
# Stream is closed or otherwise unavailable, silently ignore
pass
# Initialize logger with basic configuration
logger = logging.getLogger("lightrag")
logger.propagate = False  # prevent log messages from propagating to the root logger
logger.setLevel(logging.INFO)
# Add console handler if no handlers exist
if not logger.handlers:
console_handler = SafeStreamHandler()
console_handler.setLevel(logging.INFO)
formatter = logging.Formatter("%(levelname)s: %(message)s")
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
# Set httpx logging level to WARNING
logging.getLogger("httpx").setLevel(logging.WARNING)
def _patch_ascii_colors_console_handler() -> None:
"""Prevent ascii_colors from printing flush errors during interpreter exit."""
try:
from ascii_colors import ConsoleHandler
except ImportError:
return
if getattr(ConsoleHandler, "_lightrag_patched", False):
return
original_handle_error = ConsoleHandler.handle_error
def _safe_handle_error(self, message: str) -> None: # type: ignore[override]
exc_type, _, _ = sys.exc_info()
if exc_type in (ValueError, OSError) and "close" in message.lower():
return
original_handle_error(self, message)
ConsoleHandler.handle_error = _safe_handle_error # type: ignore[assignment]
ConsoleHandler._lightrag_patched = True # type: ignore[attr-defined]
_patch_ascii_colors_console_handler()
# Global import for pypinyin with startup-time logging
try:
import pypinyin
_PYPINYIN_AVAILABLE = True
# logger.info("pypinyin loaded successfully for Chinese pinyin sorting")
except ImportError:
pypinyin = None
_PYPINYIN_AVAILABLE = False
logger.warning(
"pypinyin is not installed. Chinese pinyin sorting will use simple string sorting."
)
async def safe_vdb_operation_with_exception(
operation: Callable,
operation_name: str,
entity_name: str = "",
max_retries: int = 3,
retry_delay: float = 0.2,
logger_func: Optional[Callable] = None,
) -> None:
"""
Safely execute vector database operations with retry mechanism and exception handling.
This function ensures that VDB operations are executed with proper error handling
and retry logic. If all retries fail, it raises an exception to maintain data consistency.
Args:
operation: The async operation to execute
operation_name: Operation name for logging purposes
entity_name: Entity name for logging purposes
max_retries: Maximum number of retry attempts
retry_delay: Delay between retries in seconds
logger_func: Logger function to use for error messages
Raises:
Exception: When operation fails after all retry attempts
"""
log_func = logger_func or logger.warning
for attempt in range(max_retries):
try:
await operation()
return # Success, return immediately
except Exception as e:
if attempt >= max_retries - 1:
error_msg = f"VDB {operation_name} failed for {entity_name} after {max_retries} attempts: {e}"
log_func(error_msg)
raise Exception(error_msg) from e
else:
log_func(
f"VDB {operation_name} attempt {attempt + 1} failed for {entity_name}: {e}, retrying..."
)
if retry_delay > 0:
await asyncio.sleep(retry_delay)
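# Illustrative usage sketch (not part of the public API): wrap a VDB upsert in a
# zero-argument coroutine factory so it can be retried. `entities_vdb`, `entity_id`
# and `payload` below are hypothetical placeholders.
#
#     await safe_vdb_operation_with_exception(
#         operation=lambda: entities_vdb.upsert({entity_id: payload}),
#         operation_name="upsert",
#         entity_name=entity_id,
#         max_retries=3,
#     )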
def get_env_value(
    env_key: str, default: Any, value_type: type = str, special_none: bool = False
) -> Any:
    """
    Get a value from an environment variable with type conversion.
    Args:
        env_key (str): Environment variable key
        default (Any): Default value if the env variable is not set
        value_type (type): Type to convert the value to
        special_none (bool): If True, return None when the value is the string "None"
    Returns:
        Any: Converted value from the environment, or the default
    """
value = os.getenv(env_key)
if value is None:
return default
# Handle special case for "None" string
if special_none and value == "None":
return None
if value_type is bool:
return value.lower() in ("true", "1", "yes", "t", "on")
    # Handle list type with JSON parsing (json is imported at module level)
    if value_type is list:
        try:
            parsed_value = json.loads(value)
# Ensure the parsed value is actually a list
if isinstance(parsed_value, list):
return parsed_value
else:
logger.warning(
f"Environment variable {env_key} is not a valid JSON list, using default"
)
return default
except (json.JSONDecodeError, ValueError) as e:
logger.warning(
f"Failed to parse {env_key} as JSON list: {e}, using default"
)
return default
try:
return value_type(value)
except (ValueError, TypeError):
return default
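# Illustrative examples of the conversion rules above (the environment variable
# names are hypothetical and only demonstrate behaviour):
#
#     timeout = get_env_value("LLM_TIMEOUT", 180, int)     # "240"  -> 240
#     verbose = get_env_value("VERBOSE", False, bool)      # "true" -> True
#     hosts   = get_env_value("HOSTS", [], list)           # '["a","b"]' -> ["a", "b"]
#     model   = get_env_value("MODEL", "gpt-4o", special_none=True)  # "None" -> None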
# Use TYPE_CHECKING to avoid circular imports
if TYPE_CHECKING:
from lightrag.base import BaseKVStorage, BaseVectorStorage, QueryParam
# use the .env that is inside the current folder
# allows to use different .env file for each lightrag instance
# the OS environment variables take precedence over the .env file
load_dotenv(dotenv_path=".env", override=False)
VERBOSE_DEBUG = os.getenv("VERBOSE", "false").lower() == "true"
def verbose_debug(msg: str, *args, **kwargs):
"""Function for outputting detailed debug information.
When VERBOSE_DEBUG=True, outputs the complete message.
    When VERBOSE_DEBUG=False, outputs only the first 150 characters and collapses consecutive newlines.
Args:
msg: The message format string
*args: Arguments to be formatted into the message
**kwargs: Keyword arguments passed to logger.debug()
"""
if VERBOSE_DEBUG:
logger.debug(msg, *args, **kwargs)
else:
# Format the message with args first
if args:
formatted_msg = msg % args
else:
formatted_msg = msg
# Then truncate the formatted message
truncated_msg = (
formatted_msg[:150] + "..." if len(formatted_msg) > 150 else formatted_msg
)
# Remove consecutive newlines
truncated_msg = re.sub(r"\n+", "\n", truncated_msg)
logger.debug(truncated_msg, **kwargs)
def set_verbose_debug(enabled: bool):
"""Enable or disable verbose debug output"""
global VERBOSE_DEBUG
VERBOSE_DEBUG = enabled
statistic_data = {"llm_call": 0, "llm_cache": 0, "embed_call": 0}
class LightragPathFilter(logging.Filter):
"""Filter for lightrag logger to filter out frequent path access logs"""
def __init__(self):
super().__init__()
# Define paths to be filtered
self.filtered_paths = [
"/documents",
"/documents/paginated",
"/health",
"/webui/",
"/documents/pipeline_status",
]
# self.filtered_paths = ["/health", "/webui/"]
def filter(self, record):
try:
# Check if record has the required attributes for an access log
if not hasattr(record, "args") or not isinstance(record.args, tuple):
return True
if len(record.args) < 5:
return True
# Extract method, path and status from the record args
method = record.args[1]
path = record.args[2]
status = record.args[4]
# Filter out successful GET/POST requests to filtered paths
if (
(method == "GET" or method == "POST")
and (status == 200 or status == 304)
and path in self.filtered_paths
):
return False
return True
except Exception:
# In case of any error, let the message through
return True
def setup_logger(
logger_name: str,
level: str = "INFO",
add_filter: bool = False,
log_file_path: str | None = None,
enable_file_logging: bool = True,
):
"""Set up a logger with console and optionally file handlers
Args:
logger_name: Name of the logger to set up
level: Log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
add_filter: Whether to add LightragPathFilter to the logger
log_file_path: Path to the log file. If None and file logging is enabled, defaults to lightrag.log in LOG_DIR or cwd
enable_file_logging: Whether to enable logging to a file (defaults to True)
"""
# Configure formatters
detailed_formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
simple_formatter = logging.Formatter("%(levelname)s: %(message)s")
logger_instance = logging.getLogger(logger_name)
logger_instance.setLevel(level)
logger_instance.handlers = [] # Clear existing handlers
logger_instance.propagate = False
# Add console handler with safe stream handling
console_handler = SafeStreamHandler()
console_handler.setFormatter(simple_formatter)
console_handler.setLevel(level)
logger_instance.addHandler(console_handler)
# Add file handler by default unless explicitly disabled
if enable_file_logging:
# Get log file path
if log_file_path is None:
log_dir = os.getenv("LOG_DIR", os.getcwd())
log_file_path = os.path.abspath(os.path.join(log_dir, DEFAULT_LOG_FILENAME))
# Ensure log directory exists
os.makedirs(os.path.dirname(log_file_path), exist_ok=True)
# Get log file max size and backup count from environment variables
log_max_bytes = get_env_value("LOG_MAX_BYTES", DEFAULT_LOG_MAX_BYTES, int)
log_backup_count = get_env_value(
"LOG_BACKUP_COUNT", DEFAULT_LOG_BACKUP_COUNT, int
)
try:
# Add file handler
file_handler = logging.handlers.RotatingFileHandler(
filename=log_file_path,
maxBytes=log_max_bytes,
backupCount=log_backup_count,
encoding="utf-8",
)
file_handler.setFormatter(detailed_formatter)
file_handler.setLevel(level)
logger_instance.addHandler(file_handler)
except PermissionError as e:
logger.warning(f"Could not create log file at {log_file_path}: {str(e)}")
logger.warning("Continuing with console logging only")
# Add path filter if requested
if add_filter:
path_filter = LightragPathFilter()
logger_instance.addFilter(path_filter)
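# Illustrative usage: configure an access logger with path filtering and
# size-rotated file output. The logger name and file path below are examples,
# not requirements of this function.
#
#     setup_logger(
#         "uvicorn.access",
#         level="INFO",
#         add_filter=True,
#         log_file_path="/var/log/lightrag/lightrag.log",
#     )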
class UnlimitedSemaphore:
"""A context manager that allows unlimited access."""
async def __aenter__(self):
pass
async def __aexit__(self, exc_type, exc, tb):
pass
@dataclass
class TaskState:
"""Task state tracking for priority queue management"""
future: asyncio.Future
start_time: float
    execution_start_time: float | None = None
worker_started: bool = False
cancellation_requested: bool = False
cleanup_done: bool = False
@dataclass
class EmbeddingFunc:
"""Embedding function wrapper with dimension validation
This class wraps an embedding function to ensure that the output embeddings have the correct dimension.
This class should not be wrapped multiple times.
Args:
embedding_dim: Expected dimension of the embeddings
func: The actual embedding function to wrap
max_token_size: Optional token limit for the embedding model
send_dimensions: Whether to inject embedding_dim as a keyword argument
"""
embedding_dim: int
    func: Callable[..., Any]
max_token_size: int | None = None # Token limit for the embedding model
send_dimensions: bool = (
False # Control whether to send embedding_dim to the function
)
async def __call__(self, *args, **kwargs) -> np.ndarray:
# Only inject embedding_dim when send_dimensions is True
if self.send_dimensions:
# Check if user provided embedding_dim parameter
if "embedding_dim" in kwargs:
user_provided_dim = kwargs["embedding_dim"]
# If user's value differs from class attribute, output warning
if (
user_provided_dim is not None
and user_provided_dim != self.embedding_dim
):
logger.warning(
f"Ignoring user-provided embedding_dim={user_provided_dim}, "
f"using declared embedding_dim={self.embedding_dim} from decorator"
)
# Inject embedding_dim from decorator
kwargs["embedding_dim"] = self.embedding_dim
# Call the actual embedding function
result = await self.func(*args, **kwargs)
# Validate embedding dimensions using total element count
total_elements = result.size # Total number of elements in the numpy array
expected_dim = self.embedding_dim
# Check if total elements can be evenly divided by embedding_dim
if total_elements % expected_dim != 0:
raise ValueError(
f"Embedding dimension mismatch detected: "
f"total elements ({total_elements}) cannot be evenly divided by "
f"expected dimension ({expected_dim}). "
)
# Optional: Verify vector count matches input text count
actual_vectors = total_elements // expected_dim
if args and isinstance(args[0], (list, tuple)):
expected_vectors = len(args[0])
if actual_vectors != expected_vectors:
raise ValueError(
f"Vector count mismatch: "
f"expected {expected_vectors} vectors but got {actual_vectors} vectors (from embedding result)."
)
return result
def compute_args_hash(*args: Any) -> str:
"""Compute a hash for the given arguments with safe Unicode handling.
Args:
*args: Arguments to hash
Returns:
str: Hash string
"""
# Convert all arguments to strings and join them
args_str = "".join([str(arg) for arg in args])
# Use 'replace' error handling to safely encode problematic Unicode characters
# This replaces invalid characters with Unicode replacement character (U+FFFD)
try:
return md5(args_str.encode("utf-8")).hexdigest()
except UnicodeEncodeError:
# Handle surrogate characters and other encoding issues
safe_bytes = args_str.encode("utf-8", errors="replace")
return md5(safe_bytes).hexdigest()
def compute_mdhash_id(content: str, prefix: str = "") -> str:
"""
Compute a unique ID for a given content string.
The ID is a combination of the given prefix and the MD5 hash of the content string.
"""
return prefix + compute_args_hash(content)
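# Illustrative: IDs are derived from content hashes, so identical content always
# maps to the same ID. The "ent-"/"rel-" prefixes mirror the convention used by
# the export helpers further below.
#
#     entity_id   = compute_mdhash_id("Alan Turing", prefix="ent-")
#     relation_id = compute_mdhash_id(src_entity + tgt_entity, prefix="rel-")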
def generate_cache_key(mode: str, cache_type: str, hash_value: str) -> str:
"""Generate a flattened cache key in the format {mode}:{cache_type}:{hash}
Args:
mode: Cache mode (e.g., 'default', 'local', 'global')
cache_type: Type of cache (e.g., 'extract', 'query', 'keywords')
hash_value: Hash value from compute_args_hash
Returns:
str: Flattened cache key
"""
return f"{mode}:{cache_type}:{hash_value}"
def parse_cache_key(cache_key: str) -> tuple[str, str, str] | None:
"""Parse a flattened cache key back into its components
Args:
cache_key: Flattened cache key in format {mode}:{cache_type}:{hash}
Returns:
tuple[str, str, str] | None: (mode, cache_type, hash) or None if invalid format
"""
parts = cache_key.split(":", 2)
if len(parts) == 3:
return parts[0], parts[1], parts[2]
return None
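# Illustrative round trip between the two helpers above:
#
#     key = generate_cache_key("local", "query", compute_args_hash("what is RAG?"))
#     # -> "local:query:<md5 hash>"
#     parse_cache_key(key)    # -> ("local", "query", "<md5 hash>")
#     parse_cache_key("bad")  # -> None (not three colon-separated parts)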
# Custom exception classes
class QueueFullError(Exception):
"""Raised when the queue is full and the wait times out"""
pass
class WorkerTimeoutError(Exception):
"""Worker-level timeout exception with specific timeout information"""
def __init__(self, timeout_value: float, timeout_type: str = "execution"):
self.timeout_value = timeout_value
self.timeout_type = timeout_type
super().__init__(f"Worker {timeout_type} timeout after {timeout_value}s")
class HealthCheckTimeoutError(Exception):
"""Health Check-level timeout exception"""
def __init__(self, timeout_value: float, execution_duration: float):
self.timeout_value = timeout_value
self.execution_duration = execution_duration
super().__init__(
f"Task forcefully terminated due to execution timeout (>{timeout_value}s, actual: {execution_duration:.1f}s)"
)
def priority_limit_async_func_call(
max_size: int,
    llm_timeout: float | None = None,
    max_execution_timeout: float | None = None,
    max_task_duration: float | None = None,
max_queue_size: int = 1000,
cleanup_timeout: float = 2.0,
queue_name: str = "limit_async",
):
"""
Enhanced priority-limited asynchronous function call decorator with robust timeout handling
This decorator provides a comprehensive solution for managing concurrent LLM requests with:
- Multi-layer timeout protection (LLM -> Worker -> Health Check -> User)
- Task state tracking to prevent race conditions
- Enhanced health check system with stuck task detection
- Proper resource cleanup and error recovery
Args:
max_size: Maximum number of concurrent calls
max_queue_size: Maximum queue capacity to prevent memory overflow
llm_timeout: LLM provider timeout (from global config), used to calculate other timeouts
        max_execution_timeout: Maximum time for the worker to execute the function (defaults to llm_timeout * 2)
        max_task_duration: Maximum time before the health check intervenes (defaults to llm_timeout * 2 + 15s)
cleanup_timeout: Maximum time to wait for cleanup operations (defaults to 2.0s)
queue_name: Optional queue name for logging identification (defaults to "limit_async")
Returns:
Decorator function
"""
def final_decro(func):
# Ensure func is callable
if not callable(func):
raise TypeError(f"Expected a callable object, got {type(func)}")
# Calculate timeout hierarchy if llm_timeout is provided (Dynamic Timeout Calculation)
if llm_timeout is not None:
nonlocal max_execution_timeout, max_task_duration
if max_execution_timeout is None:
max_execution_timeout = (
llm_timeout * 2
) # Reserved timeout buffer for low-level retry
if max_task_duration is None:
max_task_duration = (
llm_timeout * 2 + 15
) # Reserved timeout buffer for health check phase
queue = asyncio.PriorityQueue(maxsize=max_queue_size)
tasks = set()
initialization_lock = asyncio.Lock()
counter = 0
shutdown_event = asyncio.Event()
initialized = False
worker_health_check_task = None
# Enhanced task state management
task_states = {} # task_id -> TaskState
task_states_lock = asyncio.Lock()
active_futures = weakref.WeakSet()
reinit_count = 0
async def worker():
"""Enhanced worker that processes tasks with proper timeout and state management"""
try:
while not shutdown_event.is_set():
try:
# Get task from queue with timeout for shutdown checking
try:
(
priority,
count,
task_id,
args,
kwargs,
) = await asyncio.wait_for(queue.get(), timeout=1.0)
except asyncio.TimeoutError:
continue
# Get task state and mark worker as started
async with task_states_lock:
if task_id not in task_states:
queue.task_done()
continue
task_state = task_states[task_id]
task_state.worker_started = True
# Record execution start time when worker actually begins processing
task_state.execution_start_time = (
asyncio.get_event_loop().time()
)
# Check if task was cancelled before worker started
if (
task_state.cancellation_requested
or task_state.future.cancelled()
):
async with task_states_lock:
task_states.pop(task_id, None)
queue.task_done()
continue
try:
# Execute function with timeout protection
if max_execution_timeout is not None:
result = await asyncio.wait_for(
func(*args, **kwargs), timeout=max_execution_timeout
)
else:
result = await func(*args, **kwargs)
# Set result if future is still valid
if not task_state.future.done():
task_state.future.set_result(result)
except asyncio.TimeoutError:
# Worker-level timeout (max_execution_timeout exceeded)
logger.warning(
f"{queue_name}: Worker timeout for task {task_id} after {max_execution_timeout}s"
)
if not task_state.future.done():
task_state.future.set_exception(
WorkerTimeoutError(
max_execution_timeout, "execution"
)
)
except asyncio.CancelledError:
# Task was cancelled during execution
if not task_state.future.done():
task_state.future.cancel()
logger.debug(
f"{queue_name}: Task {task_id} cancelled during execution"
)
except Exception as e:
# Function execution error
logger.error(
f"{queue_name}: Error in decorated function for task {task_id}: {str(e)}"
)
if not task_state.future.done():
task_state.future.set_exception(e)
finally:
# Clean up task state
async with task_states_lock:
task_states.pop(task_id, None)
queue.task_done()
except Exception as e:
# Critical error in worker loop
logger.error(
f"{queue_name}: Critical error in worker: {str(e)}"
)
await asyncio.sleep(0.1)
finally:
logger.debug(f"{queue_name}: Worker exiting")
async def enhanced_health_check():
"""Enhanced health check with stuck task detection and recovery"""
nonlocal initialized
try:
while not shutdown_event.is_set():
await asyncio.sleep(5) # Check every 5 seconds
current_time = asyncio.get_event_loop().time()
# Detect and handle stuck tasks based on execution start time
if max_task_duration is not None:
stuck_tasks = []
async with task_states_lock:
for task_id, task_state in list(task_states.items()):
# Only check tasks that have started execution
if (
task_state.worker_started
and task_state.execution_start_time is not None
and current_time - task_state.execution_start_time
> max_task_duration
):
stuck_tasks.append(
(
task_id,
current_time
- task_state.execution_start_time,
)
)
# Force cleanup of stuck tasks
for task_id, execution_duration in stuck_tasks:
logger.warning(
f"{queue_name}: Detected stuck task {task_id} (execution time: {execution_duration:.1f}s), forcing cleanup"
)
async with task_states_lock:
if task_id in task_states:
task_state = task_states[task_id]
if not task_state.future.done():
task_state.future.set_exception(
HealthCheckTimeoutError(
max_task_duration, execution_duration
)
)
task_states.pop(task_id, None)
# Worker recovery logic
current_tasks = set(tasks)
done_tasks = {t for t in current_tasks if t.done()}
tasks.difference_update(done_tasks)
active_tasks_count = len(tasks)
workers_needed = max_size - active_tasks_count
if workers_needed > 0:
logger.info(
f"{queue_name}: Creating {workers_needed} new workers"
)
new_tasks = set()
for _ in range(workers_needed):
task = asyncio.create_task(worker())
new_tasks.add(task)
task.add_done_callback(tasks.discard)
tasks.update(new_tasks)
except Exception as e:
logger.error(f"{queue_name}: Error in enhanced health check: {str(e)}")
finally:
logger.debug(f"{queue_name}: Enhanced health check task exiting")
initialized = False
async def ensure_workers():
"""Ensure worker system is initialized with enhanced error handling"""
nonlocal initialized, worker_health_check_task, tasks, reinit_count
if initialized:
return
async with initialization_lock:
if initialized:
return
if reinit_count > 0:
reinit_count += 1
logger.warning(
f"{queue_name}: Reinitializing system (count: {reinit_count})"
)
else:
reinit_count = 1
# Clean up completed tasks
current_tasks = set(tasks)
done_tasks = {t for t in current_tasks if t.done()}
tasks.difference_update(done_tasks)
active_tasks_count = len(tasks)
if active_tasks_count > 0 and reinit_count > 1:
logger.warning(
f"{queue_name}: {active_tasks_count} tasks still running during reinitialization"
)
# Create worker tasks
workers_needed = max_size - active_tasks_count
for _ in range(workers_needed):
task = asyncio.create_task(worker())
tasks.add(task)
task.add_done_callback(tasks.discard)
# Start enhanced health check
worker_health_check_task = asyncio.create_task(enhanced_health_check())
initialized = True
# Log dynamic timeout configuration
timeout_info = []
if llm_timeout is not None:
timeout_info.append(f"Func: {llm_timeout}s")
if max_execution_timeout is not None:
timeout_info.append(f"Worker: {max_execution_timeout}s")
if max_task_duration is not None:
timeout_info.append(f"Health Check: {max_task_duration}s")
timeout_str = (
f"(Timeouts: {', '.join(timeout_info)})" if timeout_info else ""
)
logger.info(
f"{queue_name}: {workers_needed} new workers initialized {timeout_str}"
)
async def shutdown():
"""Gracefully shut down all workers and cleanup resources"""
logger.info(f"{queue_name}: Shutting down priority queue workers")
shutdown_event.set()
# Cancel all active futures
for future in list(active_futures):
if not future.done():
future.cancel()
# Cancel all pending tasks
async with task_states_lock:
for task_id, task_state in list(task_states.items()):
if not task_state.future.done():
task_state.future.cancel()
task_states.clear()
# Wait for queue to empty with timeout
try:
await asyncio.wait_for(queue.join(), timeout=5.0)
except asyncio.TimeoutError:
logger.warning(
f"{queue_name}: Timeout waiting for queue to empty during shutdown"
)
# Cancel worker tasks
for task in list(tasks):
if not task.done():
task.cancel()
# Wait for all tasks to complete
if tasks:
await asyncio.gather(*tasks, return_exceptions=True)
# Cancel health check task
if worker_health_check_task and not worker_health_check_task.done():
worker_health_check_task.cancel()
try:
await worker_health_check_task
except asyncio.CancelledError:
pass
logger.info(f"{queue_name}: Priority queue workers shutdown complete")
@wraps(func)
async def wait_func(
*args, _priority=10, _timeout=None, _queue_timeout=None, **kwargs
):
"""
Execute function with enhanced priority-based concurrency control and timeout handling
Args:
*args: Positional arguments passed to the function
_priority: Call priority (lower values have higher priority)
                _timeout: Maximum time to wait for completion (in seconds; None means it is determined by the queue's max_execution_timeout)
_queue_timeout: Maximum time to wait for entering the queue (in seconds)
**kwargs: Keyword arguments passed to the function
Returns:
The result of the function call
Raises:
TimeoutError: If the function call times out at any level
QueueFullError: If the queue is full and waiting times out
Any exception raised by the decorated function
"""
await ensure_workers()
# Generate unique task ID
task_id = f"{id(asyncio.current_task())}_{asyncio.get_event_loop().time()}"
future = asyncio.Future()
# Create task state
task_state = TaskState(
future=future, start_time=asyncio.get_event_loop().time()
)
try:
# Register task state
async with task_states_lock:
task_states[task_id] = task_state
active_futures.add(future)
# Get counter for FIFO ordering
nonlocal counter
async with initialization_lock:
current_count = counter
counter += 1
# Queue the task with timeout handling
try:
if _queue_timeout is not None:
await asyncio.wait_for(
queue.put(
(_priority, current_count, task_id, args, kwargs)
),
timeout=_queue_timeout,
)
else:
await queue.put(
(_priority, current_count, task_id, args, kwargs)
)
except asyncio.TimeoutError:
raise QueueFullError(
f"{queue_name}: Queue full, timeout after {_queue_timeout} seconds"
)
except Exception as e:
# Clean up on queue error
if not future.done():
future.set_exception(e)
raise
# Wait for result with timeout handling
try:
if _timeout is not None:
return await asyncio.wait_for(future, _timeout)
else:
return await future
except asyncio.TimeoutError:
# This is user-level timeout (asyncio.wait_for caused)
# Mark cancellation request
async with task_states_lock:
if task_id in task_states:
task_states[task_id].cancellation_requested = True
# Cancel future
if not future.done():
future.cancel()
# Wait for worker cleanup with timeout
cleanup_start = asyncio.get_event_loop().time()
while (
task_id in task_states
and asyncio.get_event_loop().time() - cleanup_start
< cleanup_timeout
):
await asyncio.sleep(0.1)
raise TimeoutError(
f"{queue_name}: User timeout after {_timeout} seconds"
)
except WorkerTimeoutError as e:
# This is Worker-level timeout, directly propagate exception information
raise TimeoutError(f"{queue_name}: {str(e)}")
except HealthCheckTimeoutError as e:
# This is Health Check-level timeout, directly propagate exception information
raise TimeoutError(f"{queue_name}: {str(e)}")
finally:
# Ensure cleanup
active_futures.discard(future)
async with task_states_lock:
task_states.pop(task_id, None)
# Add shutdown method to decorated function
wait_func.shutdown = shutdown
return wait_func
return final_decro
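# Illustrative usage sketch: limit an LLM-bound coroutine to 4 concurrent calls
# and give interactive queries a higher priority (lower number) than background
# work. The function and argument values are hypothetical.
#
#     @priority_limit_async_func_call(max_size=4, llm_timeout=180, queue_name="llm")
#     async def call_llm(prompt: str) -> str:
#         ...
#
#     answer = await call_llm("summarize this", _priority=0, _timeout=300)
#     await call_llm.shutdown()  # graceful shutdown method added by the decorator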
def wrap_embedding_func_with_attrs(**kwargs):
"""Decorator to add embedding dimension and token limit attributes to embedding functions.
This decorator wraps an async embedding function and returns an EmbeddingFunc instance
that automatically handles dimension parameter injection and attribute management.
WARNING: DO NOT apply this decorator to wrapper functions that call other
decorated embedding functions. This will cause double decoration and parameter
injection conflicts.
Correct usage patterns:
1. Direct implementation (decorated):
```python
@wrap_embedding_func_with_attrs(embedding_dim=1536)
async def my_embed(texts, embedding_dim=None):
# Direct implementation
return embeddings
```
2. Wrapper calling decorated function (DO NOT decorate wrapper):
```python
# my_embed is already decorated above
async def my_wrapper(texts, **kwargs): # ❌ DO NOT decorate this!
# Must call .func to access unwrapped implementation
return await my_embed.func(texts, **kwargs)
```
3. Wrapper calling decorated function (properly decorated):
```python
@wrap_embedding_func_with_attrs(embedding_dim=1536)
async def my_wrapper(texts, **kwargs): # ✅ Can decorate if calling .func
# Calling .func avoids double decoration
return await my_embed.func(texts, **kwargs)
```
The decorated function becomes an EmbeddingFunc instance with:
- embedding_dim: The embedding dimension
- max_token_size: Maximum token limit (optional)
- func: The original unwrapped function (access via .func)
- __call__: Wrapper that injects embedding_dim parameter
Double decoration causes:
- Double injection of embedding_dim parameter
- Incorrect parameter passing to the underlying implementation
- Runtime errors due to parameter conflicts
Args:
embedding_dim: The dimension of embedding vectors
max_token_size: Maximum number of tokens (optional)
send_dimensions: Whether to inject embedding_dim as a keyword argument (optional)
Returns:
A decorator that wraps the function as an EmbeddingFunc instance
Example of correct wrapper implementation:
```python
@wrap_embedding_func_with_attrs(embedding_dim=1536, max_token_size=8192)
@retry(...)
async def openai_embed(texts, ...):
# Base implementation
pass
@wrap_embedding_func_with_attrs(embedding_dim=1536) # Note: No @retry here!
async def azure_openai_embed(texts, ...):
# CRITICAL: Call .func to access unwrapped function
return await openai_embed.func(texts, ...) # ✅ Correct
# return await openai_embed(texts, ...) # ❌ Wrong - double decoration!
```
"""
def final_decro(func) -> EmbeddingFunc:
new_func = EmbeddingFunc(**kwargs, func=func)
return new_func
return final_decro
def load_json(file_name):
if not os.path.exists(file_name):
return None
with open(file_name, encoding="utf-8-sig") as f:
return json.load(f)
def _sanitize_string_for_json(text: str) -> str:
"""Remove characters that cannot be encoded in UTF-8 for JSON serialization.
Uses regex for optimal performance with zero-copy optimization for clean strings.
Fast detection path for clean strings (99% of cases) with efficient removal for dirty strings.
Args:
text: String to sanitize
Returns:
Original string if clean (zero-copy), sanitized string if dirty
"""
if not text:
return text
# Fast path: Check if sanitization is needed using C-level regex search
if not _SURROGATE_PATTERN.search(text):
return text # Zero-copy for clean strings - most common case
# Slow path: Remove problematic characters using C-level regex substitution
return _SURROGATE_PATTERN.sub("", text)
class SanitizingJSONEncoder(json.JSONEncoder):
"""
Custom JSON encoder that sanitizes data during serialization.
This encoder cleans strings during the encoding process without creating
a full copy of the data structure, making it memory-efficient for large datasets.
"""
def encode(self, o):
"""Override encode method to handle simple string cases"""
if isinstance(o, str):
return json.encoder.encode_basestring(_sanitize_string_for_json(o))
return super().encode(o)
def iterencode(self, o, _one_shot=False):
"""
Override iterencode to sanitize strings during serialization.
This is the core method that handles complex nested structures.
"""
# Preprocess: sanitize all strings in the object
sanitized = self._sanitize_for_encoding(o)
# Call parent's iterencode with sanitized data
for chunk in super().iterencode(sanitized, _one_shot):
yield chunk
def _sanitize_for_encoding(self, obj):
"""
Recursively sanitize strings in an object.
Creates new objects only when necessary to avoid deep copies.
Args:
obj: Object to sanitize
Returns:
Sanitized object with cleaned strings
"""
if isinstance(obj, str):
return _sanitize_string_for_json(obj)
elif isinstance(obj, dict):
# Create new dict with sanitized keys and values
new_dict = {}
for k, v in obj.items():
clean_k = _sanitize_string_for_json(k) if isinstance(k, str) else k
clean_v = self._sanitize_for_encoding(v)
new_dict[clean_k] = clean_v
return new_dict
elif isinstance(obj, (list, tuple)):
# Sanitize list/tuple elements
cleaned = [self._sanitize_for_encoding(item) for item in obj]
return type(obj)(cleaned) if isinstance(obj, tuple) else cleaned
else:
# Numbers, booleans, None, etc. remain unchanged
return obj
def write_json(json_obj, file_name):
"""
Write JSON data to file with optimized sanitization strategy.
This function uses a two-stage approach:
1. Fast path: Try direct serialization (works for clean data ~99% of time)
2. Slow path: Use custom encoder that sanitizes during serialization
The custom encoder approach avoids creating a deep copy of the data,
making it memory-efficient. When sanitization occurs, the caller should
reload the cleaned data from the file to update shared memory.
Args:
json_obj: Object to serialize (may be a shallow copy from shared memory)
file_name: Output file path
Returns:
bool: True if sanitization was applied (caller should reload data),
False if direct write succeeded (no reload needed)
"""
try:
# Strategy 1: Fast path - try direct serialization
with open(file_name, "w", encoding="utf-8") as f:
json.dump(json_obj, f, indent=2, ensure_ascii=False)
return False # No sanitization needed, no reload required
except (UnicodeEncodeError, UnicodeDecodeError) as e:
logger.debug(f"Direct JSON write failed, using sanitizing encoder: {e}")
# Strategy 2: Use custom encoder (sanitizes during serialization, zero memory copy)
        with open(file_name, "w", encoding="utf-8") as f:
            json.dump(
                json_obj, f, indent=2, ensure_ascii=False, cls=SanitizingJSONEncoder
            )
logger.info(f"JSON sanitization applied during write: {file_name}")
return True # Sanitization applied, reload recommended
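# Illustrative caller pattern for the reload contract described above: if the
# sanitizing encoder was used, re-read the file so in-memory data matches what
# was persisted. The file name and variable are placeholders.
#
#     sanitized = write_json(kv_data, "kv_store_full_docs.json")
#     if sanitized:
#         kv_data = load_json("kv_store_full_docs.json")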
class TokenizerInterface(Protocol):
"""
Defines the interface for a tokenizer, requiring encode and decode methods.
"""
def encode(self, content: str) -> List[int]:
"""Encodes a string into a list of tokens."""
...
def decode(self, tokens: List[int]) -> str:
"""Decodes a list of tokens into a string."""
...
class Tokenizer:
"""
A wrapper around a tokenizer to provide a consistent interface for encoding and decoding.
"""
def __init__(self, model_name: str, tokenizer: TokenizerInterface):
"""
Initializes the Tokenizer with a tokenizer model name and a tokenizer instance.
Args:
model_name: The associated model name for the tokenizer.
tokenizer: An instance of a class implementing the TokenizerInterface.
"""
self.model_name: str = model_name
self.tokenizer: TokenizerInterface = tokenizer
def encode(self, content: str) -> List[int]:
"""
Encodes a string into a list of tokens using the underlying tokenizer.
Args:
content: The string to encode.
Returns:
A list of integer tokens.
"""
return self.tokenizer.encode(content)
def decode(self, tokens: List[int]) -> str:
"""
Decodes a list of tokens into a string using the underlying tokenizer.
Args:
tokens: A list of integer tokens to decode.
Returns:
The decoded string.
"""
return self.tokenizer.decode(tokens)
class TiktokenTokenizer(Tokenizer):
"""
A Tokenizer implementation using the tiktoken library.
"""
def __init__(self, model_name: str = "gpt-4o-mini"):
"""
Initializes the TiktokenTokenizer with a specified model name.
Args:
model_name: The model name for the tiktoken tokenizer to use. Defaults to "gpt-4o-mini".
Raises:
ImportError: If tiktoken is not installed.
ValueError: If the model_name is invalid.
"""
try:
import tiktoken
except ImportError:
raise ImportError(
"tiktoken is not installed. Please install it with `pip install tiktoken` or define custom `tokenizer_func`."
)
try:
tokenizer = tiktoken.encoding_for_model(model_name)
super().__init__(model_name=model_name, tokenizer=tokenizer)
except KeyError:
raise ValueError(f"Invalid model_name: {model_name}.")
def pack_user_ass_to_openai_messages(*args: str):
roles = ["user", "assistant"]
return [
{"role": roles[i % 2], "content": content} for i, content in enumerate(args)
]
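# Illustrative: positional arguments alternate user/assistant in order, e.g. for
# replaying a prior exchange as an OpenAI-style message list.
#
#     pack_user_ass_to_openai_messages("hi", "hello!", "continue")
#     # -> [{"role": "user", "content": "hi"},
#     #     {"role": "assistant", "content": "hello!"},
#     #     {"role": "user", "content": "continue"}]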
def split_string_by_multi_markers(content: str, markers: list[str]) -> list[str]:
"""Split a string by multiple markers"""
if not markers:
return [content]
content = content if content is not None else ""
results = re.split("|".join(re.escape(marker) for marker in markers), content)
return [r.strip() for r in results if r.strip()]
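# Illustrative: split on any of several delimiters (regex-escaped) and drop
# empty or whitespace-only pieces.
#
#     split_string_by_multi_markers("a<SEP>b##c", ["<SEP>", "##"])
#     # -> ["a", "b", "c"]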
def is_float_regex(value: str) -> bool:
return bool(re.match(r"^[-+]?[0-9]*\.?[0-9]+$", value))
def truncate_list_by_token_size(
list_data: list[Any],
key: Callable[[Any], str],
max_token_size: int,
tokenizer: Tokenizer,
) -> list[Any]:
"""Truncate a list of data by token size"""
if max_token_size <= 0:
return []
tokens = 0
for i, data in enumerate(list_data):
tokens += len(tokenizer.encode(key(data)))
if tokens > max_token_size:
return list_data[:i]
return list_data
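# Illustrative: keep as many retrieved chunks as fit in a token budget.
# `chunks` and the 4000-token budget are hypothetical; TiktokenTokenizer
# requires the optional tiktoken dependency.
#
#     kept = truncate_list_by_token_size(
#         chunks,
#         key=lambda c: c["content"],
#         max_token_size=4000,
#         tokenizer=TiktokenTokenizer(),
#     )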
def cosine_similarity(v1, v2):
"""Calculate cosine similarity between two vectors"""
dot_product = np.dot(v1, v2)
norm1 = np.linalg.norm(v1)
norm2 = np.linalg.norm(v2)
return dot_product / (norm1 * norm2)
async def handle_cache(
hashing_kv,
args_hash,
prompt,
mode="default",
cache_type="unknown",
) -> tuple[str, int] | None:
"""Generic cache handling function with flattened cache keys
Returns:
tuple[str, int] | None: (content, create_time) if cache hit, None if cache miss
"""
if hashing_kv is None:
return None
    if mode != "default":  # handle cache for all types of queries
if not hashing_kv.global_config.get("enable_llm_cache"):
return None
else: # handle cache for entity extraction
if not hashing_kv.global_config.get("enable_llm_cache_for_entity_extract"):
return None
# Use flattened cache key format: {mode}:{cache_type}:{hash}
flattened_key = generate_cache_key(mode, cache_type, args_hash)
cache_entry = await hashing_kv.get_by_id(flattened_key)
if cache_entry:
logger.debug(f"Flattened cache hit(key:{flattened_key})")
content = cache_entry["return"]
timestamp = cache_entry.get("create_time", 0)
return content, timestamp
logger.debug(f"Cache missed(mode:{mode} type:{cache_type})")
return None
@dataclass
class CacheData:
args_hash: str
content: str
prompt: str
mode: str = "default"
cache_type: str = "query"
chunk_id: str | None = None
queryparam: dict | None = None
async def save_to_cache(hashing_kv, cache_data: CacheData):
"""Save data to cache using flattened key structure.
Args:
hashing_kv: The key-value storage for caching
cache_data: The cache data to save
"""
# Skip if storage is None or content is a streaming response
if hashing_kv is None or not cache_data.content:
return
# If content is a streaming response, don't cache it
if hasattr(cache_data.content, "__aiter__"):
logger.debug("Streaming response detected, skipping cache")
return
# Use flattened cache key format: {mode}:{cache_type}:{hash}
flattened_key = generate_cache_key(
cache_data.mode, cache_data.cache_type, cache_data.args_hash
)
# Check if we already have identical content cached
existing_cache = await hashing_kv.get_by_id(flattened_key)
if existing_cache:
existing_content = existing_cache.get("return")
if existing_content == cache_data.content:
logger.warning(
f"Cache duplication detected for {flattened_key}, skipping update"
)
return
# Create cache entry with flattened structure
cache_entry = {
"return": cache_data.content,
"cache_type": cache_data.cache_type,
"chunk_id": cache_data.chunk_id if cache_data.chunk_id is not None else None,
"original_prompt": cache_data.prompt,
"queryparam": cache_data.queryparam
if cache_data.queryparam is not None
else None,
}
logger.info(f" == LLM cache == saving: {flattened_key}")
# Save using flattened key
await hashing_kv.upsert({flattened_key: cache_entry})
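# Illustrative cache round trip using the flattened {mode}:{cache_type}:{hash}
# key format shared by handle_cache and save_to_cache. `kv`, `prompt` and
# `call_llm` are placeholders.
#
#     args_hash = compute_args_hash("local", prompt)
#     cached = await handle_cache(kv, args_hash, prompt, mode="local", cache_type="query")
#     if cached is None:
#         content = await call_llm(prompt)
#         await save_to_cache(
#             kv, CacheData(args_hash=args_hash, content=content, prompt=prompt, mode="local")
#         )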
def safe_unicode_decode(content):
# Regular expression to find all Unicode escape sequences of the form \uXXXX
unicode_escape_pattern = re.compile(r"\\u([0-9a-fA-F]{4})")
# Function to replace the Unicode escape with the actual character
def replace_unicode_escape(match):
# Convert the matched hexadecimal value into the actual Unicode character
return chr(int(match.group(1), 16))
# Perform the substitution
decoded_content = unicode_escape_pattern.sub(
replace_unicode_escape, content.decode("utf-8")
)
return decoded_content
def exists_func(obj, func_name: str) -> bool:
"""Check if a function exists in an object or not.
:param obj:
:param func_name:
:return: True / False
"""
if callable(getattr(obj, func_name, None)):
return True
else:
return False
def always_get_an_event_loop() -> asyncio.AbstractEventLoop:
"""
Ensure that there is always an event loop available.
This function tries to get the current event loop. If the current event loop is closed or does not exist,
it creates a new event loop and sets it as the current event loop.
Returns:
asyncio.AbstractEventLoop: The current or newly created event loop.
"""
try:
# Try to get the current event loop
current_loop = asyncio.get_event_loop()
if current_loop.is_closed():
raise RuntimeError("Event loop is closed.")
return current_loop
except RuntimeError:
# If no event loop exists or it is closed, create a new one
logger.info("Creating a new event loop in main thread.")
new_loop = asyncio.new_event_loop()
asyncio.set_event_loop(new_loop)
return new_loop
async def aexport_data(
chunk_entity_relation_graph,
entities_vdb,
relationships_vdb,
output_path: str,
file_format: str = "csv",
include_vector_data: bool = False,
) -> None:
"""
Asynchronously exports all entities, relations, and relationships to various formats.
Args:
chunk_entity_relation_graph: Graph storage instance for entities and relations
entities_vdb: Vector database storage for entities
relationships_vdb: Vector database storage for relationships
output_path: The path to the output file (including extension).
file_format: Output format - "csv", "excel", "md", "txt".
- csv: Comma-separated values file
- excel: Microsoft Excel file with multiple sheets
- md: Markdown tables
- txt: Plain text formatted output
include_vector_data: Whether to include data from the vector database.
"""
# Collect data
entities_data = []
relations_data = []
relationships_data = []
# --- Entities ---
all_entities = await chunk_entity_relation_graph.get_all_labels()
for entity_name in all_entities:
# Get entity information from graph
node_data = await chunk_entity_relation_graph.get_node(entity_name)
source_id = node_data.get("source_id") if node_data else None
entity_info = {
"graph_data": node_data,
"source_id": source_id,
}
# Optional: Get vector database information
if include_vector_data:
entity_id = compute_mdhash_id(entity_name, prefix="ent-")
vector_data = await entities_vdb.get_by_id(entity_id)
entity_info["vector_data"] = vector_data
entity_row = {
"entity_name": entity_name,
"source_id": source_id,
"graph_data": str(
entity_info["graph_data"]
), # Convert to string to ensure compatibility
}
if include_vector_data and "vector_data" in entity_info:
entity_row["vector_data"] = str(entity_info["vector_data"])
entities_data.append(entity_row)
# --- Relations ---
for src_entity in all_entities:
for tgt_entity in all_entities:
if src_entity == tgt_entity:
continue
edge_exists = await chunk_entity_relation_graph.has_edge(
src_entity, tgt_entity
)
if edge_exists:
# Get edge information from graph
edge_data = await chunk_entity_relation_graph.get_edge(
src_entity, tgt_entity
)
source_id = edge_data.get("source_id") if edge_data else None
relation_info = {
"graph_data": edge_data,
"source_id": source_id,
}
# Optional: Get vector database information
if include_vector_data:
rel_id = compute_mdhash_id(src_entity + tgt_entity, prefix="rel-")
vector_data = await relationships_vdb.get_by_id(rel_id)
relation_info["vector_data"] = vector_data
relation_row = {
"src_entity": src_entity,
"tgt_entity": tgt_entity,
"source_id": relation_info["source_id"],
"graph_data": str(relation_info["graph_data"]), # Convert to string
}
if include_vector_data and "vector_data" in relation_info:
relation_row["vector_data"] = str(relation_info["vector_data"])
relations_data.append(relation_row)
# --- Relationships (from VectorDB) ---
all_relationships = await relationships_vdb.client_storage
for rel in all_relationships["data"]:
relationships_data.append(
{
"relationship_id": rel["__id__"],
"data": str(rel), # Convert to string for compatibility
}
)
# Export based on format
if file_format == "csv":
# CSV export
with open(output_path, "w", newline="", encoding="utf-8") as csvfile:
# Entities
if entities_data:
csvfile.write("# ENTITIES\n")
writer = csv.DictWriter(csvfile, fieldnames=entities_data[0].keys())
writer.writeheader()
writer.writerows(entities_data)
csvfile.write("\n\n")
# Relations
if relations_data:
csvfile.write("# RELATIONS\n")
writer = csv.DictWriter(csvfile, fieldnames=relations_data[0].keys())
writer.writeheader()
writer.writerows(relations_data)
csvfile.write("\n\n")
# Relationships
if relationships_data:
csvfile.write("# RELATIONSHIPS\n")
writer = csv.DictWriter(
csvfile, fieldnames=relationships_data[0].keys()
)
writer.writeheader()
writer.writerows(relationships_data)
elif file_format == "excel":
# Excel export
import pandas as pd
entities_df = pd.DataFrame(entities_data) if entities_data else pd.DataFrame()
relations_df = (
pd.DataFrame(relations_data) if relations_data else pd.DataFrame()
)
relationships_df = (
pd.DataFrame(relationships_data) if relationships_data else pd.DataFrame()
)
with pd.ExcelWriter(output_path, engine="xlsxwriter") as writer:
if not entities_df.empty:
entities_df.to_excel(writer, sheet_name="Entities", index=False)
if not relations_df.empty:
relations_df.to_excel(writer, sheet_name="Relations", index=False)
if not relationships_df.empty:
relationships_df.to_excel(
writer, sheet_name="Relationships", index=False
)
elif file_format == "md":
# Markdown export
with open(output_path, "w", encoding="utf-8") as mdfile:
mdfile.write("# LightRAG Data Export\n\n")
# Entities
mdfile.write("## Entities\n\n")
if entities_data:
# Write header
mdfile.write("| " + " | ".join(entities_data[0].keys()) + " |\n")
mdfile.write(
"| " + " | ".join(["---"] * len(entities_data[0].keys())) + " |\n"
)
# Write rows
for entity in entities_data:
mdfile.write(
"| " + " | ".join(str(v) for v in entity.values()) + " |\n"
)
mdfile.write("\n\n")
else:
mdfile.write("*No entity data available*\n\n")
# Relations
mdfile.write("## Relations\n\n")
if relations_data:
# Write header
mdfile.write("| " + " | ".join(relations_data[0].keys()) + " |\n")
mdfile.write(
"| " + " | ".join(["---"] * len(relations_data[0].keys())) + " |\n"
)
# Write rows
for relation in relations_data:
mdfile.write(
"| " + " | ".join(str(v) for v in relation.values()) + " |\n"
)
mdfile.write("\n\n")
else:
mdfile.write("*No relation data available*\n\n")
# Relationships
mdfile.write("## Relationships\n\n")
if relationships_data:
# Write header
mdfile.write("| " + " | ".join(relationships_data[0].keys()) + " |\n")
mdfile.write(
"| "
+ " | ".join(["---"] * len(relationships_data[0].keys()))
+ " |\n"
)
# Write rows
for relationship in relationships_data:
mdfile.write(
"| "
+ " | ".join(str(v) for v in relationship.values())
+ " |\n"
)
else:
mdfile.write("*No relationship data available*\n\n")
elif file_format == "txt":
# Plain text export
with open(output_path, "w", encoding="utf-8") as txtfile:
txtfile.write("LIGHTRAG DATA EXPORT\n")
txtfile.write("=" * 80 + "\n\n")
# Entities
txtfile.write("ENTITIES\n")
txtfile.write("-" * 80 + "\n")
if entities_data:
# Create fixed width columns
col_widths = {
k: max(len(k), max(len(str(e[k])) for e in entities_data))
for k in entities_data[0]
}
header = " ".join(k.ljust(col_widths[k]) for k in entities_data[0])
txtfile.write(header + "\n")
txtfile.write("-" * len(header) + "\n")
# Write rows
for entity in entities_data:
row = " ".join(
str(v).ljust(col_widths[k]) for k, v in entity.items()
)
txtfile.write(row + "\n")
txtfile.write("\n\n")
else:
txtfile.write("No entity data available\n\n")
# Relations
txtfile.write("RELATIONS\n")
txtfile.write("-" * 80 + "\n")
if relations_data:
# Create fixed width columns
col_widths = {
k: max(len(k), max(len(str(r[k])) for r in relations_data))
for k in relations_data[0]
}
header = " ".join(k.ljust(col_widths[k]) for k in relations_data[0])
txtfile.write(header + "\n")
txtfile.write("-" * len(header) + "\n")
# Write rows
for relation in relations_data:
row = " ".join(
str(v).ljust(col_widths[k]) for k, v in relation.items()
)
txtfile.write(row + "\n")
txtfile.write("\n\n")
else:
txtfile.write("No relation data available\n\n")
# Relationships
txtfile.write("RELATIONSHIPS\n")
txtfile.write("-" * 80 + "\n")
if relationships_data:
# Create fixed width columns
col_widths = {
k: max(len(k), max(len(str(r[k])) for r in relationships_data))
for k in relationships_data[0]
}
header = " ".join(
k.ljust(col_widths[k]) for k in relationships_data[0]
)
txtfile.write(header + "\n")
txtfile.write("-" * len(header) + "\n")
# Write rows
for relationship in relationships_data:
row = " ".join(
str(v).ljust(col_widths[k]) for k, v in relationship.items()
)
txtfile.write(row + "\n")
else:
txtfile.write("No relationship data available\n\n")
else:
raise ValueError(
f"Unsupported file format: {file_format}. Choose from: csv, excel, md, txt"
)
    print(f"Data exported to: {output_path} with format: {file_format}")
def export_data(
chunk_entity_relation_graph,
entities_vdb,
relationships_vdb,
output_path: str,
file_format: str = "csv",
include_vector_data: bool = False,
) -> None:
"""
Synchronously exports all entities, relations, and relationships to various formats.
Args:
chunk_entity_relation_graph: Graph storage instance for entities and relations
entities_vdb: Vector database storage for entities
relationships_vdb: Vector database storage for relationships
output_path: The path to the output file (including extension).
file_format: Output format - "csv", "excel", "md", "txt".
- csv: Comma-separated values file
- excel: Microsoft Excel file with multiple sheets
- md: Markdown tables
- txt: Plain text formatted output
include_vector_data: Whether to include data from the vector database.
"""
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(
aexport_data(
chunk_entity_relation_graph,
entities_vdb,
relationships_vdb,
output_path,
file_format,
include_vector_data,
)
)
def lazy_external_import(module_name: str, class_name: str) -> Callable[..., Any]:
"""Lazily import a class from an external module based on the package of the caller."""
# Get the caller's module and package
import inspect
caller_frame = inspect.currentframe().f_back
module = inspect.getmodule(caller_frame)
package = module.__package__ if module else None
def import_class(*args: Any, **kwargs: Any):
import importlib
module = importlib.import_module(module_name, package=package)
cls = getattr(module, class_name)
return cls(*args, **kwargs)
return import_class
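# Illustrative: defer a heavy optional backend import until the class is first
# instantiated. The module path, class name and constructor arguments below are
# hypothetical placeholders following the storage-backend pattern.
#
#     Neo4JStorage = lazy_external_import(".kg.neo4j_impl", "Neo4JStorage")
#     storage = Neo4JStorage(namespace="chunks", global_config=config)  # import happens here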
async def update_chunk_cache_list(
chunk_id: str,
text_chunks_storage: "BaseKVStorage",
cache_keys: list[str],
cache_scenario: str = "batch_update",
) -> None:
"""Update chunk's llm_cache_list with the given cache keys
Args:
chunk_id: Chunk identifier
text_chunks_storage: Text chunks storage instance
cache_keys: List of cache keys to add to the list
cache_scenario: Description of the cache scenario for logging
"""
if not cache_keys:
return
try:
chunk_data = await text_chunks_storage.get_by_id(chunk_id)
if chunk_data:
# Ensure llm_cache_list exists
if "llm_cache_list" not in chunk_data:
chunk_data["llm_cache_list"] = []
# Add cache keys to the list if not already present
existing_keys = set(chunk_data["llm_cache_list"])
new_keys = [key for key in cache_keys if key not in existing_keys]
if new_keys:
chunk_data["llm_cache_list"].extend(new_keys)
# Update the chunk in storage
await text_chunks_storage.upsert({chunk_id: chunk_data})
logger.debug(
f"Updated chunk {chunk_id} with {len(new_keys)} cache keys ({cache_scenario})"
)
except Exception as e:
logger.warning(
f"Failed to update chunk {chunk_id} with cache references on {cache_scenario}: {e}"
)
def remove_think_tags(text: str) -> str:
    """Remove <think>...</think> reasoning blocks from LLM output."""
    # Strip complete <think>...</think> blocks, then any unterminated opening tag
    # left by a truncated response, and return the trimmed remainder.
    text = re.sub(r"<think>.*?</think>", "", text, flags=re.DOTALL | re.IGNORECASE)
    text = re.sub(r"<think>.*$", "", text, flags=re.DOTALL | re.IGNORECASE)
    return text.strip()