From 2fb57e767dcb5c24ec3d11527701b4ef0f3d7277 Mon Sep 17 00:00:00 2001
From: yangdx
Date: Fri, 14 Nov 2025 22:56:03 +0800
Subject: [PATCH] Fix embedding token limit initialization order

* Capture max_token_size before applying the decorator
* Apply the priority wrapper only after capturing the attribute
* Prevent the decorator from stripping dataclass attributes
* Ensure embedding_token_limit is properly set

---
 lightrag/lightrag.py | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/lightrag/lightrag.py b/lightrag/lightrag.py
index 6742a498..f9260332 100644
--- a/lightrag/lightrag.py
+++ b/lightrag/lightrag.py
@@ -523,18 +523,22 @@ class LightRAG:
         logger.debug(f"LightRAG init with param:\n {_print_config}\n")
 
         # Init Embedding
+        # Step 1: Capture max_token_size before applying decorator (decorator strips dataclass attributes)
+        embedding_max_token_size = None
+        if self.embedding_func and hasattr(self.embedding_func, "max_token_size"):
+            embedding_max_token_size = self.embedding_func.max_token_size
+            logger.debug(
+                f"Captured embedding max_token_size: {embedding_max_token_size}"
+            )
+        self.embedding_token_limit = embedding_max_token_size
+
+        # Step 2: Apply priority wrapper decorator
         self.embedding_func = priority_limit_async_func_call(
             self.embedding_func_max_async,
             llm_timeout=self.default_embedding_timeout,
             queue_name="Embedding func",
         )(self.embedding_func)
 
-        # Initialize embedding_token_limit from embedding_func
-        if self.embedding_func and hasattr(self.embedding_func, "max_token_size"):
-            self.embedding_token_limit = self.embedding_func.max_token_size
-        else:
-            self.embedding_token_limit = None
-
         # Initialize all storages
         self.key_string_value_json_storage_cls: type[BaseKVStorage] = (
             self._get_storage_class(self.kv_storage)
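
Reviewer note (illustration only, not part of the patch): the root cause is
that priority_limit_async_func_call returns a new wrapper callable, so
attributes defined on the original embedding-function dataclass instance
(such as max_token_size) are no longer reachable once the wrapper is applied.
The sketch below reproduces that failure mode with toy stand-ins;
EmbeddingFunc and limit_async_func_call here are simplified hypothetical
versions, not the real lightrag implementations.

    import asyncio
    from dataclasses import dataclass


    @dataclass
    class EmbeddingFunc:
        """Toy stand-in for the embedding-function dataclass."""
        max_token_size: int

        async def __call__(self, texts):
            return [[0.0] * 4 for _ in texts]  # dummy embedding vectors


    def limit_async_func_call(max_async):
        """Toy concurrency limiter: returns a *new* function object, so
        attributes on the wrapped callable are not carried over."""
        semaphore = asyncio.Semaphore(max_async)

        def decorator(func):
            async def wrapper(*args, **kwargs):
                async with semaphore:
                    return await func(*args, **kwargs)
            return wrapper

        return decorator


    # Old order: wrap first, then read the attribute -> it is gone.
    func = EmbeddingFunc(max_token_size=8192)
    func = limit_async_func_call(4)(func)
    print(hasattr(func, "max_token_size"))  # False

    # Patched order: capture first, then wrap.
    func = EmbeddingFunc(max_token_size=8192)
    token_limit = getattr(func, "max_token_size", None)
    func = limit_async_func_call(4)(func)
    print(token_limit)  # 8192 -- survives wrapping

An alternative fix would be to copy the attribute onto the wrapper
(functools.wraps, for instance, merges the wrapped object's __dict__ into
the wrapper), but capturing the value before wrapping, as the patch does,
keeps the initialization independent of the wrapper's internals.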