Fix embedding token limit initialization order

* Capture max_token_size before decorator
* Apply wrapper after capturing attribute
* Prevent decorator from stripping dataclass attributes
* Ensure token limit is properly set
This commit is contained in:
yangdx 2025-11-14 22:56:03 +08:00
parent 6b2af2b579
commit 2fb57e767d

View file

@ -523,18 +523,22 @@ class LightRAG:
logger.debug(f"LightRAG init with param:\n {_print_config}\n")
# Init Embedding
# Step 1: Capture max_token_size before applying decorator (decorator strips dataclass attributes)
embedding_max_token_size = None
if self.embedding_func and hasattr(self.embedding_func, "max_token_size"):
embedding_max_token_size = self.embedding_func.max_token_size
logger.debug(
f"Captured embedding max_token_size: {embedding_max_token_size}"
)
self.embedding_token_limit = embedding_max_token_size
# Step 2: Apply priority wrapper decorator
self.embedding_func = priority_limit_async_func_call(
self.embedding_func_max_async,
llm_timeout=self.default_embedding_timeout,
queue_name="Embedding func",
)(self.embedding_func)
# Initialize embedding_token_limit from embedding_func
if self.embedding_func and hasattr(self.embedding_func, "max_token_size"):
self.embedding_token_limit = self.embedding_func.max_token_size
else:
self.embedding_token_limit = None
# Initialize all storages
self.key_string_value_json_storage_cls: type[BaseKVStorage] = (
self._get_storage_class(self.kv_storage)