Fix embedding token limit initialization order
* Capture max_token_size before decorator
* Apply wrapper after capturing attribute
* Prevent decorator from stripping dataclass attributes
* Ensure token limit is properly set
parent 963a0a5db1
commit de4412dd40
1 changed file with 10 additions and 6 deletions
@@ -522,18 +522,22 @@ class LightRAG:
         logger.debug(f"LightRAG init with param:\n {_print_config}\n")
 
         # Init Embedding
+        # Step 1: Capture max_token_size before applying decorator (decorator strips dataclass attributes)
+        embedding_max_token_size = None
+        if self.embedding_func and hasattr(self.embedding_func, "max_token_size"):
+            embedding_max_token_size = self.embedding_func.max_token_size
+            logger.debug(
+                f"Captured embedding max_token_size: {embedding_max_token_size}"
+            )
+        self.embedding_token_limit = embedding_max_token_size
+
+        # Step 2: Apply priority wrapper decorator
         self.embedding_func = priority_limit_async_func_call(
             self.embedding_func_max_async,
             llm_timeout=self.default_embedding_timeout,
             queue_name="Embedding func",
         )(self.embedding_func)
 
-        # Initialize embedding_token_limit from embedding_func
-        if self.embedding_func and hasattr(self.embedding_func, "max_token_size"):
-            self.embedding_token_limit = self.embedding_func.max_token_size
-        else:
-            self.embedding_token_limit = None
-
         # Initialize all storages
         self.key_string_value_json_storage_cls: type[BaseKVStorage] = (
             self._get_storage_class(self.kv_storage)
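
For context, here is a minimal sketch (not LightRAG's actual implementation) of why the capture order matters: a plain wrapper function returned by a concurrency-limiting decorator does not carry the dataclass attributes of the object it wraps, so max_token_size must be read before wrapping. EmbeddingFunc and limit_async_calls below are hypothetical stand-ins for the real embedding wrapper and priority_limit_async_func_call.

# Minimal sketch, assuming a dataclass-style embedding wrapper; names here are
# hypothetical stand-ins, not LightRAG's actual classes or decorator.
from dataclasses import dataclass
from typing import Any, Callable


@dataclass
class EmbeddingFunc:
    # Carries metadata (e.g. max_token_size) alongside the embedding callable.
    max_token_size: int
    func: Callable[..., Any]

    async def __call__(self, *args, **kwargs):
        return await self.func(*args, **kwargs)


def limit_async_calls(max_async: int):
    # Stand-in for a concurrency-limiting decorator: it returns a plain wrapper
    # function, so attributes of the original EmbeddingFunc are no longer
    # reachable on the result.
    def decorator(func):
        async def wrapper(*args, **kwargs):
            # A real implementation would gate concurrency (semaphore/queue).
            return await func(*args, **kwargs)
        return wrapper
    return decorator


async def embed(texts):
    return [[0.0, 0.0, 0.0] for _ in texts]


embedding_func = EmbeddingFunc(max_token_size=8192, func=embed)

# Old order: wrap first, then read the attribute -> it is gone.
wrapped = limit_async_calls(4)(embedding_func)
print(hasattr(wrapped, "max_token_size"))  # False

# Fixed order (what the commit does): capture first, then wrap.
token_limit = getattr(embedding_func, "max_token_size", None)
wrapped = limit_async_calls(4)(embedding_func)
print(token_limit)  # 8192

The fixed initialization in the diff follows the same pattern: read the attribute into embedding_token_limit first (Step 1), then replace self.embedding_func with the wrapped callable (Step 2).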