From 39b49e92ffa1fe62d8a00932010d5fb940dec030 Mon Sep 17 00:00:00 2001
From: yangdx
Date: Fri, 14 Nov 2025 20:58:41 +0800
Subject: [PATCH] Convert embedding_token_limit from property to field with __post_init__
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

• Remove property decorator
• Add field with init=False
• Set value in __post_init__ method
• embedding_token_limit is now in config dictionary

---
 lightrag/lightrag.py | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/lightrag/lightrag.py b/lightrag/lightrag.py
index 72a4dc6d..1f0aa9d9 100644
--- a/lightrag/lightrag.py
+++ b/lightrag/lightrag.py
@@ -276,12 +276,8 @@ class LightRAG:
     embedding_func: EmbeddingFunc | None = field(default=None)
     """Function for computing text embeddings. Must be set before use."""
 
-    @property
-    def embedding_token_limit(self) -> int | None:
-        """Get the token limit for embedding model from embedding_func."""
-        if self.embedding_func and hasattr(self.embedding_func, "max_token_size"):
-            return self.embedding_func.max_token_size
-        return None
+    embedding_token_limit: int | None = field(default=None, init=False)
+    """Token limit for embedding model. Set automatically from embedding_func.max_token_size in __post_init__."""
 
     embedding_batch_num: int = field(default=int(os.getenv("EMBEDDING_BATCH_NUM", 10)))
     """Batch size for embedding computations."""
@@ -532,6 +528,12 @@ class LightRAG:
             queue_name="Embedding func",
         )(self.embedding_func)
 
+        # Initialize embedding_token_limit from embedding_func
+        if self.embedding_func and hasattr(self.embedding_func, "max_token_size"):
+            self.embedding_token_limit = self.embedding_func.max_token_size
+        else:
+            self.embedding_token_limit = None
+
         # Initialize all storages
         self.key_string_value_json_storage_cls: type[BaseKVStorage] = (
             self._get_storage_class(self.kv_storage)
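
Note: for context, here is a minimal standalone sketch of the pattern this patch applies. The class names below (EmbeddingFunc, LightRAGLike) are simplified stand-ins for illustration, not the actual LightRAG definitions. A dataclass field declared with init=False is excluded from the generated __init__ signature and is populated in __post_init__ instead, so unlike a @property it becomes a real instance attribute that shows up when the object is converted to a config dictionary (e.g. via dataclasses.asdict).

    from dataclasses import dataclass, field, asdict


    @dataclass
    class EmbeddingFunc:
        # Simplified stand-in for the real embedding function wrapper.
        max_token_size: int = 8192


    @dataclass
    class LightRAGLike:
        # Simplified stand-in for LightRAG, showing only the relevant fields.
        embedding_func: EmbeddingFunc | None = None

        # init=False keeps the field out of the generated __init__ signature;
        # its value is derived in __post_init__ rather than passed by the caller.
        embedding_token_limit: int | None = field(default=None, init=False)

        def __post_init__(self):
            if self.embedding_func and hasattr(self.embedding_func, "max_token_size"):
                self.embedding_token_limit = self.embedding_func.max_token_size
            else:
                self.embedding_token_limit = None


    rag = LightRAGLike(embedding_func=EmbeddingFunc(max_token_size=8192))
    # Unlike a @property, the value is a real field, so it appears in asdict():
    print(asdict(rag)["embedding_token_limit"])  # 8192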