From f637f80d7ad720b457df23e08bdd776bf9d00055 Mon Sep 17 00:00:00 2001
From: Faizan Shaikh
Date: Fri, 19 Dec 2025 18:35:55 +0530
Subject: [PATCH] fix: handle provider prefix in LiteLLMEmbeddingEngine tokenizer loading

Signed-off-by: Faizan Shaikh
---
 .../databases/vector/embeddings/LiteLLMEmbeddingEngine.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/cognee/infrastructure/databases/vector/embeddings/LiteLLMEmbeddingEngine.py b/cognee/infrastructure/databases/vector/embeddings/LiteLLMEmbeddingEngine.py
index 12de57617..7aeb39b59 100644
--- a/cognee/infrastructure/databases/vector/embeddings/LiteLLMEmbeddingEngine.py
+++ b/cognee/infrastructure/databases/vector/embeddings/LiteLLMEmbeddingEngine.py
@@ -219,7 +219,7 @@ class LiteLLMEmbeddingEngine(EmbeddingEngine):
         else:
             try:
                 tokenizer = HuggingFaceTokenizer(
-                    model=self.model.replace("hosted_vllm/", ""),
+                    model=self.model.replace("hosted_vllm/", "").replace("openai/", ""),
                     max_completion_tokens=self.max_completion_tokens,
                 )
             except Exception as e:
@@ -231,3 +231,4 @@
 
         logger.debug(f"Tokenizer loaded for model: {self.model}")
         return tokenizer
+
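
Note (reviewer sketch, not part of the patch): the chained str.replace() calls
strip the provider prefix wherever it occurs in the model string, not only at
the start. A minimal prefix-aware alternative is sketched below; the helper
name strip_provider_prefix and the KNOWN_PREFIXES tuple are illustrative and
do not exist in cognee.

    # Illustrative helper: strip at most one leading LiteLLM-style provider
    # prefix before handing the model name to the HuggingFace tokenizer.
    KNOWN_PREFIXES = ("hosted_vllm/", "openai/")

    def strip_provider_prefix(model: str) -> str:
        for prefix in KNOWN_PREFIXES:
            if model.startswith(prefix):
                return model[len(prefix):]
        return model

    # Only a leading prefix is removed; plain HuggingFace ids pass through.
    assert strip_provider_prefix("openai/BAAI/bge-large-en-v1.5") == "BAAI/bge-large-en-v1.5"
    assert strip_provider_prefix("hosted_vllm/intfloat/e5-large-v2") == "intfloat/e5-large-v2"
    assert strip_provider_prefix("intfloat/e5-large-v2") == "intfloat/e5-large-v2"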