fix: handle provider prefix in LiteLLMEmbeddingEngine tokenizer loading

Signed-off-by: Faizan Shaikh <faizansk9292@gmail.com>
Faizan Shaikh 2025-12-19 18:35:55 +05:30
parent 1cf0a202ce
commit f637f80d7a


@@ -219,7 +219,7 @@ class LiteLLMEmbeddingEngine(EmbeddingEngine):
         else:
             try:
                 tokenizer = HuggingFaceTokenizer(
-                    model=self.model.replace("hosted_vllm/", ""),
+                    model=self.model.replace("hosted_vllm/", "").replace("openai/", ""),
                     max_completion_tokens=self.max_completion_tokens,
                 )
             except Exception as e:
@@ -231,3 +231,4 @@ class LiteLLMEmbeddingEngine(EmbeddingEngine):
         logger.debug(f"Tokenizer loaded for model: {self.model}")
         return tokenizer
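
Note (not part of the commit): the chained str.replace() calls in the diff strip the "hosted_vllm/" and "openai/" LiteLLM provider prefixes so that a plain Hugging Face model identifier is passed to HuggingFaceTokenizer. A minimal standalone sketch of the same idea is shown below; the names strip_provider_prefix and KNOWN_PROVIDER_PREFIXES are hypothetical and chosen only for illustration.

    # Illustrative sketch only: strip a known LiteLLM provider prefix from a
    # model string before loading a Hugging Face tokenizer for it.
    KNOWN_PROVIDER_PREFIXES = ("hosted_vllm/", "openai/")  # assumed prefix set

    def strip_provider_prefix(model: str) -> str:
        """Return the model name without a leading provider prefix, if any."""
        for prefix in KNOWN_PROVIDER_PREFIXES:
            if model.startswith(prefix):
                return model[len(prefix):]
        return model

    # Quick checks of the expected behavior.
    assert strip_provider_prefix("openai/text-embedding-3-small") == "text-embedding-3-small"
    assert strip_provider_prefix("hosted_vllm/BAAI/bge-m3") == "BAAI/bge-m3"
    assert strip_provider_prefix("text-embedding-3-small") == "text-embedding-3-small"

Unlike str.replace(), this sketch only removes a leading prefix, so a model name that happens to contain "openai/" elsewhere in the string would be left unchanged; the commit itself keeps the simpler replace-based approach.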