Update LiteLLMEmbeddingEngine.py (#1205)
This allows handling the issue that occurs when the user supplies a custom embedding model and LLM and passes the hosted_vllm provider prefix, as described in the LiteLLM documentation. <!-- .github/pull_request_template.md --> ## Description <!-- This enables the user to use the hosted_vllm prefix with LiteLLM; it applies only to custom embedding models — specifically Hugging Face models, whose tokenizer lookup fails if the prefix is not stripped from the model name. --> ## DCO Affirmation I affirm that all code in every commit of this pull request conforms to the terms of the Topoteretes Developer Certificate of Origin.
This commit is contained in:
parent
b54e843951
commit
a9e74dac42
1 changed files with 1 additions and 1 deletions
|
|
@ -186,7 +186,7 @@ class LiteLLMEmbeddingEngine(EmbeddingEngine):
|
|||
tokenizer = MistralTokenizer(model=model, max_tokens=self.max_tokens)
|
||||
else:
|
||||
try:
|
||||
tokenizer = HuggingFaceTokenizer(model=self.model, max_tokens=self.max_tokens)
|
||||
tokenizer = HuggingFaceTokenizer(model=self.model.replace('hosted_vllm/',""), max_tokens=self.max_tokens)
|
||||
except Exception as e:
|
||||
logger.warning(f"Could not get tokenizer from HuggingFace due to: {e}")
|
||||
logger.info("Switching to TikToken default tokenizer.")
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue