From 9d73b493c8cd26251c1eb4407b5ad49a32baa8b7 Mon Sep 17 00:00:00 2001
From: Igor Ilic
Date: Tue, 20 Jan 2026 21:29:29 +0100
Subject: [PATCH] refactor: add support for Ollama embedding size definition

---
 .../vector/embeddings/OllamaEmbeddingEngine.py | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/cognee/infrastructure/databases/vector/embeddings/OllamaEmbeddingEngine.py b/cognee/infrastructure/databases/vector/embeddings/OllamaEmbeddingEngine.py
index 74d33a76c..6afd056de 100644
--- a/cognee/infrastructure/databases/vector/embeddings/OllamaEmbeddingEngine.py
+++ b/cognee/infrastructure/databases/vector/embeddings/OllamaEmbeddingEngine.py
@@ -57,7 +57,7 @@ class OllamaEmbeddingEngine(EmbeddingEngine):
         model: Optional[str] = "avr/sfr-embedding-mistral:latest",
         dimensions: Optional[int] = 1024,
         max_completion_tokens: int = 512,
-        endpoint: Optional[str] = "http://localhost:11434/api/embeddings",
+        endpoint: Optional[str] = "http://localhost:11434/api/embed",
         huggingface_tokenizer: str = "Salesforce/SFR-Embedding-Mistral",
         batch_size: int = 100,
     ):
@@ -93,6 +93,10 @@ class OllamaEmbeddingEngine(EmbeddingEngine):
         if self.mock:
             return [[0.0] * self.dimensions for _ in text]
 
+        # Handle case when a single string is passed instead of a list
+        if not isinstance(text, list):
+            text = [text]
+
         embeddings = await asyncio.gather(*[self._get_embedding(prompt) for prompt in text])
         return embeddings
 
@@ -107,7 +111,12 @@ class OllamaEmbeddingEngine(EmbeddingEngine):
         """
         Internal method to call the Ollama embeddings endpoint for a single prompt.
         """
-        payload = {"model": self.model, "prompt": prompt, "input": prompt}
+        payload = {
+            "model": self.model,
+            "prompt": prompt,
+            "input": prompt,
+            "dimensions": self.dimensions,
+        }
 
         headers = {}
         api_key = os.getenv("LLM_API_KEY")
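
Reviewer note (appended, not part of the patch): the sketch below reproduces the POST that the updated payload corresponds to, for anyone who wants to exercise the new request shape outside of cognee. The model name, sample text, and response handling are illustrative assumptions; "dimensions" is simply forwarded the way the diff does it, and an Ollama server that does not recognize the field may ignore it, while the legacy "prompt" key is kept alongside "input" for compatibility with the old /api/embeddings route.

    import requests

    ENDPOINT = "http://localhost:11434/api/embed"  # new default endpoint from this patch

    payload = {
        "model": "avr/sfr-embedding-mistral:latest",        # default model in the engine
        "prompt": "knowledge graphs enrich retrieval",       # legacy field, kept as in the diff
        "input": "knowledge graphs enrich retrieval",        # field used by /api/embed
        "dimensions": 1024,  # forwarded as in the diff; server support is not guaranteed
    }

    response = requests.post(ENDPOINT, json=payload, timeout=60)
    response.raise_for_status()
    data = response.json()

    # /api/embed replies with {"embeddings": [[...], ...]}; the old /api/embeddings
    # route replied with a single {"embedding": [...]}. Handle either shape here.
    vector = data["embeddings"][0] if "embeddings" in data else data["embedding"]
    print(len(vector))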