From d62c203147deb942758ba087e6efbb416185a50c Mon Sep 17 00:00:00 2001
From: Daniel Chalef <131175+danielchalef@users.noreply.github.com>
Date: Tue, 26 Aug 2025 14:49:51 -0700
Subject: [PATCH] docs: Update Ollama integration to use OpenAIGenericClient (#866)

- Replace OpenAIClient with OpenAIGenericClient in Ollama documentation
- Add bash code block formatting for model installation commands
- Update API key placeholder from 'abc' to 'ollama' for clarity
- Add comment clarifying Ollama's OpenAI-compatible endpoint

Co-authored-by: claude[bot] <209825114+claude[bot]@users.noreply.github.com>
Co-authored-by: Daniel Chalef
---
 README.md | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/README.md b/README.md
index b2560dcd..e929086e 100644
--- a/README.md
+++ b/README.md
@@ -431,25 +431,27 @@ The Gemini reranker uses the `gemini-2.5-flash-lite-preview-06-17` model by defa
 Graphiti supports Ollama for running local LLMs and embedding models via Ollama's OpenAI-compatible API. This is ideal for privacy-focused applications or when you want to avoid API costs.
 
 Install the models:
+```bash
 ollama pull deepseek-r1:7b # LLM
 ollama pull nomic-embed-text # embeddings
+```
 
 ```python
 from graphiti_core import Graphiti
 from graphiti_core.llm_client.config import LLMConfig
-from graphiti_core.llm_client.openai_client import OpenAIClient
+from graphiti_core.llm_client.openai_generic_client import OpenAIGenericClient
 from graphiti_core.embedder.openai import OpenAIEmbedder, OpenAIEmbedderConfig
 from graphiti_core.cross_encoder.openai_reranker_client import OpenAIRerankerClient
 
 # Configure Ollama LLM client
 llm_config = LLMConfig(
-    api_key="abc", # Ollama doesn't require a real API key
+    api_key="ollama", # Ollama doesn't require a real API key, but some placeholder is needed
     model="deepseek-r1:7b",
     small_model="deepseek-r1:7b",
-    base_url="http://localhost:11434/v1", # Ollama provides this port
+    base_url="http://localhost:11434/v1", # Ollama's OpenAI-compatible endpoint
 )
 
-llm_client = OpenAIClient(config=llm_config)
+llm_client = OpenAIGenericClient(config=llm_config)
 
 # Initialize Graphiti with Ollama clients
 graphiti = Graphiti(
@@ -459,7 +461,7 @@ graphiti = Graphiti(
     llm_client=llm_client,
     embedder=OpenAIEmbedder(
         config=OpenAIEmbedderConfig(
-            api_key="abc",
+            api_key="ollama", # Placeholder API key
            embedding_model="nomic-embed-text",
            embedding_dim=768,
            base_url="http://localhost:11434/v1",
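
After the patch, a minimal usage sketch of the Ollama-backed configuration the patched README describes. It is not part of the diff: it assumes the Graphiti quickstart API (`build_indices_and_constraints`, `add_episode`, `search`) behaves the same with these clients, and the episode name, episode text, and the `smoke_test` wrapper are purely illustrative.

```python
import asyncio
from datetime import datetime, timezone

from graphiti_core.nodes import EpisodeType


async def smoke_test(graphiti):
    # Assumes `graphiti` was constructed with the Ollama clients configured above.
    # One-time setup of graph indices and constraints in Neo4j.
    await graphiti.build_indices_and_constraints()

    # Ingest a single text episode through the Ollama-backed LLM and embedder.
    await graphiti.add_episode(
        name="ollama-smoke-test",  # illustrative episode name
        episode_body="Kamala Harris was the Attorney General of California.",
        source=EpisodeType.text,
        source_description="manual smoke test",  # illustrative description
        reference_time=datetime.now(timezone.utc),
    )

    # Hybrid search over the resulting graph; results are edges carrying a `fact` field.
    results = await graphiti.search("Who was the Attorney General of California?")
    for edge in results:
        print(edge.fact)


# asyncio.run(smoke_test(graphiti))
```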