Fix graphiti-mcp-varming package for PyPI publication

MCP Server Package Fixes:
- Add build-system configuration to pyproject.toml
- Fix module imports to use relative imports for proper packaging
- Fix TypedDict import for Python 3.10 compatibility
- Remove unsupported MCP SDK parameters (tags, meta)
- Add GitHub Actions workflow for automatic PyPI publishing
- Add PyPI publishing documentation and checklist

Code Quality Improvements:
- Fix code formatting in graphiti_core (line length, whitespace)

This prepares v1.0.0 for publication to PyPI, enabling users to install
with: uvx graphiti-mcp-varming

🤖 Generated with Claude Code (https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
Lars Varming 2025-11-09 19:30:43 +01:00
parent b970f19d31
commit eddeda67b3
11 changed files with 457 additions and 90 deletions

.github/workflows/publish-mcp-pypi.yml

@@ -0,0 +1,50 @@
name: Publish MCP Server to PyPI

on:
  push:
    tags:
      - 'mcp-v*.*.*' # Triggers on tags like mcp-v1.0.0
  workflow_dispatch: # Allow manual triggering

jobs:
  publish:
    name: Publish to PyPI
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Install uv
        uses: astral-sh/setup-uv@v4
        with:
          enable-cache: true

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.10'

      - name: Build package
        working-directory: mcp_server
        run: |
          # Remove local graphiti-core override for PyPI build
          sed -i '/\[tool\.uv\.sources\]/,/graphiti-core/d' pyproject.toml
          # Build the package
          uv build

      - name: Publish to PyPI
        working-directory: mcp_server
        env:
          UV_PUBLISH_TOKEN: ${{ secrets.PYPI_API_TOKEN }}
        run: |
          uv publish

      - name: Create GitHub Release
        uses: softprops/action-gh-release@v1
        with:
          files: mcp_server/dist/*
          generate_release_notes: true
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
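The `sed` step is a range delete: it removes everything from the `[tool.uv.sources]` header through the `graphiti-core` override line, so the wheel built in CI resolves `graphiti-core` from PyPI instead of the local checkout.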

@@ -106,7 +106,7 @@ class Neo4jDriver(GraphDriver):
for query in index_queries
]
)
async def health_check(self) -> None:
"""Check Neo4j connectivity by running the driver's verify_connectivity method."""
try:
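For reference, a minimal sketch of the complete method this hunk introduces, assuming (as elsewhere in `graphiti_core`) that the underlying neo4j `AsyncDriver` is stored on `self.client`:

```python
import logging

from neo4j import AsyncGraphDatabase

logger = logging.getLogger(__name__)


class Neo4jDriver:
    """Sketch only: the real class wraps a neo4j AsyncDriver as self.client."""

    def __init__(self, uri: str, user: str, password: str):
        self.client = AsyncGraphDatabase.driver(uri, auth=(user, password))

    async def health_check(self) -> None:
        """Check Neo4j connectivity by running the driver's verify_connectivity method."""
        try:
            # verify_connectivity() acquires a connection and raises on failure
            await self.client.verify_connectivity()
        except Exception as e:
            logger.error(f'Neo4j health check failed: {e}')
            raise
```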

@@ -166,13 +166,17 @@ class BaseOpenAIClient(LLMClient):
except openai.RateLimitError as e:
raise RateLimitError from e
except openai.AuthenticationError as e:
logger.error(f'OpenAI Authentication Error: {e}. Please verify your API key is correct.')
logger.error(
f'OpenAI Authentication Error: {e}. Please verify your API key is correct.'
)
raise
except Exception as e:
# Provide more context for connection errors
error_msg = str(e)
if 'Connection error' in error_msg or 'connection' in error_msg.lower():
logger.error(f'Connection error communicating with OpenAI API. Please check your network connection and API key. Error: {e}')
logger.error(
f'Connection error communicating with OpenAI API. Please check your network connection and API key. Error: {e}'
)
else:
logger.error(f'Error in generating LLM response: {e}')
raise

@@ -74,7 +74,9 @@ class OpenAIClient(BaseOpenAIClient):
):
"""Create a structured completion using OpenAI's beta parse API."""
# Reasoning models (gpt-5 family) don't support temperature
is_reasoning_model = model.startswith('gpt-5') or model.startswith('o1') or model.startswith('o3')
is_reasoning_model = (
model.startswith('gpt-5') or model.startswith('o1') or model.startswith('o3')
)
response = await self.client.responses.parse(
model=model,
@@ -100,7 +102,9 @@
):
"""Create a regular completion with JSON format."""
# Reasoning models (gpt-5 family) don't support temperature
is_reasoning_model = model.startswith('gpt-5') or model.startswith('o1') or model.startswith('o3')
is_reasoning_model = (
model.startswith('gpt-5') or model.startswith('o1') or model.startswith('o3')
)
return await self.client.chat.completions.create(
model=model,
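Both hunks end before the flag is consumed; a minimal sketch of the usual pattern, using a hypothetical helper name that is not part of this diff:

```python
def _completion_kwargs(model: str, temperature: float | None) -> dict:
    """Build request kwargs, omitting temperature for reasoning models."""
    is_reasoning_model = (
        model.startswith('gpt-5') or model.startswith('o1') or model.startswith('o3')
    )
    kwargs: dict = {'model': model}
    if temperature is not None and not is_reasoning_model:
        kwargs['temperature'] = temperature  # only passed to models that accept it
    return kwargs


# _completion_kwargs('gpt-5-mini', 0.7) -> {'model': 'gpt-5-mini'}
# _completion_kwargs('gpt-4o', 0.7)     -> {'model': 'gpt-4o', 'temperature': 0.7}
```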

@@ -0,0 +1,64 @@
# Publishing Checklist
Use this checklist for your first PyPI publication.
## Pre-Publishing Setup
- [ ] **Update email in `pyproject.toml`** (optional but recommended)
- File: `mcp_server/pyproject.toml`
- Line: `{name = "Varming", email = "your-email@example.com"}`
- [ ] **Add PyPI token to GitHub Secrets**
- URL: https://github.com/Varming73/graphiti/settings/secrets/actions
- Secret name: `PYPI_API_TOKEN`
- Secret value: Your PyPI token from https://pypi.org/manage/account/token/
## Publishing Steps
- [ ] **Commit all changes**
```bash
git add .
git commit -m "Prepare graphiti-mcp-varming v1.0.0 for PyPI"
git push
```
- [ ] **Create and push release tag**
```bash
git tag mcp-v1.0.0
git push origin mcp-v1.0.0
```
- [ ] **Monitor GitHub Actions workflow**
- URL: https://github.com/Varming73/graphiti/actions
- Workflow name: "Publish MCP Server to PyPI"
- Expected duration: 2-3 minutes
## Post-Publishing Verification
- [ ] **Check PyPI page**
- URL: https://pypi.org/project/graphiti-mcp-varming/
- Verify version shows as `1.0.0`
- Check description and links are correct
- [ ] **Test installation**
```bash
uvx graphiti-mcp-varming --help
```
- [ ] **Test in LibreChat** (if applicable)
- Update `librechat.yaml` with `uvx graphiti-mcp-varming`
- Restart LibreChat
- Verify tools appear in UI
## If Something Goes Wrong
Common issues and their solutions (covered in detail in `PYPI_PUBLISHING.md`):
- Authentication error → Check token in GitHub secrets
- File already exists → Version already published, bump version number
- Workflow doesn't trigger → Check tag format is `mcp-v*.*.*`
- Package not found → Wait a few minutes for PyPI to propagate
---
**After the first successful publish, this checklist can be deleted!**

@@ -0,0 +1,126 @@
# PyPI Publishing Setup Instructions
This guide explains how to publish the `graphiti-mcp-varming` package to PyPI.
## One-Time Setup
### 1. Add PyPI Token to GitHub Secrets
1. Go to your repository on GitHub: https://github.com/Varming73/graphiti
2. Click **Settings** → **Secrets and variables** → **Actions**
3. Click **New repository secret**
4. Name: `PYPI_API_TOKEN`
5. Value: Paste your PyPI API token (starts with `pypi-`)
6. Click **Add secret**
## Publishing a New Version
### Option 1: Automatic Publishing (Recommended)
1. Update the version in `mcp_server/pyproject.toml`:
```toml
version = "1.0.1" # Increment version
```
2. Commit the change:
```bash
cd mcp_server
git add pyproject.toml
git commit -m "Bump MCP server version to 1.0.1"
git push
```
3. Create and push a tag:
```bash
git tag mcp-v1.0.1
git push origin mcp-v1.0.1
```
4. GitHub Actions will automatically:
- Build the package
- Publish to PyPI
- Create a GitHub release
5. Monitor the workflow:
- Go to **Actions** tab in GitHub
- Watch the "Publish MCP Server to PyPI" workflow
### Option 2: Manual Publishing
If you prefer to publish manually:
```bash
cd mcp_server
# Remove local graphiti-core override
sed -i.bak '/\[tool\.uv\.sources\]/,/graphiti-core/d' pyproject.toml
# Build the package
uv build
# Publish to PyPI
uv publish --token your-pypi-token-here
# Restore the backup if needed for local development
mv pyproject.toml.bak pyproject.toml
```
## After Publishing
Users can install your package with:
```bash
# Basic installation (Neo4j support included)
uvx graphiti-mcp-varming
# With FalkorDB support
uvx --with graphiti-mcp-varming[falkordb] graphiti-mcp-varming
# With all LLM providers (Anthropic, Groq, Gemini, Voyage, etc.)
uvx --with graphiti-mcp-varming[providers] graphiti-mcp-varming
# With everything
uvx --with graphiti-mcp-varming[all] graphiti-mcp-varming
```
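Note: in zsh, quote the extras so the brackets are not interpreted as a glob pattern, e.g. `uvx --with 'graphiti-mcp-varming[all]' graphiti-mcp-varming`.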
## Version Numbering
Follow [Semantic Versioning](https://semver.org/):
- **MAJOR** (1.0.0 → 2.0.0): Breaking changes
- **MINOR** (1.0.0 → 1.1.0): New features, backwards compatible
- **PATCH** (1.0.0 → 1.0.1): Bug fixes
## Tag Naming Convention
Use `mcp-v{VERSION}` format for tags:
- `mcp-v1.0.0` - Initial release
- `mcp-v1.0.1` - Patch release
- `mcp-v1.1.0` - Minor release
- `mcp-v2.0.0` - Major release
This distinguishes MCP server releases from graphiti-core releases.
## Troubleshooting
### Publishing Fails with "File already exists"
You tried to publish a version that already exists on PyPI. Increment the version number in `pyproject.toml` and try again.
### "Invalid or missing authentication token"
The PyPI token in GitHub secrets is incorrect or expired:
1. Generate a new token at https://pypi.org/manage/account/token/
2. Update the `PYPI_API_TOKEN` secret in GitHub
### Workflow doesn't trigger
Make sure:
- Tag matches pattern `mcp-v*.*.*`
- Tag is pushed to GitHub: `git push origin mcp-v1.0.0`
- Workflow file is on the `main` branch
## Checking Published Package
After publishing, verify at:
- PyPI page: https://pypi.org/project/graphiti-mcp-varming/
- Test installation: `uvx graphiti-mcp-varming --help`

@@ -0,0 +1,139 @@
# PyPI Package Setup Complete! 🚀
## Summary
Your Graphiti MCP Server is now ready to be published to PyPI as `graphiti-mcp-varming`!
## What Was Done
### 1. ✅ Package Configuration (`mcp_server/pyproject.toml`)
- **Name**: `graphiti-mcp-varming` (clearly distinguishes your enhanced fork)
- **Entry point**: `graphiti-mcp-varming` command added
- **Dependencies**: Standalone package with:
- `graphiti-core>=0.16.0` (includes Neo4j support by default)
- All MCP and OpenAI dependencies
- Optional extras for FalkorDB, other LLM providers (Anthropic, Groq, Gemini, Voyage)
- **Metadata**: Proper description, keywords, classifiers for PyPI
### 2. ✅ GitHub Actions Workflow (`.github/workflows/publish-mcp-pypi.yml`)
- Automatic publishing to PyPI when you push tags like `mcp-v1.0.0`
- Builds package with `uv build`
- Publishes with `uv publish`
- Creates GitHub releases with dist files
### 3. ✅ Documentation Updates
- **`DOCS/LibreChat-Unraid-Stdio-Setup.md`**: Updated with `uvx` commands
- **`mcp_server/PYPI_PUBLISHING.md`**: Complete publishing guide
- **`mcp_server/README.md`**: Added PyPI package notice
## Next Steps (What You Need To Do)
### 1. Add PyPI Token to GitHub
1. Go to: https://github.com/Varming73/graphiti/settings/secrets/actions
2. Click **"New repository secret"**
3. Name: `PYPI_API_TOKEN`
4. Value: Your PyPI token (starts with `pypi-`)
5. Click **"Add secret"**
### 2. Update Your Email in pyproject.toml (Optional)
Edit the author line in `mcp_server/pyproject.toml` with your real email:
```toml
authors = [
{name = "Varming", email = "your-real-email@example.com"}
]
```
### 3. Publish First Version
```bash
# Make sure all changes are committed
git add .
git commit -m "Prepare graphiti-mcp-varming for PyPI publishing"
git push
# Create and push the first release tag
git tag mcp-v1.0.0
git push origin mcp-v1.0.0
```
### 4. Monitor Publishing
- Go to: https://github.com/Varming73/graphiti/actions
- Watch the "Publish MCP Server to PyPI" workflow
- Should complete in ~2-3 minutes
### 5. Verify Publication
After workflow completes:
- Check PyPI: https://pypi.org/project/graphiti-mcp-varming/
- Test installation: `uvx graphiti-mcp-varming --help`
## Usage After Publishing
Users can now install your enhanced MCP server easily:
### Basic Installation (Neo4j support included)
```bash
uvx graphiti-mcp-varming
```
### With FalkorDB Support
```bash
uvx --with graphiti-mcp-varming[falkordb] graphiti-mcp-varming
```
### With All LLM Providers
```bash
uvx --with graphiti-mcp-varming[providers] graphiti-mcp-varming
```
### With Everything
```bash
uvx --with graphiti-mcp-varming[all] graphiti-mcp-varming
```
### In LibreChat (stdio mode)
```yaml
mcpServers:
  graphiti:
    type: stdio
    command: uvx
    args:
      - graphiti-mcp-varming
    env:
      GRAPHITI_GROUP_ID: "{{LIBRECHAT_USER_ID}}"
      NEO4J_URI: "bolt://neo4j:7687"
      NEO4J_USER: "neo4j"
      NEO4J_PASSWORD: "your_password"
      OPENAI_API_KEY: "${OPENAI_API_KEY}"
```
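LibreChat expands `{{LIBRECHAT_USER_ID}}` per authenticated user, so each user's memories are written under their own `group_id`; this is the per-user isolation the stdio setup is designed for.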
## Future Releases
To publish updates:
1. Update version in `mcp_server/pyproject.toml`
2. Commit and push changes
3. Create and push new tag: `git tag mcp-v1.0.1 && git push origin mcp-v1.0.1`
4. GitHub Actions handles the rest!
## Package Features
Your `graphiti-mcp-varming` package includes:
- **Standalone**: All dependencies bundled (graphiti-core, neo4j driver, etc.)
- **Multi-database**: Neo4j (default) + optional FalkorDB support
- **Multi-LLM**: OpenAI (default) + optional Anthropic, Groq, Gemini, Azure
- **Enhanced Tools**: Your custom `get_entities_by_type` and `compare_facts_over_time`
- **Per-user Isolation**: Full support for LibreChat multi-user via stdio mode
- **Easy Install**: One command with `uvx`
## Troubleshooting
See `mcp_server/PYPI_PUBLISHING.md` for detailed troubleshooting guide.
---
**Questions?** The publishing guide has all the details: `mcp_server/PYPI_PUBLISHING.md`

@@ -1,21 +1,59 @@
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[tool.hatch.build.targets.wheel]
packages = ["src"]
[tool.hatch.metadata]
allow-direct-references = true
[project]
name = "mcp-server"
name = "graphiti-mcp-varming"
version = "1.0.0"
description = "Graphiti MCP Server"
description = "Graphiti MCP Server - Enhanced fork with additional tools by Varming"
readme = "README.md"
requires-python = ">=3.10,<4"
license = {text = "Apache-2.0"}
authors = [
{name = "Varming", email = "varming@example.com"}
]
keywords = ["mcp", "graphiti", "knowledge-graph", "llm", "ai"]
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
]
dependencies = [
"mcp>=1.21.0",
"openai>=1.91.0",
"graphiti-core[falkordb]>=0.16.0",
"graphiti-core>=0.16.0", # Includes neo4j driver by default
"pydantic-settings>=2.0.0",
"pyyaml>=6.0",
]
[project.scripts]
graphiti-mcp-varming = "src.graphiti_mcp_server:main"
[project.urls]
Homepage = "https://github.com/Varming73/graphiti"
Repository = "https://github.com/Varming73/graphiti"
Issues = "https://github.com/Varming73/graphiti/issues"
[project.optional-dependencies]
# FalkorDB support (Neo4j is included in graphiti-core by default)
falkordb = ["graphiti-core[falkordb]>=0.16.0"]
# Azure support
azure = [
"azure-identity>=1.21.0",
]
# LLM/Embedder providers
providers = [
"google-genai>=1.8.0",
"anthropic>=0.49.0",
@@ -23,6 +61,18 @@ providers = [
"voyageai>=0.2.3",
"sentence-transformers>=2.0.0",
]
# All optional features
all = [
"graphiti-core[falkordb]>=0.16.0",
"azure-identity>=1.21.0",
"google-genai>=1.8.0",
"anthropic>=0.49.0",
"groq>=0.2.0",
"voyageai>=0.2.3",
"sentence-transformers>=2.0.0",
]
dev = [
"graphiti-core>=0.16.0",
"httpx>=0.28.1",
@@ -63,8 +113,9 @@ quote-style = "single"
indent-style = "space"
docstring-code-format = true
[tool.uv.sources]
graphiti-core = { path = "../", editable = true }
# Note: For local development, you can override graphiti-core source:
# [tool.uv.sources]
# graphiti-core = { path = "../", editable = true }
[dependency-groups]
dev = [

@@ -21,8 +21,8 @@ from mcp.server.fastmcp import FastMCP
from pydantic import BaseModel
from starlette.responses import JSONResponse
from config.schema import GraphitiConfig, ServerConfig
from models.response_types import (
from .config.schema import GraphitiConfig, ServerConfig
from .models.response_types import (
EpisodeSearchResponse,
ErrorResponse,
FactSearchResponse,
@@ -31,9 +31,9 @@ from models.response_types import (
StatusResponse,
SuccessResponse,
)
from services.factories import DatabaseDriverFactory, EmbedderFactory, LLMClientFactory
from services.queue_service import QueueService
from utils.formatting import format_fact_result
from .services.factories import DatabaseDriverFactory, EmbedderFactory, LLMClientFactory
from .services.queue_service import QueueService
from .utils.formatting import format_fact_result
# Load .env file from mcp_server directory
mcp_server_dir = Path(__file__).parent.parent
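The leading dots are what make these imports survive packaging: the wheel ships everything under the `src` package (see `[tool.hatch.build.targets.wheel]` above), so `config`, `models`, `services`, and `utils` are installed as `src.config` and so on, not as top-level modules. A sketch of roughly what the console script generated from the entry point does, and why only the relative form resolves (illustrative, not the actual generated file):

```python
# The entry point  graphiti-mcp-varming = "src.graphiti_mcp_server:main"
# yields a launcher that imports main() from the installed 'src' package:
from src.graphiti_mcp_server import main

# Inside graphiti_mcp_server.py, 'config' is really 'src.config', so:
#   from .config.schema import GraphitiConfig   # resolves relative to 'src'
#   from config.schema import GraphitiConfig    # ModuleNotFoundError once installed

if __name__ == '__main__':
    main()
```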
@@ -326,12 +326,6 @@ class GraphitiService:
'idempotentHint': True,
'openWorldHint': True,
},
tags={'write', 'memory', 'ingestion', 'core'},
meta={
'version': '1.0',
'category': 'core',
'priority': 0.9,
},
)
async def add_memory(
name: str,
@@ -440,12 +434,6 @@
'idempotentHint': True,
'openWorldHint': True,
},
tags={'search', 'entities', 'memory', 'core'},
meta={
'version': '1.0',
'category': 'core',
'priority': 0.8,
},
)
async def search_nodes(
query: str,
@@ -566,12 +554,6 @@
'idempotentHint': True,
'openWorldHint': True,
},
tags={'search', 'entities', 'legacy', 'compatibility'},
meta={
'version': '1.0',
'category': 'compatibility',
'priority': 0.7,
},
)
async def search_memory_nodes(
query: str,
@@ -618,12 +600,6 @@
'idempotentHint': True,
'openWorldHint': True,
},
tags={'search', 'entities', 'browse', 'classification'},
meta={
'version': '1.0',
'category': 'discovery',
'priority': 0.75,
},
)
async def get_entities_by_type(
entity_types: list[str],
@@ -748,12 +724,6 @@
'idempotentHint': True,
'openWorldHint': True,
},
tags={'search', 'facts', 'relationships', 'memory', 'core'},
meta={
'version': '1.0',
'category': 'core',
'priority': 0.85,
},
)
async def search_memory_facts(
query: str,
@@ -849,12 +819,6 @@
'idempotentHint': True,
'openWorldHint': True,
},
tags={'search', 'facts', 'temporal', 'analysis', 'evolution'},
meta={
'version': '1.0',
'category': 'analytics',
'priority': 0.6,
},
)
async def compare_facts_over_time(
query: str,
@@ -1076,12 +1040,6 @@
'idempotentHint': True,
'openWorldHint': True,
},
tags={'delete', 'destructive', 'facts', 'admin'},
meta={
'version': '1.0',
'category': 'maintenance',
'priority': 0.3,
},
)
async def delete_entity_edge(uuid: str) -> SuccessResponse | ErrorResponse:
"""Delete a specific relationship/fact. **DESTRUCTIVE - Cannot be undone.**
@@ -1137,12 +1095,6 @@ async def delete_entity_edge(uuid: str) -> SuccessResponse | ErrorResponse:
'idempotentHint': True,
'openWorldHint': True,
},
tags={'delete', 'destructive', 'episodes', 'admin'},
meta={
'version': '1.0',
'category': 'maintenance',
'priority': 0.3,
},
)
async def delete_episode(uuid: str) -> SuccessResponse | ErrorResponse:
"""Delete a specific episode. **DESTRUCTIVE - Cannot be undone.**
@@ -1199,12 +1151,6 @@ async def delete_episode(uuid: str) -> SuccessResponse | ErrorResponse:
'idempotentHint': True,
'openWorldHint': True,
},
tags={'retrieval', 'facts', 'uuid', 'direct-access'},
meta={
'version': '1.0',
'category': 'direct-access',
'priority': 0.5,
},
)
async def get_entity_edge(uuid: str) -> dict[str, Any] | ErrorResponse:
"""Retrieve a specific relationship/fact by its UUID. **Direct lookup only.**
@@ -1254,12 +1200,6 @@ async def get_entity_edge(uuid: str) -> dict[str, Any] | ErrorResponse:
'idempotentHint': True,
'openWorldHint': True,
},
tags={'retrieval', 'episodes', 'history', 'changelog'},
meta={
'version': '1.0',
'category': 'direct-access',
'priority': 0.5,
},
)
async def get_episodes(
group_id: str | None = None,
@@ -1368,12 +1308,6 @@ async def get_episodes(
'idempotentHint': True,
'openWorldHint': True,
},
tags={'delete', 'destructive', 'admin', 'bulk', 'danger'},
meta={
'version': '1.0',
'category': 'admin',
'priority': 0.1,
},
)
async def clear_graph(
group_id: str | None = None,
@@ -1453,12 +1387,6 @@ async def clear_graph(
'idempotentHint': True,
'openWorldHint': True,
},
tags={'admin', 'health', 'status', 'diagnostics'},
meta={
'version': '1.0',
'category': 'admin',
'priority': 0.4,
},
)
async def get_status() -> StatusResponse:
"""Check server health and database connectivity.

@@ -1,6 +1,7 @@
"""Response type definitions for Graphiti MCP Server."""
from typing import Any, TypedDict
from typing import Any
from typing_extensions import TypedDict
class ErrorResponse(TypedDict):
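The switch to `typing_extensions` is the standard compatibility fix here: pydantic, which FastMCP uses to build response schemas, rejects `typing.TypedDict` on Python versions before 3.12, so the backport is needed across this package's supported range (>=3.10; `typing_extensions` arrives transitively via pydantic). An equivalent conditional-import sketch, with an illustrative field:

```python
import sys

if sys.version_info >= (3, 12):
    from typing import TypedDict  # stdlib version is accepted by pydantic here
else:
    from typing_extensions import TypedDict  # required backport on 3.10/3.11


class ErrorResponse(TypedDict):
    error: str  # illustrative field; see response_types.py for the real shape
```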

@@ -2,7 +2,7 @@
from openai import AsyncAzureOpenAI
from config.schema import (
from ..config.schema import (
DatabaseConfig,
EmbedderConfig,
LLMConfig,
@@ -70,7 +70,7 @@ try:
HAS_GROQ = True
except ImportError:
HAS_GROQ = False
from utils.utils import create_azure_credential_token_provider
from ..utils.utils import create_azure_credential_token_provider
def _validate_api_key(provider_name: str, api_key: str | None, logger) -> str: