chore: update version to 0.9.3 and restructure dependencies (#338)

* Bump version from 0.9.0 to 0.9.1 in pyproject.toml and update google-genai dependency to >=0.1.0

* Bump version from 0.9.1 to 0.9.2 in pyproject.toml

* Update google-genai dependency version to >=0.8.0 in pyproject.toml

* lock file

* Update pyproject.toml to version 0.9.3, restructure dependencies, and modify author format. Remove outdated Google API key note from README.md.

* upgrade poetry and ruff
This commit is contained in:
Daniel Chalef 2025-04-08 20:47:38 -07:00 committed by GitHub
parent 12ff02469a
commit 0f6ac57dab
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
19 changed files with 109 additions and 104 deletions

View file

@@ -5,7 +5,7 @@ on:
tags: ["v*.*.*"]
env:
POETRY_VERSION: "1.8.3"
POETRY_VERSION: "2.1.2"
jobs:
release:

View file

@@ -20,7 +20,7 @@ jobs:
- name: Install Poetry
uses: snok/install-poetry@v1
with:
version: 1.8.0
version: 2.1.2
virtualenvs-create: true
virtualenvs-in-project: true
installer-parallel: true

View file

@@ -25,6 +25,7 @@ jobs:
- name: Install Poetry
uses: snok/install-poetry@v1
with:
version: 2.1.2
virtualenvs-create: true
virtualenvs-in-project: true
installer-parallel: true

View file

@@ -261,8 +261,6 @@ graphiti = Graphiti(
# Now you can use Graphiti with Google Gemini
```
Make sure to replace the placeholder value with your actual Google API key. You can find more details in the example file at `examples/gemini_example.py`.
## Documentation
- [Guides and API documentation](https://help.getzep.com/graphiti).

View file

@@ -36,7 +36,7 @@ class BGERerankerClient(CrossEncoderClient):
scores = await loop.run_in_executor(None, self.model.predict, input_pairs)
ranked_passages = sorted(
[(passage, float(score)) for passage, score in zip(passages, scores)],
[(passage, float(score)) for passage, score in zip(passages, scores, strict=False)],
key=lambda x: x[1],
reverse=True,
)

View file

@@ -111,7 +111,7 @@ class OpenAIRerankerClient(CrossEncoderClient):
if bool(logprob.token):
scores.append(logprob.logprob)
results = [(passage, score) for passage, score in zip(passages, scores)]
results = [(passage, score) for passage, score in zip(passages, scores, strict=False)]
results.sort(reverse=True, key=lambda x: x[1])
return results
except openai.RateLimitError as e:

View file

@@ -15,7 +15,6 @@ limitations under the License.
"""
from collections.abc import Iterable
from typing import Union
from openai import AsyncAzureOpenAI, AsyncOpenAI
from openai.types import EmbeddingModel
@@ -41,7 +40,7 @@ class OpenAIEmbedder(EmbedderClient):
def __init__(
self,
config: OpenAIEmbedderConfig | None = None,
client: Union[AsyncOpenAI, AsyncAzureOpenAI, None] = None,
client: AsyncOpenAI | AsyncAzureOpenAI | None = None,
):
if config is None:
config = OpenAIEmbedderConfig()

View file

@@ -451,7 +451,7 @@ class Graphiti:
existing_edges_list: list[list[EntityEdge]] = [
source_lst + target_lst
for source_lst, target_lst in zip(
existing_source_edges_list, existing_target_edges_list
existing_source_edges_list, existing_target_edges_list, strict=False
)
]

View file

@@ -36,7 +36,7 @@ logger = logging.getLogger(__name__)
def is_server_or_retry_error(exception):
if isinstance(exception, (RateLimitError, json.decoder.JSONDecodeError)):
if isinstance(exception, RateLimitError | json.decoder.JSONDecodeError):
return True
return (

View file

@@ -15,7 +15,7 @@ limitations under the License.
"""
import json
from typing import Any, Optional, Protocol, TypedDict
from typing import Any, Protocol, TypedDict
from pydantic import BaseModel, Field
@@ -24,7 +24,7 @@ from .models import Message, PromptFunction, PromptVersion
class EdgeDuplicate(BaseModel):
is_duplicate: bool = Field(..., description='true or false')
uuid: Optional[str] = Field(
uuid: str | None = Field(
None,
description="uuid of the existing edge like '5d643020624c42fa9de13f97b1b3fa39' or null",
)

View file

@@ -15,7 +15,7 @@ limitations under the License.
"""
import json
from typing import Any, Optional, Protocol, TypedDict
from typing import Any, Protocol, TypedDict
from pydantic import BaseModel, Field
@@ -24,7 +24,7 @@ from .models import Message, PromptFunction, PromptVersion
class NodeDuplicate(BaseModel):
is_duplicate: bool = Field(..., description='true or false')
uuid: Optional[str] = Field(
uuid: str | None = Field(
None,
description="uuid of the existing node like '5d643020624c42fa9de13f97b1b3fa39' or null",
)

View file

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Any, Optional, Protocol, TypedDict
from typing import Any, Protocol, TypedDict
from pydantic import BaseModel, Field
@@ -22,11 +22,11 @@ from .models import Message, PromptFunction, PromptVersion
class EdgeDates(BaseModel):
valid_at: Optional[str] = Field(
valid_at: str | None = Field(
None,
description='The date and time when the relationship described by the edge fact became true or was established. YYYY-MM-DDTHH:MM:SS.SSSSSSZ or null.',
)
invalid_at: Optional[str] = Field(
invalid_at: str | None = Field(
None,
description='The date and time when the relationship described by the edge fact stopped being true or ended. YYYY-MM-DDTHH:MM:SS.SSSSSSZ or null.',
)

View file

@@ -14,7 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Any, Callable, Protocol
from collections.abc import Callable
from typing import Any, Protocol
from pydantic import BaseModel

View file

@@ -164,7 +164,7 @@ async def build_community(
*[
summarize_pair(llm_client, (str(left_summary), str(right_summary)))
for left_summary, right_summary in zip(
summaries[: int(length / 2)], summaries[int(length / 2) :]
summaries[: int(length / 2)], summaries[int(length / 2) :], strict=False
)
]
)

View file

@@ -213,7 +213,7 @@ async def resolve_extracted_edges(
previous_episodes,
)
for extracted_edge, related_edges, existing_edges in zip(
extracted_edges, related_edges_lists, existing_edges_lists
extracted_edges, related_edges_lists, existing_edges_lists, strict=False
)
]
)

View file

@@ -279,7 +279,9 @@ async def resolve_extracted_nodes(
previous_episodes,
entity_types,
)
for extracted_node, existing_nodes in zip(extracted_nodes, existing_nodes_lists)
for extracted_node, existing_nodes in zip(
extracted_nodes, existing_nodes_lists, strict=False
)
]
)
)

View file

@@ -10,7 +10,7 @@ import os
import sys
import uuid
from datetime import datetime, timezone
from typing import Any, Optional, TypedDict, Union, cast
from typing import Any, TypedDict, cast
from dotenv import load_dotenv
from mcp.server.fastmcp import FastMCP
@@ -169,7 +169,7 @@ class GraphitiLLMConfig(BaseModel):
Centralizes all LLM-specific configuration parameters including API keys and model selection.
"""
api_key: Optional[str] = None
api_key: str | None = None
model: str = DEFAULT_LLM_MODEL
temperature: float = 0.0
@@ -216,7 +216,7 @@ class GraphitiLLMConfig(BaseModel):
return config
def create_client(self) -> Optional[LLMClient]:
def create_client(self) -> LLMClient | None:
"""Create an LLM client based on this configuration.
Returns:
@@ -258,7 +258,7 @@ class GraphitiConfig(BaseModel):
llm: GraphitiLLMConfig = Field(default_factory=GraphitiLLMConfig)
neo4j: Neo4jConfig = Field(default_factory=Neo4jConfig)
group_id: Optional[str] = None
group_id: str | None = None
use_custom_entities: bool = False
destroy_graph: bool = False
@@ -353,7 +353,7 @@ mcp = FastMCP(
# Initialize Graphiti client
graphiti_client: Optional[Graphiti] = None
graphiti_client: Graphiti | None = None
async def initialize_graphiti():
@@ -469,11 +469,11 @@ async def process_episode_queue(group_id: str):
async def add_episode(
name: str,
episode_body: str,
group_id: Optional[str] = None,
group_id: str | None = None,
source: str = 'text',
source_description: str = '',
uuid: Optional[str] = None,
) -> Union[SuccessResponse, ErrorResponse]:
uuid: str | None = None,
) -> SuccessResponse | ErrorResponse:
"""Add an episode to the Graphiti knowledge graph. This is the primary way to add information to the graph.
This function returns immediately and processes the episode addition in the background.
@@ -609,11 +609,11 @@ async def add_episode(
@mcp.tool()
async def search_nodes(
query: str,
group_ids: Optional[list[str]] = None,
group_ids: list[str] | None = None,
max_nodes: int = 10,
center_node_uuid: Optional[str] = None,
center_node_uuid: str | None = None,
entity: str = '', # cursor seems to break with None
) -> Union[NodeSearchResponse, ErrorResponse]:
) -> NodeSearchResponse | ErrorResponse:
"""Search the Graphiti knowledge graph for relevant node summaries.
These contain a summary of all of a node's relationships with other nodes.
@@ -690,10 +690,10 @@ async def search_nodes(
@mcp.tool()
async def search_facts(
query: str,
group_ids: Optional[list[str]] = None,
group_ids: list[str] | None = None,
max_facts: int = 10,
center_node_uuid: Optional[str] = None,
) -> Union[FactSearchResponse, ErrorResponse]:
center_node_uuid: str | None = None,
) -> FactSearchResponse | ErrorResponse:
"""Search the Graphiti knowledge graph for relevant facts.
Args:
@@ -738,7 +738,7 @@ async def search_facts(
@mcp.tool()
async def delete_entity_edge(uuid: str) -> Union[SuccessResponse, ErrorResponse]:
async def delete_entity_edge(uuid: str) -> SuccessResponse | ErrorResponse:
"""Delete an entity edge from the Graphiti knowledge graph.
Args:
@@ -768,7 +768,7 @@ async def delete_entity_edge(uuid: str) -> Union[SuccessResponse, ErrorResponse]
@mcp.tool()
async def delete_episode(uuid: str) -> Union[SuccessResponse, ErrorResponse]:
async def delete_episode(uuid: str) -> SuccessResponse | ErrorResponse:
"""Delete an episode from the Graphiti knowledge graph.
Args:
@@ -798,7 +798,7 @@ async def delete_episode(uuid: str) -> Union[SuccessResponse, ErrorResponse]:
@mcp.tool()
async def get_entity_edge(uuid: str) -> Union[dict[str, Any], ErrorResponse]:
async def get_entity_edge(uuid: str) -> dict[str, Any] | ErrorResponse:
"""Get an entity edge from the Graphiti knowledge graph by its UUID.
Args:
@@ -830,8 +830,8 @@ async def get_entity_edge(uuid: str) -> Union[dict[str, Any], ErrorResponse]:
@mcp.tool()
async def get_episodes(
group_id: Optional[str] = None, last_n: int = 10
) -> Union[list[dict[str, Any]], EpisodeSearchResponse, ErrorResponse]:
group_id: str | None = None, last_n: int = 10
) -> list[dict[str, Any]] | EpisodeSearchResponse | ErrorResponse:
"""Get the most recent episodes for a specific group.
Args:
@@ -879,7 +879,7 @@ async def get_episodes(
@mcp.tool()
async def clear_graph() -> Union[SuccessResponse, ErrorResponse]:
async def clear_graph() -> SuccessResponse | ErrorResponse:
"""Clear all data from the Graphiti knowledge graph and rebuild indices."""
global graphiti_client

View file

@@ -1,56 +1,65 @@
[tool.poetry]
[project]
name = "graphiti-core"
version = "0.9.2"
description = "A temporal graph building library"
version = "0.9.3"
authors = [
"Paul Paliychuk <paul@getzep.com>",
"Preston Rasmussen <preston@getzep.com>",
"Daniel Chalef <daniel@getzep.com>",
{"name" = "Paul Paliychuk", "email" = "paul@getzep.com"},
{"name" = "Preston Rasmussen", "email" = "preston@getzep.com"},
{"name" = "Daniel Chalef", "email" = "daniel@getzep.com"},
]
readme = "README.md"
license = "Apache-2.0"
requires-python = ">=3.10"
packages = [{ include = "graphiti_core", from = "." }]
dependencies = [
"pydantic>=2.8.2",
"neo4j>=5.23.0",
"diskcache>=5.6.3",
"openai>=1.53.0",
"tenacity>=9.0.0",
"numpy>=1.0.0",
"python-dotenv>=1.0.1",
]
[tool.poetry.dependencies]
python = "^3.10"
pydantic = "^2.8.2"
neo4j = "^5.23.0"
diskcache = "^5.6.3"
openai = "^1.53.0"
tenacity = "9.0.0"
numpy = ">=1.0.0"
python-dotenv = "^1.0.1"
[tool.poetry.extras]
anthropic = ["anthropic"]
groq = ["groq"]
google-genai = ["google-genai"]
[project.urls]
Homepage = "https://help.getzep.com/graphiti/graphiti/overview"
Repository = "https://github.com/getzep/graphiti"
[tool.poetry.group.dev.dependencies]
mypy = "^1.11.1"
groq = ">=0.9,<0.12"
anthropic = "~0.49.0"
google-genai = ">=0.8.0"
ipykernel = "^6.29.5"
jupyterlab = "^4.2.4"
diskcache-stubs = "^5.6.3.6.20240818"
langgraph = "^0.2.15"
langchain-anthropic = "^0.2.4"
langsmith = "^0.1.108"
langchain-openai = "^0.2.6"
sentence-transformers = "^3.2.1"
transformers = "^4.45.2"
voyageai = "^0.2.3"
pytest = "^8.3.3"
pytest-asyncio = "^0.24.0"
pytest-xdist = "^3.6.1"
ruff = "^0.7.1"
[project.optional-dependencies]
anthropic = ["anthropic>=0.49.0"]
groq = ["groq>=0.2.0"]
google-genai = ["google-genai>=1.8.0"]
[dependency-groups]
dev = [
"mypy>=1.11.1",
"groq>=0.2.0",
"anthropic>=0.49.0",
"google-genai>=1.8.0",
"ipykernel>=6.29.5",
"jupyterlab>=4.2.4",
"diskcache-stubs>=5.6.3.6.20240818",
"langgraph>=0.2.15",
"langchain-anthropic>=0.2.4",
"langsmith>=0.1.108",
"langchain-openai>=0.2.6",
"sentence-transformers>=3.2.1",
"transformers>=4.45.2",
"voyageai>=0.2.3",
"pytest>=8.3.3",
"pytest-asyncio>=0.24.0",
"pytest-xdist>=3.6.1",
"ruff>=0.7.1",
]
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
[tool.poetry]
requires-poetry = ">=2.0"
[tool.pytest.ini_options]
pythonpath = ["."]

View file

@@ -13,11 +13,10 @@ async def test_hybrid_node_search_deduplication():
mock_driver = AsyncMock()
# Mock the node_fulltext_search and entity_similarity_search functions
with patch(
'graphiti_core.search.search_utils.node_fulltext_search'
) as mock_fulltext_search, patch(
'graphiti_core.search.search_utils.node_similarity_search'
) as mock_similarity_search:
with (
patch('graphiti_core.search.search_utils.node_fulltext_search') as mock_fulltext_search,
patch('graphiti_core.search.search_utils.node_similarity_search') as mock_similarity_search,
):
# Set up mock return values
mock_fulltext_search.side_effect = [
[EntityNode(uuid='1', name='Alice', labels=['Entity'], group_id='1')],
@@ -47,11 +46,10 @@ async def test_hybrid_node_search_deduplication():
async def test_hybrid_node_search_empty_results():
mock_driver = AsyncMock()
with patch(
'graphiti_core.search.search_utils.node_fulltext_search'
) as mock_fulltext_search, patch(
'graphiti_core.search.search_utils.node_similarity_search'
) as mock_similarity_search:
with (
patch('graphiti_core.search.search_utils.node_fulltext_search') as mock_fulltext_search,
patch('graphiti_core.search.search_utils.node_similarity_search') as mock_similarity_search,
):
mock_fulltext_search.return_value = []
mock_similarity_search.return_value = []
@ -66,11 +64,10 @@ async def test_hybrid_node_search_empty_results():
async def test_hybrid_node_search_only_fulltext():
mock_driver = AsyncMock()
with patch(
'graphiti_core.search.search_utils.node_fulltext_search'
) as mock_fulltext_search, patch(
'graphiti_core.search.search_utils.node_similarity_search'
) as mock_similarity_search:
with (
patch('graphiti_core.search.search_utils.node_fulltext_search') as mock_fulltext_search,
patch('graphiti_core.search.search_utils.node_similarity_search') as mock_similarity_search,
):
mock_fulltext_search.return_value = [
EntityNode(uuid='1', name='Alice', labels=['Entity'], group_id='1')
]
@ -90,11 +87,10 @@ async def test_hybrid_node_search_only_fulltext():
async def test_hybrid_node_search_with_limit():
mock_driver = AsyncMock()
with patch(
'graphiti_core.search.search_utils.node_fulltext_search'
) as mock_fulltext_search, patch(
'graphiti_core.search.search_utils.node_similarity_search'
) as mock_similarity_search:
with (
patch('graphiti_core.search.search_utils.node_fulltext_search') as mock_fulltext_search,
patch('graphiti_core.search.search_utils.node_similarity_search') as mock_similarity_search,
):
mock_fulltext_search.return_value = [
EntityNode(uuid='1', name='Alice', labels=['Entity'], group_id='1'),
EntityNode(uuid='2', name='Bob', labels=['Entity'], group_id='1'),
@ -133,11 +129,10 @@ async def test_hybrid_node_search_with_limit():
async def test_hybrid_node_search_with_limit_and_duplicates():
mock_driver = AsyncMock()
with patch(
'graphiti_core.search.search_utils.node_fulltext_search'
) as mock_fulltext_search, patch(
'graphiti_core.search.search_utils.node_similarity_search'
) as mock_similarity_search:
with (
patch('graphiti_core.search.search_utils.node_fulltext_search') as mock_fulltext_search,
patch('graphiti_core.search.search_utils.node_similarity_search') as mock_similarity_search,
):
mock_fulltext_search.return_value = [
EntityNode(uuid='1', name='Alice', labels=['Entity'], group_id='1'),
EntityNode(uuid='2', name='Bob', labels=['Entity'], group_id='1'),