chore: update dependencies and refactor type hinting (#339)

* Bump version from 0.9.0 to 0.9.1 in pyproject.toml and update google-genai dependency to >=0.1.0

* Bump version from 0.9.1 to 0.9.2 in pyproject.toml

* Update google-genai dependency version to >=0.8.0 in pyproject.toml

* lock file

* Update pyproject.toml to version 0.9.3, restructure dependencies, and modify author format. Remove outdated Google API key note from README.md.

* upgrade poetry and ruff

* Update README.md to include installation instructions for Graphiti with Google Gemini support

* fix to deps since poetry doesn't fully implement PEP 735

* Refactor string formatting in various files to use single quotes for consistency and improve readability. This includes updates in agent.ipynb, quickstart.py, multiple prompt files, and ingest.py and retrieve.py modules.

* Remove optional dependencies from pyproject.toml to streamline project requirements.
This commit is contained in:
Daniel Chalef 2025-04-09 08:05:26 -07:00 committed by GitHub
parent 77406dfae9
commit 7f7a17c926
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
11 changed files with 783 additions and 239 deletions

View file

@@ -363,7 +363,7 @@
" asyncio.create_task(\n",
" client.add_episode(\n",
" name='Chatbot Response',\n",
" episode_body=f\"{state['user_name']}: {state['messages'][-1]}\\nSalesBot: {response.content}\",\n",
" episode_body=f'{state[\"user_name\"]}: {state[\"messages\"][-1]}\\nSalesBot: {response.content}',\n",
" source=EpisodeType.message,\n",
" reference_time=datetime.now(timezone.utc),\n",
" source_description='Chatbot',\n",

View file

@@ -217,7 +217,7 @@ async def main():
print(f'Node Name: {node.name}')
node_summary = node.summary[:100] + '...' if len(node.summary) > 100 else node.summary
print(f'Content Summary: {node_summary}')
print(f"Node Labels: {', '.join(node.labels)}")
print(f'Node Labels: {", ".join(node.labels)}')
print(f'Created At: {node.created_at}')
if hasattr(node, 'attributes') and node.attributes:
print('Attributes:')

View file

@@ -55,4 +55,4 @@ class VoyageAIEmbedder(EmbedderClient):
return []
result = await self.client.embed(input_list, model=self.config.embedding_model)
return result.embeddings[0][: self.config.embedding_dim]
return [float(x) for x in result.embeddings[0][: self.config.embedding_dim]]

View file

@@ -57,7 +57,7 @@ def node(context: dict[str, Any]) -> list[Message]:
{json.dumps([ep for ep in context['previous_episodes']], indent=2)}
</PREVIOUS MESSAGES>
<CURRENT MESSAGE>
{context["episode_content"]}
{context['episode_content']}
</CURRENT MESSAGE>
<EXISTING NODES>

View file

@@ -53,7 +53,7 @@ def v1(context: dict[str, Any]) -> list[Message]:
{context['previous_episodes']}
</PREVIOUS MESSAGES>
<CURRENT MESSAGE>
{context["current_episode"]}
{context['current_episode']}
</CURRENT MESSAGE>
<REFERENCE TIMESTAMP>
{context['reference_timestamp']}

View file

@@ -60,11 +60,11 @@ def edge(context: dict[str, Any]) -> list[Message]:
{json.dumps([ep for ep in context['previous_episodes']], indent=2)}
</PREVIOUS MESSAGES>
<CURRENT MESSAGE>
{context["episode_content"]}
{context['episode_content']}
</CURRENT MESSAGE>
<ENTITIES>
{context["nodes"]}
{context['nodes']}
</ENTITIES>
{context['custom_prompt']}
@@ -90,15 +90,15 @@ def reflexion(context: dict[str, Any]) -> list[Message]:
{json.dumps([ep for ep in context['previous_episodes']], indent=2)}
</PREVIOUS MESSAGES>
<CURRENT MESSAGE>
{context["episode_content"]}
{context['episode_content']}
</CURRENT MESSAGE>
<EXTRACTED ENTITIES>
{context["nodes"]}
{context['nodes']}
</EXTRACTED ENTITIES>
<EXTRACTED FACTS>
{context["extracted_facts"]}
{context['extracted_facts']}
</EXTRACTED FACTS>
Given the above MESSAGES, list of EXTRACTED ENTITIES entities, and list of EXTRACTED FACTS;

View file

@@ -68,7 +68,7 @@ def extract_message(context: dict[str, Any]) -> list[Message]:
{json.dumps([ep for ep in context['previous_episodes']], indent=2)}
</PREVIOUS MESSAGES>
<CURRENT MESSAGE>
{context["episode_content"]}
{context['episode_content']}
</CURRENT MESSAGE>
{context['custom_prompt']}
@@ -96,10 +96,10 @@ def extract_json(context: dict[str, Any]) -> list[Message]:
user_prompt = f"""
<SOURCE DESCRIPTION>:
{context["source_description"]}
{context['source_description']}
</SOURCE DESCRIPTION>
<JSON>
{context["episode_content"]}
{context['episode_content']}
</JSON>
{context['custom_prompt']}
@@ -121,7 +121,7 @@ def extract_text(context: dict[str, Any]) -> list[Message]:
user_prompt = f"""
<TEXT>
{context["episode_content"]}
{context['episode_content']}
</TEXT>
{context['custom_prompt']}
@@ -148,11 +148,11 @@ def reflexion(context: dict[str, Any]) -> list[Message]:
{json.dumps([ep for ep in context['previous_episodes']], indent=2)}
</PREVIOUS MESSAGES>
<CURRENT MESSAGE>
{context["episode_content"]}
{context['episode_content']}
</CURRENT MESSAGE>
<EXTRACTED ENTITIES>
{context["extracted_entities"]}
{context['extracted_entities']}
</EXTRACTED ENTITIES>
Given the above previous messages, current message, and list of extracted entities; determine if any entities haven't been
@@ -172,7 +172,7 @@ def classify_nodes(context: dict[str, Any]) -> list[Message]:
{json.dumps([ep for ep in context['previous_episodes']], indent=2)}
</PREVIOUS MESSAGES>
<CURRENT MESSAGE>
{context["episode_content"]}
{context['episode_content']}
</CURRENT MESSAGE>
<EXTRACTED ENTITIES>

941
poetry.lock generated

File diff suppressed because it is too large Load diff

View file

@@ -9,7 +9,7 @@ authors = [
]
readme = "README.md"
license = "Apache-2.0"
requires-python = ">=3.10"
requires-python = ">=3.10,<4"
packages = [{ include = "graphiti_core", from = "." }]
dependencies = [
"pydantic>=2.8.2",
@@ -21,7 +21,6 @@ dependencies = [
"python-dotenv>=1.0.1",
]
[project.urls]
Homepage = "https://help.getzep.com/graphiti/graphiti/overview"
Repository = "https://github.com/getzep/graphiti"
@@ -31,27 +30,25 @@ anthropic = ["anthropic>=0.49.0"]
groq = ["groq>=0.2.0"]
google-genai = ["google-genai>=1.8.0"]
[dependency-groups]
dev = [
"mypy>=1.11.1",
"groq>=0.2.0",
"anthropic>=0.49.0",
"google-genai>=1.8.0",
"ipykernel>=6.29.5",
"jupyterlab>=4.2.4",
"diskcache-stubs>=5.6.3.6.20240818",
"langgraph>=0.2.15",
"langchain-anthropic>=0.2.4",
"langsmith>=0.1.108",
"langchain-openai>=0.2.6",
"sentence-transformers>=3.2.1",
"transformers>=4.45.2",
"voyageai>=0.2.3",
"pytest>=8.3.3",
"pytest-asyncio>=0.24.0",
"pytest-xdist>=3.6.1",
"ruff>=0.7.1",
]
[tool.poetry.group.dev.dependencies]
mypy = ">=1.11.1"
groq = ">=0.2.0"
anthropic = ">=0.49.0"
google-genai = ">=1.8.0"
ipykernel = ">=6.29.5"
jupyterlab = ">=4.2.4"
diskcache-stubs = ">=5.6.3.6.20240818"
langgraph = ">=0.2.15"
langchain-anthropic = ">=0.2.4"
langsmith = ">=0.1.108"
langchain-openai = ">=0.2.6"
sentence-transformers = ">=3.2.1"
transformers = ">=4.45.2"
voyageai = ">=0.2.3"
pytest = ">=8.3.3"
pytest-asyncio = ">=0.24.0"
pytest-xdist = ">=3.6.1"
ruff = ">=0.7.1"
[build-system]
requires = ["poetry-core"]

View file

@@ -58,7 +58,7 @@ async def add_messages(
uuid=m.uuid,
group_id=request.group_id,
name=m.name,
episode_body=f"{m.role or ''}({m.role_type}): {m.content}",
episode_body=f'{m.role or ""}({m.role_type}): {m.content}',
reference_time=m.timestamp,
source=EpisodeType.message,
source_description=m.source_description,

View file

@@ -59,5 +59,5 @@ async def get_memory(
def compose_query_from_messages(messages: list[Message]):
combined_query = ''
for message in messages:
combined_query += f"{message.role_type or ''}({message.role or ''}): {message.content}\n"
combined_query += f'{message.role_type or ""}({message.role or ""}): {message.content}\n'
return combined_query