chore: update dependencies and refactor type hinting (#339)

* Bump version from 0.9.0 to 0.9.1 in pyproject.toml and update google-genai dependency to >=0.1.0

* Bump version from 0.9.1 to 0.9.2 in pyproject.toml

* Update google-genai dependency version to >=0.8.0 in pyproject.toml

* lock file

* Update pyproject.toml to version 0.9.3, restructure dependencies, and modify author format. Remove outdated Google API key note from README.md.

* upgrade poetry and ruff

* Update README.md to include installation instructions for Graphiti with Google Gemini support

* fix to deps since poetry doesn't fully implement PEP 735

* Refactor string formatting in various files to use single quotes for consistency and improve readability. This includes updates in agent.ipynb, quickstart.py, multiple prompt files, and ingest.py and retrieve.py modules.

* Remove optional dependencies from pyproject.toml to streamline project requirements.
This commit is contained in:
Daniel Chalef 2025-04-09 08:05:26 -07:00 committed by GitHub
parent 77406dfae9
commit 7f7a17c926
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
11 changed files with 783 additions and 239 deletions

View file

@ -363,7 +363,7 @@
" asyncio.create_task(\n", " asyncio.create_task(\n",
" client.add_episode(\n", " client.add_episode(\n",
" name='Chatbot Response',\n", " name='Chatbot Response',\n",
" episode_body=f\"{state['user_name']}: {state['messages'][-1]}\\nSalesBot: {response.content}\",\n", " episode_body=f'{state[\"user_name\"]}: {state[\"messages\"][-1]}\\nSalesBot: {response.content}',\n",
" source=EpisodeType.message,\n", " source=EpisodeType.message,\n",
" reference_time=datetime.now(timezone.utc),\n", " reference_time=datetime.now(timezone.utc),\n",
" source_description='Chatbot',\n", " source_description='Chatbot',\n",

View file

@ -217,7 +217,7 @@ async def main():
print(f'Node Name: {node.name}') print(f'Node Name: {node.name}')
node_summary = node.summary[:100] + '...' if len(node.summary) > 100 else node.summary node_summary = node.summary[:100] + '...' if len(node.summary) > 100 else node.summary
print(f'Content Summary: {node_summary}') print(f'Content Summary: {node_summary}')
print(f"Node Labels: {', '.join(node.labels)}") print(f'Node Labels: {", ".join(node.labels)}')
print(f'Created At: {node.created_at}') print(f'Created At: {node.created_at}')
if hasattr(node, 'attributes') and node.attributes: if hasattr(node, 'attributes') and node.attributes:
print('Attributes:') print('Attributes:')

View file

@ -55,4 +55,4 @@ class VoyageAIEmbedder(EmbedderClient):
return [] return []
result = await self.client.embed(input_list, model=self.config.embedding_model) result = await self.client.embed(input_list, model=self.config.embedding_model)
return result.embeddings[0][: self.config.embedding_dim] return [float(x) for x in result.embeddings[0][: self.config.embedding_dim]]

View file

@ -57,7 +57,7 @@ def node(context: dict[str, Any]) -> list[Message]:
{json.dumps([ep for ep in context['previous_episodes']], indent=2)} {json.dumps([ep for ep in context['previous_episodes']], indent=2)}
</PREVIOUS MESSAGES> </PREVIOUS MESSAGES>
<CURRENT MESSAGE> <CURRENT MESSAGE>
{context["episode_content"]} {context['episode_content']}
</CURRENT MESSAGE> </CURRENT MESSAGE>
<EXISTING NODES> <EXISTING NODES>

View file

@ -53,7 +53,7 @@ def v1(context: dict[str, Any]) -> list[Message]:
{context['previous_episodes']} {context['previous_episodes']}
</PREVIOUS MESSAGES> </PREVIOUS MESSAGES>
<CURRENT MESSAGE> <CURRENT MESSAGE>
{context["current_episode"]} {context['current_episode']}
</CURRENT MESSAGE> </CURRENT MESSAGE>
<REFERENCE TIMESTAMP> <REFERENCE TIMESTAMP>
{context['reference_timestamp']} {context['reference_timestamp']}

View file

@ -60,11 +60,11 @@ def edge(context: dict[str, Any]) -> list[Message]:
{json.dumps([ep for ep in context['previous_episodes']], indent=2)} {json.dumps([ep for ep in context['previous_episodes']], indent=2)}
</PREVIOUS MESSAGES> </PREVIOUS MESSAGES>
<CURRENT MESSAGE> <CURRENT MESSAGE>
{context["episode_content"]} {context['episode_content']}
</CURRENT MESSAGE> </CURRENT MESSAGE>
<ENTITIES> <ENTITIES>
{context["nodes"]} {context['nodes']}
</ENTITIES> </ENTITIES>
{context['custom_prompt']} {context['custom_prompt']}
@ -90,15 +90,15 @@ def reflexion(context: dict[str, Any]) -> list[Message]:
{json.dumps([ep for ep in context['previous_episodes']], indent=2)} {json.dumps([ep for ep in context['previous_episodes']], indent=2)}
</PREVIOUS MESSAGES> </PREVIOUS MESSAGES>
<CURRENT MESSAGE> <CURRENT MESSAGE>
{context["episode_content"]} {context['episode_content']}
</CURRENT MESSAGE> </CURRENT MESSAGE>
<EXTRACTED ENTITIES> <EXTRACTED ENTITIES>
{context["nodes"]} {context['nodes']}
</EXTRACTED ENTITIES> </EXTRACTED ENTITIES>
<EXTRACTED FACTS> <EXTRACTED FACTS>
{context["extracted_facts"]} {context['extracted_facts']}
</EXTRACTED FACTS> </EXTRACTED FACTS>
Given the above MESSAGES, list of EXTRACTED ENTITIES entities, and list of EXTRACTED FACTS; Given the above MESSAGES, list of EXTRACTED ENTITIES entities, and list of EXTRACTED FACTS;

View file

@ -68,7 +68,7 @@ def extract_message(context: dict[str, Any]) -> list[Message]:
{json.dumps([ep for ep in context['previous_episodes']], indent=2)} {json.dumps([ep for ep in context['previous_episodes']], indent=2)}
</PREVIOUS MESSAGES> </PREVIOUS MESSAGES>
<CURRENT MESSAGE> <CURRENT MESSAGE>
{context["episode_content"]} {context['episode_content']}
</CURRENT MESSAGE> </CURRENT MESSAGE>
{context['custom_prompt']} {context['custom_prompt']}
@ -96,10 +96,10 @@ def extract_json(context: dict[str, Any]) -> list[Message]:
user_prompt = f""" user_prompt = f"""
<SOURCE DESCRIPTION>: <SOURCE DESCRIPTION>:
{context["source_description"]} {context['source_description']}
</SOURCE DESCRIPTION> </SOURCE DESCRIPTION>
<JSON> <JSON>
{context["episode_content"]} {context['episode_content']}
</JSON> </JSON>
{context['custom_prompt']} {context['custom_prompt']}
@ -121,7 +121,7 @@ def extract_text(context: dict[str, Any]) -> list[Message]:
user_prompt = f""" user_prompt = f"""
<TEXT> <TEXT>
{context["episode_content"]} {context['episode_content']}
</TEXT> </TEXT>
{context['custom_prompt']} {context['custom_prompt']}
@ -148,11 +148,11 @@ def reflexion(context: dict[str, Any]) -> list[Message]:
{json.dumps([ep for ep in context['previous_episodes']], indent=2)} {json.dumps([ep for ep in context['previous_episodes']], indent=2)}
</PREVIOUS MESSAGES> </PREVIOUS MESSAGES>
<CURRENT MESSAGE> <CURRENT MESSAGE>
{context["episode_content"]} {context['episode_content']}
</CURRENT MESSAGE> </CURRENT MESSAGE>
<EXTRACTED ENTITIES> <EXTRACTED ENTITIES>
{context["extracted_entities"]} {context['extracted_entities']}
</EXTRACTED ENTITIES> </EXTRACTED ENTITIES>
Given the above previous messages, current message, and list of extracted entities; determine if any entities haven't been Given the above previous messages, current message, and list of extracted entities; determine if any entities haven't been
@ -172,7 +172,7 @@ def classify_nodes(context: dict[str, Any]) -> list[Message]:
{json.dumps([ep for ep in context['previous_episodes']], indent=2)} {json.dumps([ep for ep in context['previous_episodes']], indent=2)}
</PREVIOUS MESSAGES> </PREVIOUS MESSAGES>
<CURRENT MESSAGE> <CURRENT MESSAGE>
{context["episode_content"]} {context['episode_content']}
</CURRENT MESSAGE> </CURRENT MESSAGE>
<EXTRACTED ENTITIES> <EXTRACTED ENTITIES>

941
poetry.lock generated

File diff suppressed because it is too large Load diff

View file

@ -9,7 +9,7 @@ authors = [
] ]
readme = "README.md" readme = "README.md"
license = "Apache-2.0" license = "Apache-2.0"
requires-python = ">=3.10" requires-python = ">=3.10,<4"
packages = [{ include = "graphiti_core", from = "." }] packages = [{ include = "graphiti_core", from = "." }]
dependencies = [ dependencies = [
"pydantic>=2.8.2", "pydantic>=2.8.2",
@ -21,7 +21,6 @@ dependencies = [
"python-dotenv>=1.0.1", "python-dotenv>=1.0.1",
] ]
[project.urls] [project.urls]
Homepage = "https://help.getzep.com/graphiti/graphiti/overview" Homepage = "https://help.getzep.com/graphiti/graphiti/overview"
Repository = "https://github.com/getzep/graphiti" Repository = "https://github.com/getzep/graphiti"
@ -31,27 +30,25 @@ anthropic = ["anthropic>=0.49.0"]
groq = ["groq>=0.2.0"] groq = ["groq>=0.2.0"]
google-genai = ["google-genai>=1.8.0"] google-genai = ["google-genai>=1.8.0"]
[dependency-groups] [tool.poetry.group.dev.dependencies]
dev = [ mypy = ">=1.11.1"
"mypy>=1.11.1", groq = ">=0.2.0"
"groq>=0.2.0", anthropic = ">=0.49.0"
"anthropic>=0.49.0", google-genai = ">=1.8.0"
"google-genai>=1.8.0", ipykernel = ">=6.29.5"
"ipykernel>=6.29.5", jupyterlab = ">=4.2.4"
"jupyterlab>=4.2.4", diskcache-stubs = ">=5.6.3.6.20240818"
"diskcache-stubs>=5.6.3.6.20240818", langgraph = ">=0.2.15"
"langgraph>=0.2.15", langchain-anthropic = ">=0.2.4"
"langchain-anthropic>=0.2.4", langsmith = ">=0.1.108"
"langsmith>=0.1.108", langchain-openai = ">=0.2.6"
"langchain-openai>=0.2.6", sentence-transformers = ">=3.2.1"
"sentence-transformers>=3.2.1", transformers = ">=4.45.2"
"transformers>=4.45.2", voyageai = ">=0.2.3"
"voyageai>=0.2.3", pytest = ">=8.3.3"
"pytest>=8.3.3", pytest-asyncio = ">=0.24.0"
"pytest-asyncio>=0.24.0", pytest-xdist = ">=3.6.1"
"pytest-xdist>=3.6.1", ruff = ">=0.7.1"
"ruff>=0.7.1",
]
[build-system] [build-system]
requires = ["poetry-core"] requires = ["poetry-core"]

View file

@ -58,7 +58,7 @@ async def add_messages(
uuid=m.uuid, uuid=m.uuid,
group_id=request.group_id, group_id=request.group_id,
name=m.name, name=m.name,
episode_body=f"{m.role or ''}({m.role_type}): {m.content}", episode_body=f'{m.role or ""}({m.role_type}): {m.content}',
reference_time=m.timestamp, reference_time=m.timestamp,
source=EpisodeType.message, source=EpisodeType.message,
source_description=m.source_description, source_description=m.source_description,

View file

@ -59,5 +59,5 @@ async def get_memory(
def compose_query_from_messages(messages: list[Message]): def compose_query_from_messages(messages: list[Message]):
combined_query = '' combined_query = ''
for message in messages: for message in messages:
combined_query += f"{message.role_type or ''}({message.role or ''}): {message.content}\n" combined_query += f'{message.role_type or ""}({message.role or ""}): {message.content}\n'
return combined_query return combined_query