update prompts and support thinking models (#846)

* update prompts and support thinking models

* update

* type ignore
This commit is contained in:
Preston Rasmussen 2025-08-19 12:31:50 -04:00 committed by GitHub
parent c28bde6b07
commit 1c27a3563b
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
8 changed files with 25 additions and 14 deletions

View file

@@ -77,7 +77,11 @@ class IsPresidentOf(BaseModel):
async def main(use_bulk: bool = False): async def main(use_bulk: bool = False):
setup_logging() setup_logging()
client = Graphiti(neo4j_uri, neo4j_user, neo4j_password) client = Graphiti(
neo4j_uri,
neo4j_user,
neo4j_password,
)
await clear_data(client.driver) await clear_data(client.driver)
await client.build_indices_and_constraints() await client.build_indices_and_constraints()
messages = parse_podcast_messages() messages = parse_podcast_messages()

View file

@@ -107,10 +107,10 @@ class BaseOpenAIClient(LLMClient):
def _handle_structured_response(self, response: Any) -> dict[str, Any]: def _handle_structured_response(self, response: Any) -> dict[str, Any]:
"""Handle structured response parsing and validation.""" """Handle structured response parsing and validation."""
response_object = response.choices[0].message response_object = response.output_text
if response_object.parsed: if response_object:
return response_object.parsed.model_dump() return json.loads(response_object)
elif response_object.refusal: elif response_object.refusal:
raise RefusalError(response_object.refusal) raise RefusalError(response_object.refusal)
else: else:

View file

@@ -69,14 +69,16 @@ class OpenAIClient(BaseOpenAIClient):
response_model: type[BaseModel], response_model: type[BaseModel],
): ):
"""Create a structured completion using OpenAI's beta parse API.""" """Create a structured completion using OpenAI's beta parse API."""
return await self.client.beta.chat.completions.parse( response = await self.client.responses.parse(
model=model, model=model,
messages=messages, input=messages, # type: ignore
temperature=temperature, temperature=temperature,
max_tokens=max_tokens, max_output_tokens=max_tokens,
response_format=response_model, # type: ignore text_format=response_model, # type: ignore
) )
return response
async def _create_completion( async def _create_completion(
self, self,
model: str, model: str,

View file

@@ -94,6 +94,7 @@ Only extract facts that:
- involve two DISTINCT ENTITIES from the ENTITIES list, - involve two DISTINCT ENTITIES from the ENTITIES list,
- are clearly stated or unambiguously implied in the CURRENT MESSAGE, - are clearly stated or unambiguously implied in the CURRENT MESSAGE,
and can be represented as edges in a knowledge graph. and can be represented as edges in a knowledge graph.
- Facts should include entity names rather than pronouns whenever possible.
- The FACT TYPES provide a list of the most important types of facts, make sure to extract facts of these types - The FACT TYPES provide a list of the most important types of facts, make sure to extract facts of these types
- The FACT TYPES are not an exhaustive list, extract all facts from the message even if they do not fit into one - The FACT TYPES are not an exhaustive list, extract all facts from the message even if they do not fit into one
of the FACT TYPES of the FACT TYPES

View file

@@ -100,7 +100,7 @@ Instructions:
You are given a conversation context and a CURRENT MESSAGE. Your task is to extract **entity nodes** mentioned **explicitly or implicitly** in the CURRENT MESSAGE. You are given a conversation context and a CURRENT MESSAGE. Your task is to extract **entity nodes** mentioned **explicitly or implicitly** in the CURRENT MESSAGE.
Pronoun references such as he/she/they or this/that/those should be disambiguated to the names of the Pronoun references such as he/she/they or this/that/those should be disambiguated to the names of the
reference entities. reference entities. Only extract distinct entities from the CURRENT MESSAGE. Don't extract pronouns like you, me, he/she/they, we/us as entities.
1. **Speaker Extraction**: Always extract the speaker (the part before the colon `:` in each dialogue line) as the first entity node. 1. **Speaker Extraction**: Always extract the speaker (the part before the colon `:` in each dialogue line) as the first entity node.
- If the speaker is mentioned again in the message, treat both mentions as a **single entity**. - If the speaker is mentioned again in the message, treat both mentions as a **single entity**.

View file

@@ -152,9 +152,13 @@ async def extract_nodes(
# Convert the extracted data into EntityNode objects # Convert the extracted data into EntityNode objects
extracted_nodes = [] extracted_nodes = []
for extracted_entity in filtered_extracted_entities: for extracted_entity in filtered_extracted_entities:
entity_type_name = entity_types_context[extracted_entity.entity_type_id].get( type_id = extracted_entity.entity_type_id
'entity_type_name' if 0 <= type_id < len(entity_types_context):
) entity_type_name = entity_types_context[extracted_entity.entity_type_id].get(
'entity_type_name'
)
else:
entity_type_name = 'Entity'
# Check if this entity type should be excluded # Check if this entity type should be excluded
if excluded_entity_types and entity_type_name in excluded_entity_types: if excluded_entity_types and entity_type_name in excluded_entity_types:

View file

@@ -1,7 +1,7 @@
[project] [project]
name = "graphiti-core" name = "graphiti-core"
description = "A temporal graph building library" description = "A temporal graph building library"
version = "0.18.8" version = "0.18.9"
authors = [ authors = [
{ name = "Paul Paliychuk", email = "paul@getzep.com" }, { name = "Paul Paliychuk", email = "paul@getzep.com" },
{ name = "Preston Rasmussen", email = "preston@getzep.com" }, { name = "Preston Rasmussen", email = "preston@getzep.com" },

2
uv.lock generated
View file

@@ -746,7 +746,7 @@ wheels = [
[[package]] [[package]]
name = "graphiti-core" name = "graphiti-core"
version = "0.18.7" version = "0.18.9"
source = { editable = "." } source = { editable = "." }
dependencies = [ dependencies = [
{ name = "diskcache" }, { name = "diskcache" },