import asyncio
from typing import Type, List, Optional

from pydantic import BaseModel

from cognee.infrastructure.databases.graph import get_graph_engine
from cognee.tasks.storage.add_data_points import add_data_points
from cognee.modules.ontology.rdf_xml.RDFLibOntologyResolver import RDFLibOntologyResolver
from cognee.modules.chunking.models.DocumentChunk import DocumentChunk
from cognee.modules.graph.utils import (
    expand_with_nodes_and_edges,
    retrieve_existing_edges,
)
from cognee.shared.data_models import KnowledgeGraph
from cognee.infrastructure.llm.LLMGateway import LLMGateway
from cognee.tasks.graph.exceptions import (
    InvalidGraphModelError,
    InvalidDataChunksError,
    InvalidChunkGraphInputError,
    InvalidOntologyAdapterError,
)


async def integrate_chunk_graphs(
    data_chunks: list[DocumentChunk],
    chunk_graphs: list,
    graph_model: Type[BaseModel],
    ontology_adapter: RDFLibOntologyResolver,
) -> List[DocumentChunk]:
    """Updates DocumentChunk objects and integrates the extracted data points and edges into the databases."""
    # Validate inputs before touching any storage.
    if not isinstance(data_chunks, list) or not isinstance(chunk_graphs, list):
        raise InvalidChunkGraphInputError("data_chunks and chunk_graphs must be lists.")
    if len(data_chunks) != len(chunk_graphs):
        raise InvalidChunkGraphInputError(
            f"length mismatch: {len(data_chunks)} chunks vs {len(chunk_graphs)} graphs."
        )
    if not isinstance(graph_model, type) or not issubclass(graph_model, BaseModel):
        raise InvalidGraphModelError(graph_model)
    if ontology_adapter is None or not hasattr(ontology_adapter, "get_subgraph"):
        raise InvalidOntologyAdapterError(
            type(ontology_adapter).__name__ if ontology_adapter else "None"
        )

    graph_engine = await get_graph_engine()

    # Custom graph models are attached directly to their chunks; only the default
    # KnowledgeGraph model is expanded into graph nodes and edges below.
    if graph_model is not KnowledgeGraph:
        for chunk_index, chunk_graph in enumerate(chunk_graphs):
            data_chunks[chunk_index].contains = chunk_graph

        return data_chunks

    # Look up edges that already exist so re-processing does not duplicate them.
    existing_edges_map = await retrieve_existing_edges(
        data_chunks,
        chunk_graphs,
    )

    # Expand the chunk graphs into concrete nodes and edges, resolving entities
    # against the ontology adapter.
    graph_nodes, graph_edges = expand_with_nodes_and_edges(
        data_chunks, chunk_graphs, ontology_adapter, existing_edges_map
    )

    if len(graph_nodes) > 0:
        await add_data_points(graph_nodes)

    if len(graph_edges) > 0:
        await graph_engine.add_edges(graph_edges)

    return data_chunks
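
# When graph_model is a custom Pydantic model rather than KnowledgeGraph, the extracted
# structure is attached to each chunk via `contains` instead of being expanded into
# nodes and edges. A minimal sketch of such a model (illustrative only, not part of
# this module):
#
#     class CodeSummaryGraph(BaseModel):
#         functions: list[str]
#         dependencies: list[str]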


async def extract_graph_from_data(
    data_chunks: List[DocumentChunk],
    graph_model: Type[BaseModel],
    ontology_adapter: Optional[RDFLibOntologyResolver] = None,
    custom_prompt: Optional[str] = None,
) -> List[DocumentChunk]:
    """
    Extracts and integrates a knowledge graph from the text content of document chunks
    using the specified graph model.
    """
    if not isinstance(data_chunks, list) or not data_chunks:
        raise InvalidDataChunksError("must be a non-empty list of DocumentChunk.")
    if not all(hasattr(c, "text") for c in data_chunks):
        raise InvalidDataChunksError("each chunk must have a 'text' attribute.")
    if not isinstance(graph_model, type) or not issubclass(graph_model, BaseModel):
        raise InvalidGraphModelError(graph_model)

    # Extract one graph per chunk, running the LLM calls concurrently.
    chunk_graphs = await asyncio.gather(
        *[
            LLMGateway.extract_content_graph(chunk.text, graph_model, custom_prompt=custom_prompt)
            for chunk in data_chunks
        ]
    )

    # Note: filter out edges whose source or target node is missing from the extracted graph.
    if graph_model == KnowledgeGraph:
        for graph in chunk_graphs:
            valid_node_ids = {node.id for node in graph.nodes}
            graph.edges = [
                edge
                for edge in graph.edges
                if edge.source_node_id in valid_node_ids and edge.target_node_id in valid_node_ids
            ]

    return await integrate_chunk_graphs(
        data_chunks, chunk_graphs, graph_model, ontology_adapter
    )
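

# Example usage (a minimal sketch, not part of the original module). It assumes an
# async pipeline in which `document_chunks` (a list[DocumentChunk] produced by the
# chunking tasks) and `ontology_resolver` (an RDFLibOntologyResolver instance) already
# exist; both names are illustrative. Note that integrate_chunk_graphs rejects a None
# ontology_adapter, so callers must pass a resolver explicitly:
#
#     enriched_chunks = await extract_graph_from_data(
#         data_chunks=document_chunks,
#         graph_model=KnowledgeGraph,
#         ontology_adapter=ontology_resolver,
#         custom_prompt=None,
#     )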