wip fix Memgraph get_knowledge_graph issues

DavIvek 2025-07-10 16:56:44 +02:00
parent ba0cffd853
commit a0c4d88b0d
2 changed files with 24 additions and 17 deletions

View file

@@ -179,9 +179,9 @@ The command-line `workspace` argument and the `WORKSPACE` environment variable i
 - **For local file-based databases, data isolation is achieved through workspace subdirectories:** `JsonKVStorage`, `JsonDocStatusStorage`, `NetworkXStorage`, `NanoVectorDBStorage`, `FaissVectorDBStorage`.
 - **For databases that store data in collections, it's done by adding a workspace prefix to the collection name:** `RedisKVStorage`, `RedisDocStatusStorage`, `MilvusVectorDBStorage`, `QdrantVectorDBStorage`, `MongoKVStorage`, `MongoDocStatusStorage`, `MongoVectorDBStorage`, `MongoGraphStorage`, `PGGraphStorage`.
 - **For relational databases, data isolation is achieved by adding a `workspace` field to the tables for logical data separation:** `PGKVStorage`, `PGVectorStorage`, `PGDocStatusStorage`.
-- **For the Neo4j graph database, logical data isolation is achieved through labels:** `Neo4JStorage`
+- **For graph databases, logical data isolation is achieved through labels:** `Neo4JStorage`, `MemgraphStorage`
-To maintain compatibility with legacy data, the default workspace for PostgreSQL is `default` and for Neo4j is `base` when no workspace is configured. For all external storages, the system provides dedicated workspace environment variables to override the common `WORKSPACE` environment variable configuration. These storage-specific workspace environment variables are: `REDIS_WORKSPACE`, `MILVUS_WORKSPACE`, `QDRANT_WORKSPACE`, `MONGODB_WORKSPACE`, `POSTGRES_WORKSPACE`, `NEO4J_WORKSPACE`.
+To maintain compatibility with legacy data, the default workspace for PostgreSQL is `default` and for Neo4j is `base` when no workspace is configured. For all external storages, the system provides dedicated workspace environment variables to override the common `WORKSPACE` environment variable configuration. These storage-specific workspace environment variables are: `REDIS_WORKSPACE`, `MILVUS_WORKSPACE`, `QDRANT_WORKSPACE`, `MONGODB_WORKSPACE`, `POSTGRES_WORKSPACE`, `NEO4J_WORKSPACE`, `MEMGRAPH_WORKSPACE`.
 ### Multiple workers for Gunicorn + Uvicorn
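The workspace variables documented in the hunk above are read from the environment before the storages are initialized. A minimal sketch of how they might be set from Python; the variable names come from the README text, while the workspace values (`project_a`, `project_a_graph`) are hypothetical:

```python
import os

# Generic workspace shared by all storage backends (hypothetical name).
os.environ["WORKSPACE"] = "project_a"

# Backend-specific overrides take precedence over WORKSPACE.
# MEMGRAPH_WORKSPACE is the variable this commit documents alongside
# NEO4J_WORKSPACE; both backends isolate data through node labels.
os.environ["NEO4J_WORKSPACE"] = "project_a_graph"
os.environ["MEMGRAPH_WORKSPACE"] = "project_a_graph"
```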
@@ -394,6 +394,7 @@ MongoKVStorage MongoDB
 NetworkXStorage NetworkX (default)
 Neo4JStorage Neo4J
 PGGraphStorage PostgreSQL with AGE plugin
+MemgraphStorage Memgraph
 ```
 > Testing has shown that Neo4J delivers superior performance in production environments compared to PostgreSQL with AGE plugin.
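With `MemgraphStorage` added to the table of graph storage implementations, it can be selected like the other backends. A minimal sketch, assuming the backend is chosen by class name via the `graph_storage` argument and that the Memgraph connection is configured through environment variables (the exact variable name `MEMGRAPH_URI` here is an assumption, not confirmed by this diff):

```python
import os
from lightrag import LightRAG

# Assumed connection setting for a local Memgraph instance over Bolt.
os.environ.setdefault("MEMGRAPH_URI", "bolt://localhost:7687")

rag = LightRAG(
    working_dir="./rag_storage",
    graph_storage="MemgraphStorage",  # entry added to the table in this commit
    # llm_model_func=..., embedding_func=...  # model wiring omitted in this sketch
)
```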

View file

@@ -435,7 +435,7 @@ class MemgraphStorage(BaseGraphStorage):
 async def upsert_node(self, node_id: str, node_data: dict[str, str]) -> None:
 """
-Upsert a node in the Neo4j database.
+Upsert a node in the Memgraph database.
 Args:
 node_id: The unique identifier for the node (used as label)
@@ -448,7 +448,7 @@ class MemgraphStorage(BaseGraphStorage):
 properties = node_data
 entity_type = properties["entity_type"]
 if "entity_id" not in properties:
-raise ValueError("Neo4j: node properties must contain an 'entity_id' field")
+raise ValueError("Memgraph: node properties must contain an 'entity_id' field")
 try:
 async with self._driver.session(database=self._DATABASE) as session:
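These two hunks only touch copy-pasted Neo4j wording; the validation itself is unchanged. For context, a minimal sketch of the flow around the corrected error message, assuming a Bolt session compatible with the async neo4j driver; the MERGE statement is illustrative, not the repository's exact query:

```python
async def upsert_node(self, node_id: str, node_data: dict[str, str]) -> None:
    """Upsert a node in the Memgraph database (sketch, not the repo's exact code)."""
    properties = node_data
    entity_type = properties["entity_type"]
    if "entity_id" not in properties:
        raise ValueError("Memgraph: node properties must contain an 'entity_id' field")
    async with self._driver.session(database=self._DATABASE) as session:
        # Illustrative MERGE keyed on entity_id so repeated upserts update the
        # same node instead of creating duplicates; the real statement may differ.
        query = (
            f"MERGE (n:`{entity_type}` {{entity_id: $entity_id}}) "
            "SET n += $properties"
        )
        await session.run(query, entity_id=properties["entity_id"], properties=properties)
```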
@@ -817,28 +817,34 @@ class MemgraphStorage(BaseGraphStorage):
 WITH start
 CALL {{
 WITH start
-MATCH path = (start)-[*0..{max_depth}]-(node)
+MATCH path = (start)-[*BFS 0..{max_depth}]-(node)
 WITH nodes(path) AS path_nodes, relationships(path) AS path_rels
 UNWIND path_nodes AS n
 WITH collect(DISTINCT n) AS all_nodes, collect(DISTINCT path_rels) AS all_rel_lists
 WITH all_nodes, reduce(r = [], x IN all_rel_lists | r + x) AS all_rels
 RETURN all_nodes, all_rels
 }}
-WITH all_nodes AS nodes, all_rels AS relationships, size(all_nodes) AS total_nodes
+WITH all_nodes AS nodes, all_rels AS relationships, size(all_nodes) AS total_nodes_found
 WITH
 CASE
-WHEN total_nodes <= {max_nodes} THEN nodes
+WHEN total_nodes_found <= {max_nodes} THEN nodes
 ELSE nodes[0..{max_nodes}]
 END AS limited_nodes,
 relationships,
-total_nodes,
+total_nodes_found,
-total_nodes > {max_nodes} AS is_truncated
+total_nodes_found > {max_nodes} AS is_truncated
+UNWIND relationships AS rel
+WITH limited_nodes, rel, total_nodes_found, is_truncated
+WHERE startNode(rel) IN limited_nodes AND endNode(rel) IN limited_nodes
+WITH limited_nodes, collect(DISTINCT rel) AS limited_relationships, total_nodes_found, is_truncated
 RETURN
 [node IN limited_nodes | {{node: node}}] AS node_info,
-relationships,
+limited_relationships AS relationships,
-total_nodes,
+total_nodes_found,
 is_truncated
 """
 result_set = None
 try:
 result_set = await session.run(
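The Cypher rework above does three things: it switches the variable-length expansion to Memgraph's `*BFS` syntax, carries the node count through the extra pipeline stages under the new name `total_nodes_found`, and, after truncating to `{max_nodes}` nodes, keeps only relationships whose start and end nodes both survived, so `get_knowledge_graph` no longer returns edges pointing at dropped nodes. A small Python illustration of that truncation rule (plain data, hypothetical helper, not code from the repository):

```python
def truncate_graph(
    nodes: list[str],
    edges: list[tuple[str, str]],
    max_nodes: int,
) -> tuple[list[str], list[tuple[str, str]], bool]:
    """Mirror of the query's post-processing: cap the node count, then drop
    any edge whose source or target fell outside the kept set."""
    limited_nodes = nodes[:max_nodes]
    kept = set(limited_nodes)
    limited_edges = [(s, t) for s, t in edges if s in kept and t in kept]
    is_truncated = len(nodes) > max_nodes
    return limited_nodes, limited_edges, is_truncated


# Example: with max_nodes=2, node "c" is dropped, so the ("b", "c") edge goes too.
print(truncate_graph(["a", "b", "c"], [("a", "b"), ("b", "c")], max_nodes=2))
# (['a', 'b'], [('a', 'b')], True)
```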