commit 4b71995a70
parent 8a490b1c16
Author: hajdul88
Date:   2025-12-19 10:25:24 +01:00

10 changed files with 486 additions and 504 deletions

View file

@@ -5,6 +5,7 @@ Revises: 211ab850ef3d
 Create Date: 2025-11-04 21:45:52.642322

 """
+
 import os
 from typing import Sequence, Union
@@ -13,8 +14,8 @@ import sqlalchemy as sa

 # revision identifiers, used by Alembic.
-revision: str = 'e1ec1dcb50b6'
-down_revision: Union[str, None] = '211ab850ef3d'
+revision: str = "e1ec1dcb50b6"
+down_revision: Union[str, None] = "211ab850ef3d"
 branch_labels: Union[str, Sequence[str], None] = None
 depends_on: Union[str, Sequence[str], None] = None
@@ -33,9 +34,7 @@ def upgrade() -> None:
     last_accessed_column = _get_column(insp, "data", "last_accessed")
     if not last_accessed_column:
         # Always create the column for schema consistency
-        op.add_column('data',
-            sa.Column('last_accessed', sa.DateTime(timezone=True), nullable=True)
-        )
+        op.add_column("data", sa.Column("last_accessed", sa.DateTime(timezone=True), nullable=True))

     # Only initialize existing records if feature is enabled
     enable_last_accessed = os.getenv("ENABLE_LAST_ACCESSED", "false").lower() == "true"
@@ -49,4 +48,4 @@ def downgrade() -> None:
     last_accessed_column = _get_column(insp, "data", "last_accessed")
     if last_accessed_column:
-        op.drop_column('data', 'last_accessed')
+        op.drop_column("data", "last_accessed")
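
Both hunks above call a _get_column helper whose body lies outside this diff. A minimal sketch of what such a helper typically looks like in an Alembic migration (the body below is an assumption, not the repository's code; insp is expected to be sa.inspect(op.get_bind())):

    def _get_column(insp, table_name: str, column_name: str):
        # Return the column's reflection dict if the table has it, else None.
        for column in insp.get_columns(table_name):
            if column["name"] == column_name:
                return column
        return None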

View file

@@ -2,6 +2,7 @@ from cognee.infrastructure.engine import DataPoint
 from cognee.modules.engine.models.EntityType import EntityType
 from typing import Optional

+
 class Entity(DataPoint):
     name: str
     is_a: Optional[EntityType] = None

View file

@@ -1,13 +1,12 @@
 def get_entity_nodes_from_triplets(triplets):
     entity_nodes = []
     seen_ids = set()

     for triplet in triplets:
-        if hasattr(triplet, 'node1') and triplet.node1 and triplet.node1.id not in seen_ids:
+        if hasattr(triplet, "node1") and triplet.node1 and triplet.node1.id not in seen_ids:
             entity_nodes.append({"id": str(triplet.node1.id)})
             seen_ids.add(triplet.node1.id)
-        if hasattr(triplet, 'node2') and triplet.node2 and triplet.node2.id not in seen_ids:
+        if hasattr(triplet, "node2") and triplet.node2 and triplet.node2.id not in seen_ids:
             entity_nodes.append({"id": str(triplet.node2.id)})
             seen_ids.add(triplet.node2.id)

     return entity_nodes
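
The deduplication above can be exercised standalone; this usage sketch substitutes hypothetical SimpleNamespace objects for real graph-retrieval triplets:

    from types import SimpleNamespace

    node_a = SimpleNamespace(id="a1")
    node_b = SimpleNamespace(id="b2")
    triplets = [
        SimpleNamespace(node1=node_a, node2=node_b),
        SimpleNamespace(node1=node_a, node2=None),  # repeated node1, missing node2
    ]

    # Each node id is emitted once, stringified:
    assert get_entity_nodes_from_triplets(triplets) == [{"id": "a1"}, {"id": "b2"}]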

View file

@@ -28,7 +28,7 @@ async def update_node_access_timestamps(items: List[Any]):
     # Extract node IDs
     node_ids = []
     for item in items:
-        item_id = item.payload.get("id") if hasattr(item, 'payload') else item.get("id")
+        item_id = item.payload.get("id") if hasattr(item, "payload") else item.get("id")
         if item_id:
             node_ids.append(str(item_id))
@@ -52,7 +52,7 @@ async def _find_origin_documents_via_projection(graph_engine, node_ids):
     await memory_fragment.project_graph_from_db(
         graph_engine,
         node_properties_to_project=["id", "type"],
-        edge_properties_to_project=["relationship_name"]
+        edge_properties_to_project=["relationship_name"],
     )

     # Find origin documents by traversing the in-memory graph
@@ -63,7 +63,11 @@ async def _find_origin_documents_via_projection(graph_engine, node_ids):
         # Traverse edges to find connected documents
         for edge in node.get_skeleton_edges():
             # Get the neighbor node
-            neighbor = edge.get_destination_node() if edge.get_source_node().id == node_id else edge.get_source_node()
+            neighbor = (
+                edge.get_destination_node()
+                if edge.get_source_node().id == node_id
+                else edge.get_source_node()
+            )

             if neighbor and neighbor.get_attribute("type") in ["TextDocument", "Document"]:
                 doc_ids.add(neighbor.id)
@@ -74,9 +78,11 @@ async def _update_sql_records(doc_ids, timestamp_dt):
     """Update SQL Data table (same for all providers)"""
     db_engine = get_relational_engine()

     async with db_engine.get_async_session() as session:
-        stmt = update(Data).where(
-            Data.id.in_([UUID(doc_id) for doc_id in doc_ids])
-        ).values(last_accessed=timestamp_dt)
+        stmt = (
+            update(Data)
+            .where(Data.id.in_([UUID(doc_id) for doc_id in doc_ids]))
+            .values(last_accessed=timestamp_dt)
+        )
         await session.execute(stmt)
         await session.commit()
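
The reflowed statement above is the standard SQLAlchemy 2.x chained bulk update. A self-contained sketch of the same pattern, assuming an async session and a mapped Data model with id and last_accessed columns (touch_documents is a hypothetical name, not part of this diff):

    from datetime import datetime, timezone
    from uuid import UUID

    from sqlalchemy import update

    async def touch_documents(session, Data, doc_ids):
        # Stamp every matching Data row in a single UPDATE statement.
        stmt = (
            update(Data)
            .where(Data.id.in_([UUID(doc_id) for doc_id in doc_ids]))
            .values(last_accessed=datetime.now(timezone.utc))
        )
        await session.execute(stmt)
        await session.commit()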

View file

@@ -25,9 +25,7 @@ logger = get_logger(__name__)

 async def cleanup_unused_data(
-    minutes_threshold: Optional[int],
-    dry_run: bool = True,
-    user_id: Optional[UUID] = None
+    minutes_threshold: Optional[int], dry_run: bool = True, user_id: Optional[UUID] = None
 ) -> Dict[str, Any]:
     """
     Identify and remove unused data from the memify pipeline.
@@ -48,15 +46,13 @@ async def cleanup_unused_data(
     """
     # Check 1: Environment variable must be enabled
     if os.getenv("ENABLE_LAST_ACCESSED", "false").lower() != "true":
-        logger.warning(
-            "Cleanup skipped: ENABLE_LAST_ACCESSED is not enabled."
-        )
+        logger.warning("Cleanup skipped: ENABLE_LAST_ACCESSED is not enabled.")
         return {
             "status": "skipped",
             "reason": "ENABLE_LAST_ACCESSED not enabled",
             "unused_count": 0,
             "deleted_count": {},
-            "cleanup_date": datetime.now(timezone.utc).isoformat()
+            "cleanup_date": datetime.now(timezone.utc).isoformat(),
         }

     # Check 2: Verify tracking has actually been running
@@ -79,14 +75,14 @@ async def cleanup_unused_data(
             "reason": "No tracked records found - tracking may be newly enabled",
             "unused_count": 0,
             "deleted_count": {},
-            "cleanup_date": datetime.now(timezone.utc).isoformat()
+            "cleanup_date": datetime.now(timezone.utc).isoformat(),
         }

     logger.info(
         "Starting cleanup task",
         minutes_threshold=minutes_threshold,
         dry_run=dry_run,
-        user_id=str(user_id) if user_id else None
+        user_id=str(user_id) if user_id else None,
     )

     # Calculate cutoff timestamp
@@ -97,9 +93,7 @@ async def cleanup_unused_data(
 async def _cleanup_via_sql(
-    cutoff_date: datetime,
-    dry_run: bool,
-    user_id: Optional[UUID] = None
+    cutoff_date: datetime, dry_run: bool, user_id: Optional[UUID] = None
 ) -> Dict[str, Any]:
     """
     SQL-based cleanup: Query Data table for unused documents and use cognee.delete().
@@ -122,17 +116,15 @@ async def _cleanup_via_sql(
     async with db_engine.get_async_session() as session:
         # Query for Data records with old last_accessed timestamps
-        query = select(Data, DatasetData).join(
-            DatasetData, Data.id == DatasetData.data_id
-        ).where(
-            or_(
-                Data.last_accessed < cutoff_date,
-                Data.last_accessed.is_(None)
-            )
-        )
+        query = (
+            select(Data, DatasetData)
+            .join(DatasetData, Data.id == DatasetData.data_id)
+            .where(or_(Data.last_accessed < cutoff_date, Data.last_accessed.is_(None)))
+        )

         if user_id:
             from cognee.modules.data.models import Dataset

             query = query.join(Dataset, DatasetData.dataset_id == Dataset.id).where(
                 Dataset.owner_id == user_id
             )
@@ -146,19 +138,15 @@ async def _cleanup_via_sql(
         return {
             "status": "dry_run",
             "unused_count": len(unused_data),
-            "deleted_count": {
-                "data_items": 0,
-                "documents": 0
-            },
+            "deleted_count": {"data_items": 0, "documents": 0},
             "cleanup_date": datetime.now(timezone.utc).isoformat(),
-            "preview": {
-                "documents": len(unused_data)
-            }
+            "preview": {"documents": len(unused_data)},
         }

     # Delete each document using cognee.delete()
     deleted_count = 0
     from cognee.modules.users.methods import get_default_user

     user = await get_default_user() if user_id is None else None

     for data, dataset_data in unused_data:
@@ -167,7 +155,7 @@ async def _cleanup_via_sql(
             data_id=data.id,
             dataset_id=dataset_data.dataset_id,
             mode="hard",  # Use hard mode to also remove orphaned entities
-            user=user
+            user=user,
         )
         deleted_count += 1
         logger.info(f"Deleted document {data.id} from dataset {dataset_data.dataset_id}")
@@ -179,9 +167,6 @@ async def _cleanup_via_sql(
     return {
         "status": "completed",
         "unused_count": len(unused_data),
-        "deleted_count": {
-            "data_items": deleted_count,
-            "documents": deleted_count
-        },
-        "cleanup_date": datetime.now(timezone.utc).isoformat()
+        "deleted_count": {"data_items": deleted_count, "documents": deleted_count},
+        "cleanup_date": datetime.now(timezone.utc).isoformat(),
     }
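
Going by the signature and return shape visible in this diff, a caller would drive the task roughly like this (a hedged sketch; run_cleanup is a hypothetical wrapper, and the ENABLE_LAST_ACCESSED toggle is required or the task returns "skipped"):

    import asyncio
    import os

    from cognee.tasks.cleanup.cleanup_unused_data import cleanup_unused_data

    async def run_cleanup():
        os.environ["ENABLE_LAST_ACCESSED"] = "true"
        # Preview first; delete only if the dry run found unused documents.
        report = await cleanup_unused_data(minutes_threshold=60, dry_run=True)
        if report["status"] == "dry_run" and report["unused_count"] > 0:
            report = await cleanup_unused_data(minutes_threshold=60, dry_run=False)
        print(report["status"], report["deleted_count"])

    asyncio.run(run_cleanup())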

View file

@@ -1,4 +1,3 @@
 from typing import Union
 from cognee.infrastructure.engine import DataPoint
 from cognee.modules.chunking.models import DocumentChunk

View file

@@ -73,7 +73,7 @@ async def test_textdocument_cleanup_with_sql():
         query_type=SearchType.CHUNKS,
         query_text="machine learning",
         datasets=[dataset_name],
-        user=user
+        user=user,
     )

     logger.info(f"✅ Search completed, found {len(search_results)} results")
     assert len(search_results) > 0, "Search should return results"
@@ -116,43 +116,35 @@ async def test_textdocument_cleanup_with_sql():
     retrieved_timestamp = updated_data.last_accessed
     if retrieved_timestamp.tzinfo is None:
         retrieved_timestamp = retrieved_timestamp.replace(tzinfo=timezone.utc)
-    assert retrieved_timestamp == aged_timestamp, (
-        f"Timestamp should be updated to aged value"
-    )
+    assert retrieved_timestamp == aged_timestamp, f"Timestamp should be updated to aged value"

     # Step 5: Test cleanup (document-level is now the default)
     from cognee.tasks.cleanup.cleanup_unused_data import cleanup_unused_data

     # First do a dry run
     logger.info("Testing dry run...")
-    dry_run_result = await cleanup_unused_data(
-        minutes_threshold=10,
-        dry_run=True,
-        user_id=user.id
-    )
+    dry_run_result = await cleanup_unused_data(minutes_threshold=10, dry_run=True, user_id=user.id)

     # Debug: Print the actual result
     logger.info(f"Dry run result: {dry_run_result}")
-    assert dry_run_result['status'] == 'dry_run', f"Status should be 'dry_run', got: {dry_run_result['status']}"
-    assert dry_run_result['unused_count'] > 0, (
-        "Should find at least one unused document"
-    )
+    assert dry_run_result["status"] == "dry_run", (
+        f"Status should be 'dry_run', got: {dry_run_result['status']}"
+    )
+    assert dry_run_result["unused_count"] > 0, "Should find at least one unused document"

     logger.info(f"✅ Dry run found {dry_run_result['unused_count']} unused documents")

     # Now run actual cleanup
     logger.info("Executing cleanup...")
-    cleanup_result = await cleanup_unused_data(
-        minutes_threshold=30,
-        dry_run=False,
-        user_id=user.id
-    )
+    cleanup_result = await cleanup_unused_data(minutes_threshold=30, dry_run=False, user_id=user.id)

     assert cleanup_result["status"] == "completed", "Cleanup should complete successfully"
     assert cleanup_result["deleted_count"]["documents"] > 0, (
         "At least one document should be deleted"
     )
-    logger.info(f"✅ Cleanup completed. Deleted {cleanup_result['deleted_count']['documents']} documents")
+    logger.info(
+        f"✅ Cleanup completed. Deleted {cleanup_result['deleted_count']['documents']} documents"
+    )

     # Step 6: Verify deletion
     async with db_engine.get_async_session() as session:
@@ -168,5 +160,6 @@ async def test_textdocument_cleanup_with_sql():

+
 if __name__ == "__main__":
     import asyncio

     success = asyncio.run(test_textdocument_cleanup_with_sql())
     exit(0 if success else 1)
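
The aged_timestamp that the step-4 assertions compare against is prepared earlier in the test, outside this diff. A plausible sketch of that setup (the names Data and session follow the diff; age_record and the 45-minute offset are assumptions for illustration):

    from datetime import datetime, timedelta, timezone

    from sqlalchemy import update

    # Backdate one record 45 minutes so the 10- and 30-minute thresholds flag it.
    aged_timestamp = datetime.now(timezone.utc) - timedelta(minutes=45)

    async def age_record(session, data_id):
        await session.execute(
            update(Data).where(Data.id == data_id).values(last_accessed=aged_timestamp)
        )
        await session.commit()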