diff --git a/cognee/modules/pipelines/operations/pipeline.py b/cognee/modules/pipelines/operations/pipeline.py
index 3de424796..285790ecb 100644
--- a/cognee/modules/pipelines/operations/pipeline.py
+++ b/cognee/modules/pipelines/operations/pipeline.py
@@ -7,6 +7,7 @@
 from cognee.modules.data.methods.get_dataset_data import get_dataset_data
 from cognee.modules.data.models import Data, Dataset
 from cognee.modules.pipelines.operations.run_tasks import run_tasks
 from cognee.modules.pipelines.models import PipelineRunStatus
+from cognee.modules.pipelines.utils import generate_pipeline_id
 from cognee.modules.pipelines.operations.get_pipeline_status import get_pipeline_status
 from cognee.modules.pipelines.methods import get_pipeline_run_by_dataset
@@ -124,22 +125,22 @@ async def run_pipeline(
 
     # Ugly hack, but no easier way to do this.
     if pipeline_name == "add_pipeline":
+        pipeline_id = generate_pipeline_id(user.id, dataset.id, pipeline_name)
         # Refresh the add pipeline status so data is added to a dataset.
         # Without this the app_pipeline status will be DATASET_PROCESSING_COMPLETED and will skip the execution.
-        dataset_id = uuid5(NAMESPACE_OID, f"{dataset.name}{str(user.id)}")
 
         await log_pipeline_run_initiated(
-            pipeline_id=uuid5(NAMESPACE_OID, "add_pipeline"),
+            pipeline_id=pipeline_id,
             pipeline_name="add_pipeline",
-            dataset_id=dataset_id,
+            dataset_id=dataset.id,
         )
 
         # Refresh the cognify pipeline status after we add new files.
         # Without this the cognify_pipeline status will be DATASET_PROCESSING_COMPLETED and will skip the execution.
         await log_pipeline_run_initiated(
-            pipeline_id=uuid5(NAMESPACE_OID, "cognify_pipeline"),
+            pipeline_id=pipeline_id,
             pipeline_name="cognify_pipeline",
-            dataset_id=dataset_id,
+            dataset_id=dataset.id,
         )
 
         dataset_id = dataset.id
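
Note on the change: the old code derived pipeline_id from the pipeline name alone (uuid5(NAMESPACE_OID, "add_pipeline")), so every user and every dataset shared one status record per pipeline, and dataset_id was recomputed from the dataset name plus user id. The patch scopes the run to the (user, dataset, pipeline) triple via generate_pipeline_id and passes dataset.id directly. Below is a minimal sketch of what such a helper could look like; the actual implementation in cognee.modules.pipelines.utils is not shown in this diff, so the exact key format here is an assumption.

# Hypothetical sketch of generate_pipeline_id -- NOT the verified cognee
# implementation; the real helper's key format may differ.
from uuid import NAMESPACE_OID, UUID, uuid5

def generate_pipeline_id(user_id: UUID, dataset_id: UUID, pipeline_name: str) -> UUID:
    # Deterministic UUIDv5 keyed on the (user, dataset, pipeline) triple,
    # so repeated runs for the same triple resolve to the same pipeline_id,
    # while runs for different users or datasets no longer collide.
    return uuid5(NAMESPACE_OID, f"{user_id}{dataset_id}{pipeline_name}")

Because the id is deterministic, log_pipeline_run_initiated can refresh the status of exactly the right run before re-execution, which is the point of the "ugly hack" block above.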