From ed08cdb9f9cff6a3f33d1a5f7f81cc66835a4a1a Mon Sep 17 00:00:00 2001
From: Rita Aleksziev
Date: Fri, 15 Nov 2024 17:54:41 +0100
Subject: [PATCH] using the code graph pipeline instead of cognify

---
 evals/eval_swe_bench.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/evals/eval_swe_bench.py b/evals/eval_swe_bench.py
index 3aabfcba3..9acb176b7 100644
--- a/evals/eval_swe_bench.py
+++ b/evals/eval_swe_bench.py
@@ -20,7 +20,7 @@ async def cognee_and_llm(dataset, search_type = SearchType.CHUNKS):
     dataset_name = "SWE_test_data"
     code_text = dataset[0]["text"][:100000]
     await cognee.add([code_text], dataset_name)
-    await cognee.cognify([dataset_name])
+    await code_graph_pipeline([dataset_name])
     graph_engine = await get_graph_engine()
     with open(graph_engine.filename, "r") as f:
         graph_str = f.read()
@@ -63,7 +63,7 @@ async def llm_on_preprocessed_data(dataset):
     )
     return answer_prediction
 
-async def get_preds(dataset, with_cognee):
+async def get_preds(dataset, with_cognee=True):
     if with_cognee:
         text_output = await cognee_and_llm(dataset)
         model_name = "with_cognee"
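
Note (review comment, not part of the patch): the hunks replace the call to cognee.cognify with code_graph_pipeline but do not show a corresponding import, which must already exist or be added elsewhere in eval_swe_bench.py. The sketch below is a minimal illustration of the patched call site and the new get_preds default; the import path for code_graph_pipeline and the helper name index_dataset are assumptions, not confirmed by this diff.

    # Sketch only: the import location of code_graph_pipeline is assumed, not shown in the patch.
    import cognee
    from cognee.api.v1.cognify.code_graph_pipeline import code_graph_pipeline  # assumed path

    async def index_dataset(dataset):
        # Mirrors the patched section of cognee_and_llm: add the code text,
        # then build the code graph instead of running the generic cognify step.
        dataset_name = "SWE_test_data"
        code_text = dataset[0]["text"][:100000]
        await cognee.add([code_text], dataset_name)
        await code_graph_pipeline([dataset_name])

    # With the new signature, with_cognee defaults to True, so
    #     await get_preds(dataset)                     # cognee + code graph pipeline path
    #     await get_preds(dataset, with_cognee=False)  # plain-LLM baseline
    # both remain valid; only the baseline now needs an explicit flag.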