generating testspecs for data

This commit is contained in:
parent 094ba7233e
commit 721fde3d60

1 changed file with 52 additions and 5 deletions
```diff
@@ -1,14 +1,17 @@
 from swebench.harness.utils import load_swebench_dataset
+from swebench.harness.run_evaluation import get_dataset_from_preds
 from swebench.harness.run_evaluation import run_instances
+from swebench.harness.test_spec import make_test_spec, TestSpec
 
+import subprocess
 from swebench.inference.make_datasets.create_instance import PATCH_EXAMPLE
 from evals.eval_utils import download_instances
 import cognee
 from cognee.api.v1.cognify.code_graph_pipeline import code_graph_pipeline
 from cognee.api.v1.search import SearchType
 import os
+from pathlib import Path
 from cognee.infrastructure.databases.graph import get_graph_engine
 from cognee.infrastructure.llm.get_llm_client import get_llm_client
 from cognee.shared.data_models import Answer
 
 async def cognee_and_llm(dataset, search_type = SearchType.CHUNKS):
     await cognee.prune.prune_data()
```
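Most of the new imports support the test-spec generation added further down: `make_test_spec` turns a raw SWE-bench instance into a `TestSpec`. A minimal orientation sketch, using only the attributes this diff itself touches (the printed values are illustrative, not from a real run):

```python
from swebench.harness.utils import load_swebench_dataset
from swebench.harness.test_spec import make_test_spec

# Build a TestSpec for a single SWE-bench test instance
instance = load_swebench_dataset('princeton-nlp/SWE-bench', split='test')[0]
spec = make_test_spec(instance)

print(spec.instance_id)       # e.g. "astropy__astropy-12907"
print(spec.repo_script_list)  # shell commands that set up the repo under /testbed
```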
```diff
@@ -47,7 +50,8 @@ async def cognee_and_llm(dataset, search_type = SearchType.CHUNKS):
     )
     return answer_prediction
 
-def llm_on_preprocessed_data(dataset):
+
+async def llm_on_preprocessed_data(dataset):
     problem_statement = dataset[0]['problem_statement']
     prompt = dataset[0]["text"]
 
```
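Note that `llm_on_preprocessed_data` becomes a coroutine here, so any existing call site now has to await it. A minimal sketch, assuming a hypothetical single-instance dataset with the `problem_statement` and `text` fields the function reads:

```python
import asyncio

async def demo():
    # hypothetical instance in the preprocessed format this function expects
    dataset = [{"problem_statement": "Fix the off-by-one error in foo().",
                "text": "<retrieval-augmented prompt for the instance>"}]
    answer = await llm_on_preprocessed_data(dataset)  # must be awaited now
    print(answer)

asyncio.run(demo())
```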
```diff
@@ -59,6 +63,47 @@ def llm_on_preprocessed_data(dataset):
     )
     return answer_prediction
 
+async def get_preds(dataset, with_cognee):
+    if with_cognee:
+        text_output = await cognee_and_llm(dataset)
+        model_name = "with_cognee"
+    else:
+        text_output = await llm_on_preprocessed_data(dataset)
+        model_name = "without_cognee"
+
+    preds = {dataset[0]["instance_id"]:
+                 {"instance_id": dataset[0]["instance_id"],
+                  "model_patch": text_output,
+                  "model_name_or_path": model_name}}
+
+    dataset_name = 'princeton-nlp/SWE-bench' if with_cognee else 'princeton-nlp/SWE-bench_bm25_13K'
+    preds_dataset = get_dataset_from_preds(dataset_name,
+                                           "test",
+                                           [dataset[0]["instance_id"]],
+                                           preds,
+                                           model_name)
+
+    return preds, preds_dataset
+
+async def evaluate(test_specs: list[TestSpec],
+                   preds: dict,
+                   ):
+    for test_spec in test_specs:
+        pred = preds[test_spec.instance_id]
+        log_dir = Path("logs")
+        log_dir.mkdir(parents=True, exist_ok=True)
+
+        patch_file = Path(log_dir / "patch.diff")
+        patch_file.write_text(pred["model_patch"] or "")
+        for command in test_spec.repo_script_list:
+            if "/testbed" in command:
+                command = command.replace("/testbed", "./testbed")
+            result = subprocess.run(command, shell=True, check=True, capture_output=True, text=True)
+            print(result)
+
+        subprocess.run("git apply --allow-empty -v logs/patch.diff", shell=True, capture_output=True, text=True)
+
+
 async def main():
     swe_dataset = load_swebench_dataset('princeton-nlp/SWE-bench', split='test')
 
```
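For readers unfamiliar with the harness: `get_dataset_from_preds` expects predictions keyed by instance id, which is exactly the mapping `get_preds` builds above. Spelled out with illustrative values (the instance id and patch are hypothetical):

```python
preds = {
    "astropy__astropy-12907": {
        "instance_id": "astropy__astropy-12907",
        "model_patch": "diff --git a/foo.py b/foo.py\n...",  # unified diff produced by the model
        "model_name_or_path": "with_cognee",                 # label used to group predictions
    },
}
```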
```diff
@@ -73,8 +118,10 @@ async def main():
     else:
         dataset = download_instances(test_data, filepath)
 
-    llm_output_with_cognee = await cognee_and_llm(dataset)
-    llm_output_without_cognee = llm_on_preprocessed_data(test_data_preprocessed)
+    cognee_preds, cognee_preds_dataset = await get_preds(dataset, with_cognee=True)
+    # nocognee_preds = await get_preds(dataset, with_cognee=False)
+    test_specs = list(map(make_test_spec, test_data))
+    results = await evaluate(test_specs, cognee_preds)
 
 if __name__ == "__main__":
     import asyncio
```
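Two small things worth flagging in this last hunk: `evaluate` has no return statement, so `results` will be `None`; and the hunk ends inside the `__main__` guard, so the actual entry point is not shown. Presumably the file drives the coroutine along these lines (an assumption, not part of the diff):

```python
if __name__ == "__main__":
    import asyncio

    asyncio.run(main())  # assumed; the closing line of the guard falls outside the hunk
```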