running swebench evaluation as subprocess
commit 98e3445c2c
parent ed08cdb9f9
3 changed files with 190 additions and 92 deletions
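
The change drops the in-process evaluate() helper in favor of writing predictions to withcognee.json and shelling out to the official harness; the subprocess call assembled in main() is equivalent to running python -m swebench.harness.run_evaluation --dataset_name princeton-nlp/SWE-bench --split test --predictions_path withcognee.json --max_workers 1 --instance_ids <instance_id> --run_id with_cognee.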
@@ -1,3 +1,8 @@
+from cognee.infrastructure.databases.vector import get_vector_engine
+from cognee.base_config import get_base_config
+import os
+import logging
+from cognee.infrastructure.llm.get_llm_client import get_llm_client
 from typing import List, Dict, Type
 from swebench.harness.utils import load_swebench_dataset
 from deepeval.dataset import EvaluationDataset
@@ -21,8 +26,6 @@ def convert_swe_to_deepeval(swe_dataset: List[Dict]):
         expected_output = datum["patch"]
         context = [datum["text"]]
         # retrieval_context = datum.get(retrieval_context_key_name)
-        # tools_called = datum.get(tools_called_key_name)
-        # expected_tools = json_obj.get(expected_tools_key_name)
 
         deepeval_dataset.add_test_case(
             LLMTestCase(
@@ -31,33 +34,32 @@ def convert_swe_to_deepeval(swe_dataset: List[Dict]):
                 expected_output=expected_output,
                 context=context,
                 # retrieval_context=retrieval_context,
-                # tools_called=tools_called,
-                # expected_tools=expected_tools,
             )
         )
     return deepeval_dataset
 
 
-from cognee.infrastructure.llm.get_llm_client import get_llm_client
-swe_dataset = load_swebench_dataset('princeton-nlp/SWE-bench_bm25_13K', split='test')
+swe_dataset = load_swebench_dataset(
+    'princeton-nlp/SWE-bench_bm25_13K', split='test')
 deepeval_dataset = convert_swe_to_deepeval(swe_dataset)
 
-import logging
 
 logger = logging.getLogger(__name__)
 
-class AnswerModel(BaseModel):
-    response:str
 
-def get_answer_base(content: str, context:str, response_model: Type[BaseModel]):
+class AnswerModel(BaseModel):
+    response: str
+
+
+def get_answer_base(content: str, context: str, response_model: Type[BaseModel]):
     llm_client = get_llm_client()
 
     system_prompt = "THIS IS YOUR CONTEXT:" + str(context)
 
     return llm_client.create_structured_output(content, system_prompt, response_model)
 
-def get_answer(content: str,context, model: Type[BaseModel]= AnswerModel):
+
+def get_answer(content: str, context, model: Type[BaseModel] = AnswerModel):
     try:
         return (get_answer_base(
@@ -66,9 +68,11 @@ def get_answer(content: str,context, model: Type[BaseModel]= AnswerModel):
             model
         ))
     except Exception as error:
-        logger.error("Error extracting cognitive layers from content: %s", error, exc_info = True)
+        logger.error(
+            "Error extracting cognitive layers from content: %s", error, exc_info=True)
        raise error
 
 
 async def run_cognify_base_rag():
     from cognee.api.v1.add import add
     from cognee.api.v1.prune import prune
@@ -82,11 +86,7 @@ async def run_cognify_base_rag():
         pass
 
 
-import os
-from cognee.base_config import get_base_config
-from cognee.infrastructure.databases.vector import get_vector_engine
-
-async def cognify_search_base_rag(content:str, context:str):
+async def cognify_search_base_rag(content: str, context: str):
     base_config = get_base_config()
 
     cognee_directory_path = os.path.abspath(".cognee_system")
@@ -99,7 +99,8 @@ async def cognify_search_base_rag(content:str, context:str):
     print("results", return_)
     return return_
 
-async def cognify_search_graph(content:str, context:str):
+
+async def cognify_search_graph(content: str, context: str):
     from cognee.api.v1.search import search, SearchType
     params = {'query': 'Donald Trump'}
 
@@ -114,7 +115,8 @@ def convert_goldens_to_test_cases(test_cases_raw: List[LLMTestCase]) -> List[LLM
         test_case = LLMTestCase(
             input=case.input,
             # Generate actual output using the 'input' and 'additional_metadata'
-            actual_output= str(get_answer(case.input, case.context).model_dump()['response']),
+            actual_output=str(get_answer(
+                case.input, case.context).model_dump()['response']),
             expected_output=case.expected_output,
             context=case.context,
             retrieval_context=["retrieval_context"],
@@ -122,6 +124,7 @@ def convert_goldens_to_test_cases(test_cases_raw: List[LLMTestCase]) -> List[LLM
         test_cases.append(test_case)
     return test_cases
 
+
 def convert_swe_to_deepeval_testcases(swe_dataset: List[Dict]):
     deepeval_dataset = EvaluationDataset()
     for datum in swe_dataset[:4]:
@@ -135,7 +138,8 @@ def convert_swe_to_deepeval_testcases(swe_dataset: List[Dict]):
         deepeval_dataset.add_test_case(
             LLMTestCase(
                 input=input,
-                actual_output= str(get_answer(input, context).model_dump()['response']),
+                actual_output=str(get_answer(
+                    input, context).model_dump()['response']),
                 expected_output=expected_output,
                 context=context,
                 # retrieval_context=retrieval_context,
@@ -145,7 +149,9 @@ def convert_swe_to_deepeval_testcases(swe_dataset: List[Dict]):
         )
     return deepeval_dataset
 
-swe_dataset = load_swebench_dataset('princeton-nlp/SWE-bench_bm25_13K', split='test')
+
+swe_dataset = load_swebench_dataset(
+    'princeton-nlp/SWE-bench_bm25_13K', split='test')
 test_dataset = convert_swe_to_deepeval_testcases(swe_dataset)
 
 if __name__ == "__main__":
@@ -159,7 +165,8 @@ if __name__ == "__main__":
     asyncio.run(main())
     # run_cognify_base_rag_and_search()
     # # Data preprocessing before setting the dataset test cases
-    swe_dataset = load_swebench_dataset('princeton-nlp/SWE-bench_bm25_13K', split='test')
+    swe_dataset = load_swebench_dataset(
+        'princeton-nlp/SWE-bench_bm25_13K', split='test')
     test_dataset = convert_swe_to_deepeval_testcases(swe_dataset)
     from deepeval.metrics import HallucinationMetric
     metric = HallucinationMetric()
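
The hunk above imports HallucinationMetric and instantiates it, but the diff does not show the metric being applied to test_dataset. A minimal sketch of that step, assuming deepeval's standard evaluate() entry point (not part of this commit):

    from deepeval import evaluate

    # Score each test case's actual_output against its context;
    # HallucinationMetric flags answers unsupported by the context.
    evaluate(test_cases=test_dataset.test_cases, metrics=[metric])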
@@ -1,36 +1,36 @@
-from swebench.harness.utils import load_swebench_dataset
-from swebench.harness.run_evaluation import get_dataset_from_preds
-from swebench.harness.run_evaluation import run_instances
-from swebench.harness.test_spec import make_test_spec, TestSpec
+import json
 
 import subprocess
+from pathlib import Path
 
+from swebench.harness.utils import load_swebench_dataset
 from swebench.inference.make_datasets.create_instance import PATCH_EXAMPLE
-from evals.eval_utils import download_instances
 import cognee
 from cognee.api.v1.cognify.code_graph_pipeline import code_graph_pipeline
 from cognee.api.v1.search import SearchType
-from pathlib import Path
 from cognee.infrastructure.databases.graph import get_graph_engine
 from cognee.infrastructure.llm.get_llm_client import get_llm_client
+from evals.eval_utils import download_instances
 
-async def cognee_and_llm(dataset, search_type = SearchType.CHUNKS):
+
+async def cognee_and_llm(dataset, search_type=SearchType.CHUNKS):
     await cognee.prune.prune_data()
-    await cognee.prune.prune_system(metadata = True)
+    await cognee.prune.prune_system(metadata=True)
 
     dataset_name = "SWE_test_data"
-    code_text = dataset[0]["text"][:100000]
+    code_text = dataset[0]["text"]
     await cognee.add([code_text], dataset_name)
     await code_graph_pipeline([dataset_name])
     graph_engine = await get_graph_engine()
     with open(graph_engine.filename, "r") as f:
         graph_str = f.read()
 
     problem_statement = dataset[0]['problem_statement']
     instructions = (
-        f"I need you to solve this issue by looking at the provided knowledge graph and by "
-        + f"generating a single patch file that I can apply directly to this repository "
-        + f"using git apply. Please respond with a single patch "
-        + f"file in the following format."
+        "I need you to solve this issue by looking at the provided knowledge graph and by "
+        + "generating a single patch file that I can apply directly to this repository "
+        + "using git apply. Please respond with a single patch "
+        + "file in the following format."
     )
 
     prompt = "\n".join([
@@ -44,10 +44,10 @@ async def cognee_and_llm(dataset, search_type = SearchType.CHUNKS):
 
     llm_client = get_llm_client()
     answer_prediction = llm_client.create_structured_output(
-        text_input = problem_statement,
-        system_prompt = prompt,
-        response_model = str,
+        text_input=problem_statement,
+        system_prompt=prompt,
+        response_model=str,
     )
     return answer_prediction
 
 
@@ -57,12 +57,13 @@ async def llm_on_preprocessed_data(dataset):
 
     llm_client = get_llm_client()
     answer_prediction = llm_client.create_structured_output(
-        text_input = problem_statement,
-        system_prompt = prompt, # TODO check if this is correct
-        response_model = str,
+        text_input=problem_statement,
+        system_prompt=prompt,
+        response_model=str,
     )
     return answer_prediction
 
 
 async def get_preds(dataset, with_cognee=True):
     if with_cognee:
         text_output = await cognee_and_llm(dataset)
@@ -71,43 +72,18 @@ async def get_preds(dataset, with_cognee=True):
         text_output = await llm_on_preprocessed_data(dataset)
         model_name = "without_cognee"
 
-    preds = {dataset[0]["instance_id"]:
-                 {"instance_id": dataset[0]["instance_id"],
-                  "model_patch": text_output,
-                  "model_name_or_path": model_name}}
-
-    dataset_name = 'princeton-nlp/SWE-bench' if with_cognee else 'princeton-nlp/SWE-bench_bm25_13K'
-    preds_dataset = get_dataset_from_preds(dataset_name,
-                                           "test",
-                                           [dataset[0]["instance_id"]],
-                                           preds,
-                                           model_name)
-
-    return preds, preds_dataset
-
-
-async def evaluate(test_specs: list[TestSpec],
-                   preds: dict,
-                   ):
-    for test_spec in test_specs:
-        pred = preds[test_spec.instance_id]
-        log_dir = Path("logs")
-        log_dir.mkdir(parents=True, exist_ok=True)
-
-        patch_file = Path(log_dir / "patch.diff")
-        patch_file.write_text(pred["model_patch"] or "")
-        for command in test_spec.repo_script_list:
-            if "/testbed" in command:
-                command = command.replace("/testbed", "./testbed")
-            result = subprocess.run(command, shell=True, check=True, capture_output=True, text=True)
-            print(result)
-
-        subprocess.run("git apply --allow-empty -v logs/patch.diff", shell=True, capture_output=True, text=True)
-
+    preds = [{"instance_id": dataset[0]["instance_id"],
+              "model_patch": text_output,
+              "model_name_or_path": model_name}]
+    return preds
 
 
 async def main():
-    swe_dataset = load_swebench_dataset('princeton-nlp/SWE-bench', split='test')
-    swe_dataset_preprocessed = load_swebench_dataset('princeton-nlp/SWE-bench_bm25_13K', split='test')
+    swe_dataset = load_swebench_dataset(
+        'princeton-nlp/SWE-bench', split='test')
+    swe_dataset_preprocessed = load_swebench_dataset(
+        'princeton-nlp/SWE-bench_bm25_13K', split='test')
     test_data = swe_dataset[:1]
     test_data_preprocessed = swe_dataset_preprocessed[:1]
     assert test_data[0]["instance_id"] == test_data_preprocessed[0]["instance_id"]
@@ -118,10 +94,18 @@ async def main():
     else:
         dataset = download_instances(test_data, filepath)
 
-    cognee_preds, cognee_preds_dataset = await get_preds(dataset, with_cognee=True)
+    cognee_preds = await get_preds(dataset, with_cognee=True)
     # nocognee_preds = await get_preds(dataset, with_cognee=False)
-    test_specs = list(map(make_test_spec, test_data))
-    results = await evaluate(test_specs, cognee_preds)
+    with open("withcognee.json", "w") as file:
+        json.dump(cognee_preds, file)
 
+    subprocess.run(["python", "-m", "swebench.harness.run_evaluation",
+                    "--dataset_name", 'princeton-nlp/SWE-bench',
+                    "--split", "test",
+                    "--predictions_path", "withcognee.json",
+                    "--max_workers", "1",
+                    "--instance_ids", test_data[0]["instance_id"],
+                    "--run_id", "with_cognee"])
 
 if __name__ == "__main__":
     import asyncio
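
The harness subprocess reads predictions from the file passed via --predictions_path, which is why get_preds now returns a plain list of records rather than the old (preds, preds_dataset) pair. A sketch of what withcognee.json holds for a single instance, with placeholder values:

    [{"instance_id": "<instance_id>",
      "model_patch": "<unified diff produced by the model>",
      "model_name_or_path": "<model_name>"}]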
evals/eval_utils.py (new file, 107 lines)
@@ -0,0 +1,107 @@
+import json
+import logging
+import os
+import traceback
+from copy import deepcopy
+from pathlib import Path
+from tempfile import TemporaryDirectory
+
+import unidiff
+from datasets import Dataset
+from swebench.inference.make_datasets.create_instance import make_code_text
+from swebench.inference.make_datasets.utils import (AutoContextManager,
+                                                    ingest_directory_contents)
+from tqdm.auto import tqdm
+
+
+def ingest_files(filenames):
+    files_dict = dict()
+    for filename in filenames:
+        with open(filename) as f:
+            content = f.read()
+        files_dict[filename] = content
+    return files_dict
+
+
+def ingest_repos(input_instances):
+    orig_dir = os.getcwd()
+    with TemporaryDirectory(
+        dir="/scratch" if os.path.exists("/scratch") else "/tmp"
+    ) as root_dir:
+        for instance in tqdm(
+            input_instances.values(),
+            total=len(input_instances),
+            desc="Downloading repos on specific commits",
+        ):
+            try:
+                with AutoContextManager(
+                    instance, root_dir
+                ) as cm:
+                    readmes = cm.get_readme_files()
+                    instance["readmes"] = ingest_files(readmes)
+                    instance["file_contents"] = ingest_directory_contents(
+                        cm.repo_path
+                    )
+            finally:
+                # if AutoContextManager fails to exit properly future exits will return the wrong directory
+                os.chdir(orig_dir)
+
+    return input_instances
+
+
+def extract_fields(instance):
+    readmes_text = make_code_text(instance["readmes"])
+    code_text = make_code_text(
+        instance["file_contents"], add_line_numbers=False)
+
+    text_inputs = "\n".join([readmes_text, code_text])
+    text_inputs = text_inputs.strip() + "\n\n"
+    # text_inputs = code_text
+    patch = "\n".join([f"<patch>", instance["patch"], "</patch>"])
+    return {**instance, "text": text_inputs, "patch": patch}
+
+
+def create_dataset(input_instances):
+    columns = [
+        "instance_id",
+        "text",
+        "repo",
+        "base_commit",
+        "problem_statement",
+        "hints_text",
+        "created_at",
+        "patch",
+        "test_patch",
+        "version",
+        "FAIL_TO_PASS",
+        "PASS_TO_PASS",
+        "environment_setup_commit",
+    ]
+
+    data_table = {key: list() for key in columns}
+    for instance in input_instances.values():
+        datum = extract_fields(instance)
+        for key in columns:
+            data_table[key].append(datum[key] if key in datum else "")
+    dataset = Dataset.from_dict(data_table)
+
+    return dataset
+
+
+def download_instances(
+    input_data,
+    path=Path("SWE-bench_testsample"),
+    verbose=False,
+):
+    """Downloads code from github.
+
+    Args:
+    - input_data: dictionary with unprocessed input instances.
+    - verbose: set ContextManager verbose to True
+    """
+    input_instances = {x["instance_id"]: x for x in input_data}
+    input_instances_copy = deepcopy(input_instances)
+    input_instances_with_text = ingest_repos(input_instances_copy)
+    dataset = create_dataset(input_instances_with_text)
+    dataset.save_to_disk(path)
+    return dataset
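
download_instances is the entry point main() calls to materialize repository contents locally. A minimal usage sketch under the file's own defaults, where test_data stands for a slice of the SWE-bench test split:

    from pathlib import Path
    from evals.eval_utils import download_instances

    # Clones each instance's repo at its base commit, ingests READMEs and
    # file contents, and saves the resulting HuggingFace Dataset to disk.
    dataset = download_instances(test_data, path=Path("SWE-bench_testsample"))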