Autoformat graph pydantic conversion code

This commit is contained in:
Leon Luithlen 2024-11-15 16:44:30 +01:00
parent 148eb4ed9b
commit 5b420ebccc
2 changed files with 40 additions and 31 deletions

View file

@@ -1,8 +1,9 @@
import time
import psutil
import tracemalloc
import statistics import statistics
from typing import Callable, Any, Dict import time
import tracemalloc
from typing import Any, Callable, Dict
import psutil
def benchmark_function(func: Callable, *args, num_runs: int = 5) -> Dict[str, Any]: def benchmark_function(func: Callable, *args, num_runs: int = 5) -> Dict[str, Any]:
@@ -39,7 +40,9 @@ def benchmark_function(func: Callable, *args, num_runs: int = 5) -> Dict[str, An
# Calculate metrics # Calculate metrics
execution_time = end_time - start_time execution_time = end_time - start_time
cpu_time = (end_cpu_time.user + end_cpu_time.system) - (start_cpu_time.user + start_cpu_time.system) cpu_time = (end_cpu_time.user + end_cpu_time.system) - (
start_cpu_time.user + start_cpu_time.system
)
current, peak = tracemalloc.get_traced_memory() current, peak = tracemalloc.get_traced_memory()
final_memory = process.memory_info().rss final_memory = process.memory_info().rss
memory_used = final_memory - initial_memory memory_used = final_memory - initial_memory
@@ -55,7 +58,7 @@ def benchmark_function(func: Callable, *args, num_runs: int = 5) -> Dict[str, An
"mean_execution_time": statistics.mean(execution_times), "mean_execution_time": statistics.mean(execution_times),
"mean_peak_memory_mb": statistics.mean(peak_memory_usages), "mean_peak_memory_mb": statistics.mean(peak_memory_usages),
"mean_cpu_percent": statistics.mean(cpu_percentages), "mean_cpu_percent": statistics.mean(cpu_percentages),
"num_runs": num_runs "num_runs": num_runs,
} }
if num_runs > 1: if num_runs > 1:

View file

@@ -1,26 +1,30 @@
import time
import argparse import argparse
import time
from benchmark_function import benchmark_function from benchmark_function import benchmark_function
from cognee.modules.graph.utils import get_graph_from_model
from cognee.modules.graph.utils import get_graph_from_model
from cognee.tests.unit.interfaces.graph.util import ( from cognee.tests.unit.interfaces.graph.util import (
PERSON_NAMES, PERSON_NAMES,
create_organization_recursive, create_organization_recursive,
) )
# Example usage: # Example usage:
if __name__ == "__main__": if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Benchmark graph model with configurable recursive depth') parser = argparse.ArgumentParser(
parser.add_argument('--recursive-depth', type=int, default=3, description="Benchmark graph model with configurable recursive depth"
help='Recursive depth for graph generation (default: 3)') )
parser.add_argument('--runs', type=int, default=5, parser.add_argument(
help='Number of benchmark runs (default: 5)') "--recursive-depth",
type=int,
default=3,
help="Recursive depth for graph generation (default: 3)",
)
parser.add_argument(
"--runs", type=int, default=5, help="Number of benchmark runs (default: 5)"
)
args = parser.parse_args() args = parser.parse_args()
society = create_organization_recursive( society = create_organization_recursive(
"society", "Society", PERSON_NAMES, args.recursive_depth "society", "Society", PERSON_NAMES, args.recursive_depth
) )
@@ -28,10 +32,12 @@ if __name__ == "__main__":
results = benchmark_function(get_graph_from_model, society, num_runs=args.runs) results = benchmark_function(get_graph_from_model, society, num_runs=args.runs)
print("\nBenchmark Results:") print("\nBenchmark Results:")
print(f"N nodes: {len(nodes)}, N edges: {len(edges)}, Recursion depth: {args.recursive_depth}") print(
f"N nodes: {len(nodes)}, N edges: {len(edges)}, Recursion depth: {args.recursive_depth}"
)
print(f"Mean Peak Memory: {results['mean_peak_memory_mb']:.2f} MB") print(f"Mean Peak Memory: {results['mean_peak_memory_mb']:.2f} MB")
print(f"Mean CPU Usage: {results['mean_cpu_percent']:.2f}%") print(f"Mean CPU Usage: {results['mean_cpu_percent']:.2f}%")
print(f"Mean Execution Time: {results['mean_execution_time']:.4f} seconds") print(f"Mean Execution Time: {results['mean_execution_time']:.4f} seconds")
if 'std_execution_time' in results: if "std_execution_time" in results:
print(f"Execution Time Std: {results['std_execution_time']:.4f} seconds") print(f"Execution Time Std: {results['std_execution_time']:.4f} seconds")