From 649fcf2ba815e2809eed060c9038c9cc8963c935 Mon Sep 17 00:00:00 2001
From: vasilije
Date: Sun, 5 Jan 2025 19:21:09 +0100
Subject: [PATCH] Fix linter issues

---
 alembic/env.py                                |  3 +-
 cognee/api/client.py                          | 36 +++++++++----------
 cognee/api/v1/cognify/code_graph_pipeline.py  |  2 +-
 .../hybrid/falkordb/FalkorDBAdapter.py        |  4 +--
 cognee/infrastructure/llm/openai/adapter.py   |  2 +-
 .../data/extraction/extract_topics_naive.py   |  2 +-
 cognee/tasks/repo_processor/__init__.py       |  7 ++--
 evals/generate_test_set.py                    |  6 ++--
 .../benchmark_function.py                     |  4 +--
 9 files changed, 31 insertions(+), 35 deletions(-)

diff --git a/alembic/env.py b/alembic/env.py
index beed7aa04..caeae0fdc 100644
--- a/alembic/env.py
+++ b/alembic/env.py
@@ -4,7 +4,7 @@ from logging.config import fileConfig
 from sqlalchemy import pool
 from sqlalchemy.engine import Connection
 from sqlalchemy.ext.asyncio import async_engine_from_config
-
+from cognee.infrastructure.databases.relational import Base
 from alembic import context
 
 # this is the Alembic Config object, which provides
@@ -20,7 +20,6 @@ if config.config_file_name is not None:
 # for 'autogenerate' support
 # from myapp import mymodel
 # target_metadata = mymodel.Base.metadata
-from cognee.infrastructure.databases.relational import Base
 
 target_metadata = Base.metadata
 
diff --git a/cognee/api/client.py b/cognee/api/client.py
index a64b1ed08..02ab94cb0 100644
--- a/cognee/api/client.py
+++ b/cognee/api/client.py
@@ -7,9 +7,25 @@ import sentry_sdk
 from fastapi import FastAPI, status
 from fastapi.responses import JSONResponse, Response
 from fastapi.middleware.cors import CORSMiddleware
-
+from cognee.api.v1.permissions.routers import get_permissions_router
+from cognee.api.v1.settings.routers import get_settings_router
+from cognee.api.v1.datasets.routers import get_datasets_router
+from cognee.api.v1.cognify.routers import get_cognify_router
+from cognee.api.v1.search.routers import get_search_router
+from cognee.api.v1.add.routers import get_add_router
+from fastapi import Request
+from fastapi.encoders import jsonable_encoder
+from fastapi.exceptions import RequestValidationError
 from cognee.exceptions import CogneeApiError
 from traceback import format_exc
+from cognee.api.v1.users.routers import (
+    get_auth_router,
+    get_register_router,
+    get_reset_password_router,
+    get_verify_router,
+    get_users_router,
+)
+from contextlib import asynccontextmanager
 
 # Set up logging
 logging.basicConfig(
@@ -25,7 +41,6 @@ if os.getenv("ENV", "prod") == "prod":
         profiles_sample_rate=1.0,
     )
 
-from contextlib import asynccontextmanager
 
 app_environment = os.getenv("ENV", "prod")
 
@@ -58,23 +73,6 @@ app.add_middleware(
     allow_headers=["*"],
 )
 
-from cognee.api.v1.users.routers import (
-    get_auth_router,
-    get_register_router,
-    get_reset_password_router,
-    get_verify_router,
-    get_users_router,
-)
-from cognee.api.v1.permissions.routers import get_permissions_router
-from cognee.api.v1.settings.routers import get_settings_router
-from cognee.api.v1.datasets.routers import get_datasets_router
-from cognee.api.v1.cognify.routers import get_cognify_router
-from cognee.api.v1.search.routers import get_search_router
-from cognee.api.v1.add.routers import get_add_router
-
-from fastapi import Request
-from fastapi.encoders import jsonable_encoder
-from fastapi.exceptions import RequestValidationError
 
 
 @app.exception_handler(RequestValidationError)
diff --git a/cognee/api/v1/cognify/code_graph_pipeline.py b/cognee/api/v1/cognify/code_graph_pipeline.py
index 813cef927..405cb0b40 100644
--- a/cognee/api/v1/cognify/code_graph_pipeline.py
+++ b/cognee/api/v1/cognify/code_graph_pipeline.py
@@ -21,12 +21,12 @@ from cognee.tasks.repo_processor import (
 )
 from cognee.tasks.repo_processor.get_source_code_chunks import get_source_code_chunks
 from cognee.tasks.storage import add_data_points
+from cognee.tasks.summarization import summarize_code, summarize_text
 
 monitoring = get_base_config().monitoring_tool
 if monitoring == MonitoringTool.LANGFUSE:
     from langfuse.decorators import observe
 
-from cognee.tasks.summarization import summarize_code, summarize_text
 
 logger = logging.getLogger("code_graph_pipeline")
 update_status_lock = asyncio.Lock()
diff --git a/cognee/infrastructure/databases/hybrid/falkordb/FalkorDBAdapter.py b/cognee/infrastructure/databases/hybrid/falkordb/FalkorDBAdapter.py
index 56c9365c8..871bae4f0 100644
--- a/cognee/infrastructure/databases/hybrid/falkordb/FalkorDBAdapter.py
+++ b/cognee/infrastructure/databases/hybrid/falkordb/FalkorDBAdapter.py
@@ -217,7 +217,7 @@ class FalkorDBAdapter(VectorDBInterface, GraphDBInterface):
 
     async def retrieve(self, data_point_ids: list[UUID]):
         result = self.query(
-            f"MATCH (node) WHERE node.id IN $node_ids RETURN node",
+            "MATCH (node) WHERE node.id IN $node_ids RETURN node",
             {
                 "node_ids": [str(data_point) for data_point in data_point_ids],
             },
@@ -343,7 +343,7 @@ class FalkorDBAdapter(VectorDBInterface, GraphDBInterface):
 
     async def delete_data_points(self, collection_name: str, data_point_ids: list[UUID]):
         return self.query(
-            f"MATCH (node) WHERE node.id IN $node_ids DETACH DELETE node",
+            "MATCH (node) WHERE node.id IN $node_ids DETACH DELETE node",
             {
                 "node_ids": [str(data_point) for data_point in data_point_ids],
             },
diff --git a/cognee/infrastructure/llm/openai/adapter.py b/cognee/infrastructure/llm/openai/adapter.py
index 340fa392d..c452924c9 100644
--- a/cognee/infrastructure/llm/openai/adapter.py
+++ b/cognee/infrastructure/llm/openai/adapter.py
@@ -42,7 +42,7 @@ class OpenAIAdapter(LLMInterface):
         self.endpoint = endpoint
         self.api_version = api_version
         self.streaming = streaming
-        base_config = get_base_config()
+
 
     @observe(as_type="generation")
     async def acreate_structured_output(
diff --git a/cognee/modules/data/extraction/extract_topics_naive.py b/cognee/modules/data/extraction/extract_topics_naive.py
index 98685a0e2..d1323e24b 100644
--- a/cognee/modules/data/extraction/extract_topics_naive.py
+++ b/cognee/modules/data/extraction/extract_topics_naive.py
@@ -51,7 +51,7 @@ def remove_stop_words(text: str):
     stop_words = set(stopwords.words("english"))
 
     text = text.split()
-    text = [word for word in text if not word in stop_words]
+    text = [word for word in text if word not in stop_words]
 
     return " ".join(text)
 
diff --git a/cognee/tasks/repo_processor/__init__.py b/cognee/tasks/repo_processor/__init__.py
index fa754028e..6dc032547 100644
--- a/cognee/tasks/repo_processor/__init__.py
+++ b/cognee/tasks/repo_processor/__init__.py
@@ -1,8 +1,7 @@
-import logging
-
-logger = logging.getLogger("task:repo_processor")
-
 from .enrich_dependency_graph import enrich_dependency_graph
 from .expand_dependency_graph import expand_dependency_graph
 from .get_non_code_files import get_data_list_for_user, get_non_py_files
 from .get_repo_file_dependencies import get_repo_file_dependencies
+import logging
+
+logger = logging.getLogger("task:repo_processor")
diff --git a/evals/generate_test_set.py b/evals/generate_test_set.py
index 923233223..92c74bd1e 100644
--- a/evals/generate_test_set.py
+++ b/evals/generate_test_set.py
@@ -33,14 +33,14 @@
 print(dataset.goldens)
 print(dataset)
 
-import pytest
-from deepeval import assert_test
+# import pytest
+# from deepeval import assert_test
 
 from deepeval.metrics import AnswerRelevancyMetric
 
 answer_relevancy_metric = AnswerRelevancyMetric(threshold=0.5)
 
-from deepeval import evaluate
+# from deepeval import evaluate
 
 # evaluate(dataset, [answer_relevancy_metric])
 
diff --git a/profiling/graph_pydantic_conversion/benchmark_function.py b/profiling/graph_pydantic_conversion/benchmark_function.py
index 58990cc31..d80bafab1 100644
--- a/profiling/graph_pydantic_conversion/benchmark_function.py
+++ b/profiling/graph_pydantic_conversion/benchmark_function.py
@@ -33,7 +33,7 @@ def benchmark_function(func: Callable, *args, num_runs: int = 5) -> Dict[str, An
         start_time = time.perf_counter()
         start_cpu_time = process.cpu_times()
 
-        result = func(*args)
+        func(*args)
 
         end_cpu_time = process.cpu_times()
         end_time = time.perf_counter()
@@ -45,7 +45,7 @@ def benchmark_function(func: Callable, *args, num_runs: int = 5) -> Dict[str, An
         )
         current, peak = tracemalloc.get_traced_memory()
         final_memory = process.memory_info().rss
-        memory_used = final_memory - initial_memory
+
 
         # Store results
         execution_times.append(execution_time)