fix: Add reraising of general exception handling in cognee [COG-1062] (#490)
## Description

Add re-raising of errors in general exception handling, so failures are logged and then propagated instead of being silently swallowed.

## DCO Affirmation

I affirm that all code in every commit of this pull request conforms to the terms of the Topoteretes Developer Certificate of Origin.

## Summary by CodeRabbit

- **Bug Fixes & Stability Improvements**
  - Enhanced error handling throughout the system, ensuring issues during operations like server startup, data processing, and graph management are properly logged and reported.
- **Refactor**
  - Standardized logging practices replace basic output statements, improving traceability and providing better insights for troubleshooting.
- **New Features**
  - Updated search functionality now returns only unique results, enhancing data consistency and the overall user experience.

---

Co-authored-by: holchan <61059652+holchan@users.noreply.github.com>
Co-authored-by: Boris <boris@topoteretes.com>
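As orientation before the diffs below, here is a minimal standalone sketch of the log-and-reraise pattern this PR applies throughout; the `parse_port` helper and its message are hypothetical, not code from cognee.

```python
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def parse_port(raw: str) -> int:
    """Hypothetical helper showing the pattern: log the failure, then propagate it."""
    try:
        return int(raw)
    except Exception as e:
        # The old handlers would print(e) and pass; logging plus re-raising
        # keeps the failure in the logs while the caller still sees the error.
        logger.error(f"Failed to parse port {raw!r}: {e}")
        raise e


try:
    parse_port("eighty")
except ValueError:
    print("caller still observes the error")
```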
parent 4d3acc358a
commit 1260fc7db0

8 changed files with 43 additions and 15 deletions
`start_api_server`:

```diff
@@ -188,6 +188,7 @@ def start_api_server(host: str = "0.0.0.0", port: int = 8000):
     except Exception as e:
         logger.exception(f"Failed to start server: {e}")
         # Here you could add any cleanup code or error recovery code.
+        raise e
 
 
 if __name__ == "__main__":
```
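One detail in the hunk above: `logger.exception(...)` logs at ERROR level and automatically appends the active traceback, so the added `raise e` changes propagation without losing any log detail. A standalone illustration (the `start` stub is hypothetical):

```python
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def start() -> None:
    raise RuntimeError("boom")  # stand-in for a failing server start


try:
    start()
except Exception as e:
    # logger.exception == logger.error plus the current exception's traceback
    logger.exception(f"Failed to start server: {e}")
```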
`NetworkXAdapter`:

```diff
@@ -16,7 +16,7 @@ from cognee.infrastructure.engine import DataPoint
 from cognee.modules.storage.utils import JSONEncoder
 import numpy as np
 
-logger = logging.getLogger("NetworkXAdapter")
+logger = logging.getLogger(__name__)
 
 
 class NetworkXAdapter(GraphDBInterface):
```
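The rename above swaps a hard-coded logger name for `logging.getLogger(__name__)`, so records are attributed to the real module path and inherit package-level configuration. A small standalone illustration:

```python
import logging

logging.basicConfig(level=logging.WARNING)

# Loggers named with __name__ nest under their package, so configuring the
# top-level "cognee" logger would apply to every module logger beneath it.
logging.getLogger("cognee").setLevel(logging.DEBUG)

logger = logging.getLogger(__name__)  # resolves to "__main__" when run directly
logger.warning("this record is attributed to %s", logger.name)
```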
```diff
@@ -270,8 +270,8 @@ class NetworkXAdapter(GraphDBInterface):
                     if not isinstance(node["id"], UUID):
                         node["id"] = UUID(node["id"])
                 except Exception as e:
-                    print(e)
-                    pass
+                    logger.error(e)
+                    raise e
 
             if isinstance(node.get("updated_at"), int):
                 node["updated_at"] = datetime.fromtimestamp(
```
```diff
@@ -299,8 +299,8 @@ class NetworkXAdapter(GraphDBInterface):
                     edge["source_node_id"] = source_id
                     edge["target_node_id"] = target_id
                 except Exception as e:
-                    print(e)
-                    pass
+                    logger.error(e)
+                    raise e
 
             if isinstance(edge["updated_at"], int):  # Handle timestamp in milliseconds
                 edge["updated_at"] = datetime.fromtimestamp(
```
```diff
@@ -328,8 +328,9 @@ class NetworkXAdapter(GraphDBInterface):
 
                 await self.save_graph_to_file(file_path)
 
-        except Exception:
+        except Exception as e:
             logger.error("Failed to load graph from file: %s", file_path)
+            raise e
 
     async def delete_graph(self, file_path: str = None):
         """Asynchronously delete the graph file from the filesystem."""
```
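A side note on the hunk above (an observation, not part of the diff): the exception is bound as `e` only so it can be re-raised by name; a bare `raise` inside the handler would propagate the active exception with its traceback intact as well. A sketch, with a hypothetical `load` stub:

```python
import logging

logging.basicConfig(level=logging.ERROR)
logger = logging.getLogger(__name__)


def load(path: str) -> None:
    raise FileNotFoundError(path)  # stand-in for a failing graph load


try:
    try:
        load("graph.pkl")
    except Exception:
        logger.error("Failed to load graph from file: %s", "graph.pkl")
        raise  # bare raise: re-raises the active exception unchanged
except FileNotFoundError:
    print("propagated to the caller")
```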
```diff
@@ -345,6 +346,7 @@ class NetworkXAdapter(GraphDBInterface):
             logger.info("Graph deleted successfully.")
         except Exception as error:
             logger.error("Failed to delete graph: %s", error)
+            raise error
 
     async def get_filtered_graph_data(
         self, attribute_filters: List[Dict[str, List[Union[str, int]]]]
```
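The `%s` style used in these NetworkX hunks defers interpolation to the logging framework (skipped entirely if the record is filtered out), while the f-string style used elsewhere in this PR formats eagerly. A quick standalone comparison:

```python
import logging

logging.basicConfig(level=logging.ERROR)
logger = logging.getLogger(__name__)

error = ValueError("bad graph path")  # hypothetical failure

# Lazy: "%s" is interpolated only if the record is actually emitted.
logger.error("Failed to delete graph: %s", error)

# Eager: the f-string is built before the level is even checked.
logger.error(f"Failed to delete graph: {error}")
```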
`SQLAlchemyAdapter`:

```diff
@@ -303,9 +303,10 @@ class SQLAlchemyAdapter:
                 await connection.execute(text("DROP TABLE IF EXISTS group_permission CASCADE"))
                 await connection.execute(text("DROP TABLE IF EXISTS permissions CASCADE"))
                 # Add more DROP TABLE statements for other tables as needed
-                print("Database tables dropped successfully.")
+                logger.debug("Database tables dropped successfully.")
         except Exception as e:
-            print(f"Error dropping database tables: {e}")
+            logger.error(f"Error dropping database tables: {e}")
+            raise e
 
     async def create_database(self):
         if self.engine.dialect.name == "sqlite":
```
```diff
@@ -340,6 +341,7 @@ class SQLAlchemyAdapter:
                     await connection.execute(drop_table_query)
                 metadata.clear()
         except Exception as e:
-            print(f"Error deleting database: {e}")
+            logger.error(f"Error deleting database: {e}")
+            raise e
 
-        print("Database deleted successfully.")
+        logger.info("Database deleted successfully.")
```
`TextChunker`:

```diff
@@ -1,10 +1,12 @@
 from typing import Optional
+import logging
 from uuid import NAMESPACE_OID, uuid5
 
 from cognee.tasks.chunks import chunk_by_paragraph
 
 from .models.DocumentChunk import DocumentChunk
 
+logger = logging.getLogger(__name__)
 
 class TextChunker:
     document = None
```
```diff
@@ -76,7 +78,8 @@ class TextChunker:
                     },
                 )
             except Exception as e:
-                print(e)
+                logger.error(e)
+                raise e
             paragraph_chunks = [chunk_data]
             self.chunk_size = chunk_data["word_count"]
             self.token_count = chunk_data["token_count"]
```
```diff
@@ -97,4 +100,5 @@ class TextChunker:
                 _metadata={"index_fields": ["text"]},
             )
         except Exception as e:
-            print(e)
+            logger.error(e)
+            raise e
```
`detect_language`:

```diff
@@ -34,5 +34,6 @@ async def detect_language(text: str):
 
     except Exception as e:
         logger.error(f"Unexpected error: {e}")
+        raise e
 
     return None
```
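A behavioral consequence worth spelling out: previously an unexpected error in `detect_language` fell through to `return None`; with the re-raise, the caller now sees the exception instead of a `None` result. A simplified stand-in (not the real detection body):

```python
import asyncio
import logging

logging.basicConfig(level=logging.ERROR)
logger = logging.getLogger(__name__)


async def detect_language(text: str):
    """Simplified stand-in: raises on empty input, otherwise detects nothing."""
    try:
        if not text:
            raise ValueError("empty text")
    except Exception as e:
        logger.error(f"Unexpected error: {e}")
        raise e  # new behavior: propagate instead of falling through

    return None  # reached only when nothing was raised


async def main() -> None:
    try:
        await detect_language("")
    except ValueError:
        print("caller now sees the error instead of None")


asyncio.run(main())
```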
`CogneeGraph`:

```diff
@@ -152,6 +152,7 @@ class CogneeGraph(CogneeAbstractGraph):
 
         except Exception as ex:
             print(f"Error mapping vector distances to edges: {ex}")
+            raise ex
 
     async def calculate_top_triplet_importances(self, k: int) -> List:
         min_heap = []
```
`code_description_to_code_part_search`:

```diff
@@ -11,6 +11,8 @@ from cognee.shared.utils import send_telemetry
 from cognee.modules.search.methods import search
 from cognee.infrastructure.llm.get_llm_client import get_llm_client
 
+logger = logging.getLogger(__name__)
+
 
 async def code_description_to_code_part_search(
     query: str, include_docs=False, user: User = None, top_k=5
```
```diff
@@ -154,8 +156,9 @@ if __name__ == "__main__":
         user = None
         try:
             results = await code_description_to_code_part_search(query, user)
-            print("Retrieved Code Parts:", results)
+            logger.debug("Retrieved Code Parts:", results)
         except Exception as e:
-            print(f"An error occurred: {e}")
+            logger.error(f"An error occurred: {e}")
+            raise e
 
     asyncio.run(main())
```
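A caution on the hunk above (an observation about the diff, not a change it makes): `logger.debug("Retrieved Code Parts:", results)` passes `results` as a %-format argument with no matching placeholder, which the logging module will report as a formatting error when the record is emitted. The conventional form supplies `%s`:

```python
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

results = ["part_a", "part_b"]  # hypothetical search results

# Extra positional args are merged into the message with the % operator,
# so the message needs a matching placeholder:
logger.debug("Retrieved Code Parts: %s", results)
```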
`_update_code_entity`:

```diff
@@ -3,6 +3,7 @@ import asyncio
 import sys
 from contextlib import contextmanager
 from pathlib import Path
+from pickle import UnpicklingError
 from typing import List, Dict, Optional
 
 import aiofiles
```
```diff
@@ -60,9 +61,22 @@ def _update_code_entity(script: jedi.Script, code_entity: Dict[str, any]) -> None:
         code_entity["full_name"] = getattr(result, "full_name", None)
         code_entity["module_name"] = getattr(result, "module_name", None)
         code_entity["module_path"] = getattr(result, "module_path", None)
+    except KeyError as e:
+        # TODO: See if there is a way to handle KeyError properly
+        logger.error(f"Failed to analyze code entity {code_entity['name']}: {e}")
+        return
+    except UnpicklingError as e:
+        # TODO: See if there is a way to handle UnpicklingError properly
+        logger.error(f"Failed to analyze code entity {code_entity['name']}: {e}")
+        return
+    except EOFError as e:
+        # TODO: See if there is a way to handle EOFError properly
+        logger.error(f"Failed to analyze code entity {code_entity['name']}: {e}")
+        return
     except Exception as e:
         # logging.warning(f"Failed to analyze code entity {code_entity['name']}: {e}")
         logger.error(f"Failed to analyze code entity {code_entity['name']}: {e}")
+        raise e
 
 
 async def _extract_dependencies(script_path: str) -> List[str]:
```
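The ordering in the hunk above matters: `except` clauses are tested top to bottom, so the narrow `KeyError`/`UnpicklingError`/`EOFError` handlers (which log and return) must precede the catch-all `Exception` handler (which re-raises). A minimal standalone sketch of the same shape, with a hypothetical `analyze` stub:

```python
import logging
from pickle import UnpicklingError

logging.basicConfig(level=logging.ERROR)
logger = logging.getLogger(__name__)


def analyze(name: str) -> None:
    raise KeyError(name)  # stand-in for a failing jedi lookup


try:
    analyze("my_function")
except (KeyError, UnpicklingError, EOFError) as e:
    # Tolerated: log and move on to the next code entity.
    logger.error(f"Failed to analyze code entity my_function: {e}")
except Exception as e:
    # Anything unexpected is logged and still propagated.
    logger.error(f"Failed to analyze code entity my_function: {e}")
    raise e
```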