Fix/fix broken health check endpoint (#1516)

<!-- .github/pull_request_template.md -->

## Description
<!--
Please provide a clear, human-generated description of the changes in
this PR.
DO NOT use AI-generated descriptions. We want to understand your thought
process and reasoning.
-->

The Cognee health check endpoint is broken — specifically the `llm_provider` component check.

To reproduce, run `cognee-cli -ui`:

```
[BACKEND] 2025-10-07T20:05:51.751303 [info     ] JSON extension already loaded or unavailable: Binder exception: Extension: JSON is already loaded. You can check loaded extensions by `CALL SHOW_LOADED_EXTENSIONS() RETURN *`. [cognee.shared.logging_utils]
[BACKEND] 2025-10-07T20:05:51.763217 [error    ] LLM provider health check failed: type object 'LLMGateway' has no attribute 'show_prompt' [cognee.shared.logging_utils] exception_message="type object 'LLMGateway' has no attribute 'show_prompt'" traceback=True
[BACKEND] ╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮
[BACKEND] │ /Users/daulet/Desktop/dev/cognee-claude/cognee/api/health.py:197 in check_llm_provider           │
[BACKEND] │                                                                                                  │
[BACKEND] │   194 │   │   │   config = get_llm_config()                                                      │
[BACKEND] │   195 │   │   │                                                                                  │
[BACKEND] │   196 │   │   │   # Test actual API connection with minimal request                              │
[BACKEND] │ ❱ 197 │   │   │   LLMGateway.show_prompt("test", "test.txt")                                     │
[BACKEND] │   198 │   │   │                                                                                  │
[BACKEND] │   199 │   │   │   response_time = int((time.time() - start_time) * 1000)                         │
[BACKEND] │   200 │   │   │   return ComponentHealth(                                                        │
[BACKEND] │                                                                                                  │
[BACKEND] │ ╭─────────────────────────────────────────── locals ───────────────────────────────────────────╮ │
[BACKEND] │ │         config = LLMConfig(                                                                  │ │
[BACKEND] │ │                  │   structured_output_framework='instructor',                               │ │
[BACKEND] │ │                  │   llm_provider='openai',                                                  │ │
[BACKEND] │ │                  │   llm_model='openai/gpt-4o-mini',                                         │ │
[BACKEND] │ │                  │   llm_endpoint='',                                                        │ │
[BACKEND] │ │                  │                                                                           │ │
[BACKEND] │ │                  llm_api_key='sk-proj-***REDACTED***',                                     │ │
[BACKEND] │ │                  │   llm_api_version=None,                                                   │ │
[BACKEND] │ │                  │   llm_temperature=0.0,                                                    │ │
[BACKEND] │ │                  │   llm_streaming=False,                                                    │ │
[BACKEND] │ │                  │   llm_max_completion_tokens=16384,                                        │ │
[BACKEND] │ │                  │   baml_llm_provider='openai',                                             │ │
[BACKEND] │ │                  │   baml_llm_model='gpt-5-mini',                                            │ │
[BACKEND] │ │                  │   baml_llm_endpoint='',                                                   │ │
[BACKEND] │ │                  │   baml_llm_api_key=None,                                                  │ │
[BACKEND] │ │                  │   baml_llm_temperature=0.0,                                               │ │
[BACKEND] │ │                  │   baml_llm_api_version='',                                                │ │
[BACKEND] │ │                  │   transcription_model='whisper-1',                                        │ │
[BACKEND] │ │                  │   graph_prompt_path='generate_graph_prompt.txt',                          │ │
[BACKEND] │ │                  │   temporal_graph_prompt_path='generate_event_graph_prompt.txt',           │ │
[BACKEND] │ │                  │   event_entity_prompt_path='generate_event_entity_prompt.txt',            │ │
[BACKEND] │ │                  │   llm_rate_limit_enabled=False,                                           │ │
[BACKEND] │ │                  │   llm_rate_limit_requests=60,                                             │ │
[BACKEND] │ │                  │   llm_rate_limit_interval=60,                                             │ │
[BACKEND] │ │                  │   embedding_rate_limit_enabled=False,                                     │ │
[BACKEND] │ │                  │   embedding_rate_limit_requests=60,                                       │ │
[BACKEND] │ │                  │   embedding_rate_limit_interval=60,                                       │ │
[BACKEND] │ │                  │   fallback_api_key='',                                                    │ │
[BACKEND] │ │                  │   fallback_endpoint='',                                                   │ │
[BACKEND] │ │                  │   fallback_model='',                                                      │ │
[BACKEND] │ │                  │   baml_registry=None,                                                     │ │
[BACKEND] │ │                  │                                                                           │ │
[BACKEND] │ │                  cognee_cloud_auth_token='***REDACTED***'                                  │ │
[BACKEND] │ │                  )                                                                           │ │
[BACKEND] │ │              e = AttributeError("type object 'LLMGateway' has no attribute 'show_prompt'")   │ │
[BACKEND] │ │ get_llm_config = <functools._lru_cache_wrapper object at 0x123954f60>                        │ │
[BACKEND] │ │  response_time = 0                                                                           │ │
[BACKEND] │ │           self = <cognee.api.health.HealthChecker object at 0x33f442980>                     │ │
[BACKEND] │ │     start_time = 1759867551.7631629                                                          │ │
[BACKEND] │ ╰──────────────────────────────────────────────────────────────────────────────────────────────╯ │
[BACKEND] ╰──────────────────────────────────────────────────────────────────────────────────────────────────╯
[BACKEND] AttributeError: type object 'LLMGateway' has no attribute 'show_prompt'
[BACKEND] EmbeddingRateLimiter initialized: enabled=False, requests_limit=60, interval_seconds=60
```

The root cause is that the health check still called the deprecated `LLMGateway.show_prompt` method, which has since been removed from `LLMGateway`.

## Type of Change
<!-- Please check the relevant option -->
- [x] Bug fix (non-breaking change that fixes an issue)
- [ ] New feature (non-breaking change that adds functionality)
- [ ] Breaking change (fix or feature that would cause existing
functionality to change)
- [ ] Documentation update
- [ ] Code refactoring
- [ ] Performance improvement
- [ ] Other (please specify):

## Screenshots/Videos (if applicable)
<!-- Add screenshots or videos to help explain your changes -->

## Pre-submission Checklist
<!-- Please check all boxes that apply before submitting your PR -->
- [ ] **I have tested my changes thoroughly before submitting this PR**
- [ ] **This PR contains minimal changes necessary to address the
issue/feature**
- [ ] My code follows the project's coding standards and style
guidelines
- [ ] I have added tests that prove my fix is effective or that my
feature works
- [ ] I have added necessary documentation (if applicable)
- [ ] All new and existing tests pass
- [ ] I have searched existing PRs to ensure this change hasn't been
submitted already
- [ ] I have linked any relevant issues in the description
- [ ] My commits have clear and descriptive messages

## DCO Affirmation
I affirm that all code in every commit of this pull request conforms to
the terms of the Topoteretes Developer Certificate of Origin.
This commit is contained in:
Daulet Amirkhanov 2025-10-07 22:19:09 +01:00 committed by GitHub
commit 4190a7a232
No known key found for this signature in database
GPG key ID: B5690EEEBB952194

View file

```diff
@@ -189,12 +189,12 @@ class HealthChecker:
         start_time = time.time()
         try:
             from cognee.infrastructure.llm.config import get_llm_config
-            from cognee.infrastructure.llm import LLMGateway

             config = get_llm_config()

-            # Test actual API connection with minimal request
-            LLMGateway.show_prompt("test", "test.txt")
+            from cognee.infrastructure.llm.utils import test_llm_connection
+
+            await test_llm_connection()

             response_time = int((time.time() - start_time) * 1000)
             return ComponentHealth(
@@ -217,13 +217,9 @@ class HealthChecker:
         """Check embedding service health (non-critical)."""
         start_time = time.time()
         try:
-            from cognee.infrastructure.databases.vector.embeddings.get_embedding_engine import (
-                get_embedding_engine,
-            )
+            from cognee.infrastructure.llm.utils import test_embedding_connection

-            # Test actual embedding generation with minimal text
-            engine = get_embedding_engine()
-            await engine.embed_text(["test"])
+            await test_embedding_connection()

             response_time = int((time.time() - start_time) * 1000)
             return ComponentHealth(
```