diff --git a/.github/workflows/test_s3_file_storage.yml b/.github/workflows/test_s3_file_storage.yml
index a477d8933..04d140513 100644
--- a/.github/workflows/test_s3_file_storage.yml
+++ b/.github/workflows/test_s3_file_storage.yml
@@ -6,6 +6,10 @@ on:
 permissions:
   contents: read
 
+env:
+  RUNTIME__LOG_LEVEL: ERROR
+  ENV: 'dev'
+
 jobs:
   test-gemini:
     name: Run S3 File Storage Test
diff --git a/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/generic_llm_api/adapter.py b/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/generic_llm_api/adapter.py
index 8c15a5804..917599d4d 100644
--- a/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/generic_llm_api/adapter.py
+++ b/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/generic_llm_api/adapter.py
@@ -118,7 +118,7 @@ class GenericAPIAdapter(LLMInterface):
             if not (self.fallback_model and self.fallback_api_key and self.fallback_endpoint):
                 raise ContentPolicyFilterError(
                     f"The provided input contains content that is not aligned with our content policy: {text_input}"
-                )
+                ) from error
 
             try:
                 return await self.aclient.chat.completions.create(
@@ -151,4 +151,4 @@ class GenericAPIAdapter(LLMInterface):
             else:
                 raise ContentPolicyFilterError(
                     f"The provided input contains content that is not aligned with our content policy: {text_input}"
-                )
+                ) from error
diff --git a/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/openai/adapter.py b/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/openai/adapter.py
index 989b240ac..527f64d75 100644
--- a/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/openai/adapter.py
+++ b/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/openai/adapter.py
@@ -146,11 +146,11 @@ class OpenAIAdapter(LLMInterface):
             ContentFilterFinishReasonError,
             ContentPolicyViolationError,
             InstructorRetryException,
-        ):
+        ) as e:
             if not (self.fallback_model and self.fallback_api_key):
                 raise ContentPolicyFilterError(
                     f"The provided input contains content that is not aligned with our content policy: {text_input}"
-                )
+                ) from e
 
             try:
                 return await self.aclient.chat.completions.create(
@@ -183,7 +183,7 @@ class OpenAIAdapter(LLMInterface):
             else:
                 raise ContentPolicyFilterError(
                     f"The provided input contains content that is not aligned with our content policy: {text_input}"
-                )
+                ) from error
 
     @observe
     @sleep_and_retry_sync()
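
Note: the adapter changes above apply Python's exception-chaining idiom: bind the caught provider exception ("as e" / "as error") and re-raise the project-level error with "raise ... from ...", so the original content-filter exception stays attached as __cause__ instead of being masked. A minimal, self-contained sketch of that pattern (illustrative names only, not the actual cognee classes or call signatures):

    class ProviderContentFilterError(Exception):
        """Stand-in for the litellm/instructor content-filter exceptions caught above."""

    class ContentPolicyFilterError(Exception):
        """Stand-in for cognee's ContentPolicyFilterError."""

    async def complete_with_fallback(primary, fallback, text_input: str):
        try:
            return await primary(text_input)
        except ProviderContentFilterError as error:
            if fallback is None:
                # "from error" chains the provider exception instead of hiding it,
                # so tracebacks show what actually triggered the content-policy failure.
                raise ContentPolicyFilterError(
                    f"The provided input contains content that is not aligned "
                    f"with our content policy: {text_input}"
                ) from error
            return await fallback(text_input)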