diff --git a/.github/ISSUE_TEMPLATE/documentation.yml b/.github/ISSUE_TEMPLATE/documentation.yml index 417289f5b..b01459ac6 100644 --- a/.github/ISSUE_TEMPLATE/documentation.yml +++ b/.github/ISSUE_TEMPLATE/documentation.yml @@ -71,3 +71,4 @@ body: required: true - label: I have specified the location of the documentation issue required: true + diff --git a/.github/core-team.txt b/.github/core-team.txt new file mode 100644 index 000000000..2e69a268c --- /dev/null +++ b/.github/core-team.txt @@ -0,0 +1,12 @@ +# Core team GitHub logins (one per line). Lines may begin with @; case-insensitive. +borisarzentar +daukadolt +dexters1 +hajdul88 +hande-k +lxobr +pazone +siillee +vasilije1990 + + diff --git a/.github/workflows/label-core-team.yml b/.github/workflows/label-core-team.yml new file mode 100644 index 000000000..8e32923d1 --- /dev/null +++ b/.github/workflows/label-core-team.yml @@ -0,0 +1,76 @@ +name: Label PRs from core team + +on: + pull_request_target: + types: [opened, reopened, synchronize, ready_for_review, edited] + +permissions: + contents: read + issues: write + +jobs: + label-core-team: + if: ${{ !github.event.pull_request.draft }} + runs-on: ubuntu-latest + steps: + - name: Check out base repository + uses: actions/checkout@v4 + with: + repository: ${{ github.repository }} + ref: ${{ github.event.pull_request.base.ref }} + + - name: Determine if PR author is a core team member + id: check_core + shell: bash + run: | + AUTHOR="${{ github.event.pull_request.user.login }}" + LIST_FILE=".github/core-team.txt" + + if [ ! 
-f "$LIST_FILE" ]; then + echo "core=false" >> "$GITHUB_OUTPUT" + exit 0 + fi + + # Normalize author to lowercase and strip leading '@' + AUTHOR_NORM="$(echo "$AUTHOR" | tr '[:upper:]' '[:lower:]' | sed 's/^@//')" + + # Compare against normalized list values (ignore comments/blank lines) + if awk -v author="$AUTHOR_NORM" ' + BEGIN { found=0 } + { + line=$0 + sub(/^[ \t]+|[ \t]+$/, "", line) + if (line ~ /^#/ || line == "") next + sub(/^@/, "", line) + line=tolower(line) + if (line == author) { found=1; exit } + } + END { exit(found ? 0 : 1) } + ' "$LIST_FILE"; then + echo "core=true" >> "$GITHUB_OUTPUT" + else + echo "core=false" >> "$GITHUB_OUTPUT" + fi + + - name: Add core-team label + if: steps.check_core.outputs.core == 'true' + uses: actions/github-script@v7 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + const label = 'core-team'; + const { owner, repo } = context.repo; + const prNumber = context.payload.pull_request.number; + try { + await github.rest.issues.addLabels({ + owner, + repo, + issue_number: prNumber, + labels: [label], + }); + core.info(`Label '${label}' added to PR #${prNumber}`); + } catch (error) { + core.warning(`Failed to add label: ${error.message}`); + } + + diff --git a/README.md b/README.md index 41bd1d4ea..30f829c93 100644 --- a/README.md +++ b/README.md @@ -178,7 +178,7 @@ You can also cognify your files and query using cognee UI. ### Running the UI -Try cognee UI by running ``` cognee-cli -ui ``` command on your terminal. +Try cognee UI by setting LLM_API_KEY and running ``` cognee-cli -ui ``` command on your terminal. 
## Understand our architecture diff --git a/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/openai/adapter.py b/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/openai/adapter.py index da0f6c8cc..bc8707011 100644 --- a/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/openai/adapter.py +++ b/cognee/infrastructure/llm/structured_output_framework/litellm_instructor/llm/openai/adapter.py @@ -29,6 +29,9 @@ observe = get_observe() logger = get_logger() +# Tell litellm to drop unsupported params, e.g., reasoning_effort when not supported by the model. +litellm.drop_params = True + class OpenAIAdapter(LLMInterface): """ @@ -132,39 +135,13 @@ class OpenAIAdapter(LLMInterface): api_version=self.api_version, response_model=response_model, max_retries=self.MAX_RETRIES, - extra_body={"reasoning_effort": "minimal"}, + reasoning_effort="minimal", ) except ( ContentFilterFinishReasonError, ContentPolicyViolationError, InstructorRetryException, - ) as error: - if ( - isinstance(error, InstructorRetryException) - and "content management policy" not in str(error).lower() - ): - logger.debug( - "LLM Model does not support reasoning_effort parameter, trying call without the parameter."
- ) - return await self.aclient.chat.completions.create( - model=self.model, - messages=[ - { - "role": "user", - "content": f"""{text_input}""", - }, - { - "role": "system", - "content": system_prompt, - }, - ], - api_key=self.api_key, - api_base=self.endpoint, - api_version=self.api_version, - response_model=response_model, - max_retries=self.MAX_RETRIES, - ) - + ): if not (self.fallback_model and self.fallback_api_key): raise ContentPolicyFilterError( f"The provided input contains content that is not aligned with our content policy: {text_input}" @@ -246,6 +223,7 @@ class OpenAIAdapter(LLMInterface): api_base=self.endpoint, api_version=self.api_version, response_model=response_model, + reasoning_effort="minimal", max_retries=self.MAX_RETRIES, )