Merge branch 'dev' into feat/add-pdfproloader

Commit 2b8ad7213b

5 changed files with 96 additions and 29 deletions
.github/ISSUE_TEMPLATE/documentation.yml (1 change, vendored)
@@ -71,3 +71,4 @@ body:
          required: true
      - label: I have specified the location of the documentation issue
          required: true
.github/core-team.txt (new file, 12 additions, vendored)
@@ -0,0 +1,12 @@
# Core team GitHub logins (one per line). Lines may begin with @; case-insensitive.
borisarzentar
daukadolt
dexters1
hajdul88
hande-k
lxobr
pazone
siillee
vasilije1990
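The list's own header comment defines the matching rules: one login per line, an optional leading '@', case-insensitive comparison, with '#' comments and blank lines ignored. As a rough illustration, the same membership check the workflow below performs with awk could be written in Python like this (the function name and default path are only for the sketch):

# Sketch only: mirrors the normalization rules stated in core-team.txt
# (skip comments/blanks, strip a leading '@', compare case-insensitively).
from pathlib import Path


def is_core_team(author: str, list_file: str = ".github/core-team.txt") -> bool:
    path = Path(list_file)
    if not path.is_file():
        return False
    author_norm = author.lower().lstrip("@")
    for raw_line in path.read_text().splitlines():
        line = raw_line.strip()
        if not line or line.startswith("#"):
            continue  # ignore comments and blank lines
        if line.lstrip("@").lower() == author_norm:
            return True
    return False


# Example: both of these would match entries in the list above.
print(is_core_team("vasilije1990"), is_core_team("@LXOBR"))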
.github/workflows/label-core-team.yml (new file, 76 additions, vendored)
@@ -0,0 +1,76 @@
name: Label PRs from core team

on:
  pull_request_target:
    types: [opened, reopened, synchronize, ready_for_review, edited]

permissions:
  contents: read
  issues: write

jobs:
  label-core-team:
    if: ${{ !github.event.pull_request.draft }}
    runs-on: ubuntu-latest
    steps:
      - name: Check out base repository
        uses: actions/checkout@v4
        with:
          repository: ${{ github.repository }}
          ref: ${{ github.event.pull_request.base.ref }}

      - name: Determine if PR author is a core team member
        id: check_core
        shell: bash
        run: |
          AUTHOR="${{ github.event.pull_request.user.login }}"
          LIST_FILE=".github/core-team.txt"

          if [ ! -f "$LIST_FILE" ]; then
            echo "core=false" >> "$GITHUB_OUTPUT"
            exit 0
          fi

          # Normalize author to lowercase and strip leading '@'
          AUTHOR_NORM="$(echo "$AUTHOR" | tr '[:upper:]' '[:lower:]' | sed 's/^@//')"

          # Compare against normalized list values (ignore comments/blank lines)
          if awk -v author="$AUTHOR_NORM" '
            BEGIN { found=0 }
            {
              line=$0
              sub(/^[ \t]+|[ \t]+$/, "", line)
              if (line ~ /^#/ || line == "") next
              sub(/^@/, "", line)
              line=tolower(line)
              if (line == author) { found=1; exit }
            }
            END { exit(found ? 0 : 1) }
          ' "$LIST_FILE"; then
            echo "core=true" >> "$GITHUB_OUTPUT"
          else
            echo "core=false" >> "$GITHUB_OUTPUT"
          fi

      - name: Add core-team label
        if: steps.check_core.outputs.core == 'true'
        uses: actions/github-script@v7
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: |
            const label = 'core-team';
            const { owner, repo } = context.repo;
            const prNumber = context.payload.pull_request.number;
            try {
              await github.rest.issues.addLabels({
                owner,
                repo,
                issue_number: prNumber,
                labels: [label],
              });
              core.info(`Label '${label}' added to PR #${prNumber}`);
            } catch (error) {
              core.warning(`Failed to add label: ${error.message}`);
            }
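The labeling step drives the GitHub REST endpoint POST /repos/{owner}/{repo}/issues/{issue_number}/labels through actions/github-script. Outside of Actions the same call can be made directly; the sketch below assumes a token with permission to label the PR and uses the requests library (owner, repo, PR number, and token are placeholders, not values from the commit):

# Sketch only: the REST call behind github.rest.issues.addLabels.
import requests


def add_core_team_label(owner: str, repo: str, pr_number: int, token: str) -> None:
    response = requests.post(
        f"https://api.github.com/repos/{owner}/{repo}/issues/{pr_number}/labels",
        headers={
            "Authorization": f"Bearer {token}",
            "Accept": "application/vnd.github+json",
        },
        json={"labels": ["core-team"]},
        timeout=10,
    )
    response.raise_for_status()  # surfaces permission or not-found errors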
@@ -178,7 +178,7 @@ You can also cognify your files and query using cognee UI.

### Running the UI

-Try cognee UI by running ``` cognee-cli -ui ``` command on your terminal.
+Try cognee UI by setting LLM_API_KEY and running ``` cognee-cli -ui ``` command on your terminal.

## Understand our architecture
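The changed sentence only adds the LLM_API_KEY prerequisite before launching the UI. A minimal way to script that (the variable name comes from the diff itself; the key value is a placeholder):

# Sketch only: export the key and launch the UI command mentioned in the README.
import os
import subprocess

os.environ["LLM_API_KEY"] = "<your-provider-api-key>"  # placeholder value
subprocess.run(["cognee-cli", "-ui"], check=True)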
@@ -29,6 +29,9 @@ observe = get_observe()

logger = get_logger()

+# litellm to drop unsupported params, e.g., reasoning_effort when not supported by the model.
+litellm.drop_params = True
+

class OpenAIAdapter(LLMInterface):
    """
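litellm.drop_params is a global switch that tells litellm to silently discard request parameters a given model or provider does not accept instead of raising an error. A minimal sketch of the behavior the new comment describes, with the model name as a placeholder:

# Sketch only: with drop_params enabled, reasoning_effort is forwarded to models
# that accept it and silently dropped for models that do not.
import litellm

litellm.drop_params = True

response = litellm.completion(
    model="gpt-4o-mini",  # placeholder; any configured provider/model
    messages=[{"role": "user", "content": "Say hello."}],
    reasoning_effort="minimal",  # dropped automatically if unsupported
)
print(response.choices[0].message.content)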
@@ -132,39 +135,13 @@ class OpenAIAdapter(LLMInterface):
                api_version=self.api_version,
                response_model=response_model,
                max_retries=self.MAX_RETRIES,
-                extra_body={"reasoning_effort": "minimal"},
+                reasoning_effort="minimal",
            )
        except (
            ContentFilterFinishReasonError,
            ContentPolicyViolationError,
-            InstructorRetryException,
-        ) as error:
-            if (
-                isinstance(error, InstructorRetryException)
-                and "content management policy" not in str(error).lower()
-            ):
-                logger.debug(
-                    "LLM Model does not support reasoning_effort parameter, trying call without the parameter."
-                )
-                return await self.aclient.chat.completions.create(
-                    model=self.model,
-                    messages=[
-                        {
-                            "role": "user",
-                            "content": f"""{text_input}""",
-                        },
-                        {
-                            "role": "system",
-                            "content": system_prompt,
-                        },
-                    ],
-                    api_key=self.api_key,
-                    api_base=self.endpoint,
-                    api_version=self.api_version,
-                    response_model=response_model,
-                    max_retries=self.MAX_RETRIES,
-                )
-
+        ):
            if not (self.fallback_model and self.fallback_api_key):
                raise ContentPolicyFilterError(
                    f"The provided input contains content that is not aligned with our content policy: {text_input}"
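With drop_params set at import time, the retry-without-reasoning_effort branch becomes unnecessary: a single structured-output call can always pass reasoning_effort and let litellm strip it where unsupported. A rough, self-contained sketch of that call shape (the adapter's aclient looks like an instructor-patched litellm client, which is an assumption here; the model name and response model are placeholders):

# Sketch only: one structured-output call with reasoning_effort always attached.
import instructor
import litellm
from pydantic import BaseModel

litellm.drop_params = True  # unsupported params are dropped instead of raising
client = instructor.from_litellm(litellm.acompletion)


class Answer(BaseModel):
    summary: str


async def structured_call(text_input: str, system_prompt: str) -> Answer:
    return await client.chat.completions.create(
        model="openai/gpt-4o-mini",  # placeholder model name
        messages=[
            {"role": "user", "content": text_input},
            {"role": "system", "content": system_prompt},
        ],
        response_model=Answer,
        reasoning_effort="minimal",
        max_retries=5,  # placeholder for the adapter's MAX_RETRIES
    )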
@@ -246,6 +223,7 @@ class OpenAIAdapter(LLMInterface):
                api_base=self.endpoint,
                api_version=self.api_version,
                response_model=response_model,
+                reasoning_effort="minimal",
                max_retries=self.MAX_RETRIES,
            )