Merge branch 'dev' into fix-only-context
commit fbe0b6e2ce
6 changed files with 133 additions and 33 deletions
.github/ISSUE_TEMPLATE/documentation.yml (1 addition)

@@ -71,3 +71,4 @@ body:
           required: true
         - label: I have specified the location of the documentation issue
+          required: true
.github/core-team.txt (new file, 12 additions)

@@ -0,0 +1,12 @@
+# Core team GitHub logins (one per line). Lines may begin with @; case-insensitive.
+borisarzentar
+daukadolt
+dexters1
+hajdul88
+hande-k
+lxobr
+pazone
+siillee
+vasilije1990
+
+
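For reference, the matching rules the header comment promises (case-insensitive, optional leading @, comments and blank lines ignored) can be sketched in Python; is_core_team is an illustrative name, not something shipped in this commit:

    import pathlib

    def is_core_team(author: str, list_file: str = ".github/core-team.txt") -> bool:
        # Normalize the author the same way the workflow below does:
        # lowercase, with a leading '@' stripped.
        author = author.lower().lstrip("@")
        path = pathlib.Path(list_file)
        if not path.is_file():
            return False
        for raw in path.read_text().splitlines():
            line = raw.strip()
            if not line or line.startswith("#"):
                continue  # skip comments and blank lines
            if line.lstrip("@").lower() == author:
                return True
        return False

    # e.g. is_core_team("@Vasilije1990") is True for the list above.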
.github/workflows/label-core-team.yml (new file, 76 additions)

@@ -0,0 +1,76 @@
+name: Label PRs from core team
+
+on:
+  pull_request_target:
+    types: [opened, reopened, synchronize, ready_for_review, edited]
+
+permissions:
+  contents: read
+  issues: write
+
+jobs:
+  label-core-team:
+    if: ${{ !github.event.pull_request.draft }}
+    runs-on: ubuntu-latest
+    steps:
+      - name: Check out base repository
+        uses: actions/checkout@v4
+        with:
+          repository: ${{ github.repository }}
+          ref: ${{ github.event.pull_request.base.ref }}
+
+      - name: Determine if PR author is a core team member
+        id: check_core
+        shell: bash
+        run: |
+          AUTHOR="${{ github.event.pull_request.user.login }}"
+          LIST_FILE=".github/core-team.txt"
+
+          if [ ! -f "$LIST_FILE" ]; then
+            echo "core=false" >> "$GITHUB_OUTPUT"
+            exit 0
+          fi
+
+          # Normalize author to lowercase and strip leading '@'
+          AUTHOR_NORM="$(echo "$AUTHOR" | tr '[:upper:]' '[:lower:]' | sed 's/^@//')"
+
+          # Compare against normalized list values (ignore comments/blank lines).
+          # gsub (not sub) so that both leading and trailing whitespace are stripped.
+          if awk -v author="$AUTHOR_NORM" '
+            BEGIN { found=0 }
+            {
+              line=$0
+              gsub(/^[ \t]+|[ \t]+$/, "", line)
+              if (line ~ /^#/ || line == "") next
+              sub(/^@/, "", line)
+              line=tolower(line)
+              if (line == author) { found=1; exit }
+            }
+            END { exit(found ? 0 : 1) }
+          ' "$LIST_FILE"; then
+            echo "core=true" >> "$GITHUB_OUTPUT"
+          else
+            echo "core=false" >> "$GITHUB_OUTPUT"
+          fi
+
+      - name: Add core-team label
+        if: steps.check_core.outputs.core == 'true'
+        uses: actions/github-script@v7
+        with:
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          script: |
+            const label = 'core-team';
+            const { owner, repo } = context.repo;
+            const prNumber = context.payload.pull_request.number;
+            try {
+              await github.rest.issues.addLabels({
+                owner,
+                repo,
+                issue_number: prNumber,
+                labels: [label],
+              });
+              core.info(`Label '${label}' added to PR #${prNumber}`);
+            } catch (error) {
+              core.warning(`Failed to add label: ${error.message}`);
+            }
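The github-script step is a thin wrapper around a single REST call (POST /repos/{owner}/{repo}/issues/{issue_number}/labels, the endpoint github.rest.issues.addLabels uses; pull requests share the issues API for labels, which is why the workflow only needs issues: write). A minimal Python sketch of the same call, with the token read from an environment variable as a placeholder:

    import os
    import requests

    def add_core_team_label(owner: str, repo: str, pr_number: int) -> None:
        # Same endpoint the workflow's addLabels call hits.
        resp = requests.post(
            f"https://api.github.com/repos/{owner}/{repo}/issues/{pr_number}/labels",
            headers={
                "Authorization": f"Bearer {os.environ['GITHUB_TOKEN']}",
                "Accept": "application/vnd.github+json",
            },
            json={"labels": ["core-team"]},
            timeout=10,
        )
        resp.raise_for_status()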
@@ -178,7 +178,7 @@ You can also cognify your files and query using cognee UI.
 
 ### Running the UI
 
-Try cognee UI by running ``` cognee-cli -ui ``` command on your terminal.
+Try cognee UI by setting LLM_API_KEY and running ``` cognee-cli -ui ``` command on your terminal.
 
 ## Understand our architecture
 
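A minimal sketch of the documented invocation from Python, assuming cognee-cli is on PATH; the key value is a placeholder:

    import os
    import subprocess

    env = {**os.environ, "LLM_API_KEY": "<your-llm-api-key>"}  # placeholder key
    subprocess.run(["cognee-cli", "-ui"], env=env, check=True)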
@@ -1277,7 +1277,6 @@ class KuzuAdapter(GraphDBInterface):
             A tuple containing a list of filtered node properties and a list of filtered edge
             properties.
         """
-
         where_clauses = []
         params = {}
 
@@ -1288,16 +1287,50 @@ class KuzuAdapter(GraphDBInterface):
             params[param_name] = values
 
         where_clause = " AND ".join(where_clauses)
-        nodes_query = f"MATCH (n:Node) WHERE {where_clause} RETURN properties(n)"
+        nodes_query = (
+            f"MATCH (n:Node) WHERE {where_clause} RETURN n.id, {{properties: n.properties}}"
+        )
         edges_query = f"""
             MATCH (n1:Node)-[r:EDGE]->(n2:Node)
             WHERE {where_clause.replace("n.", "n1.")} AND {where_clause.replace("n.", "n2.")}
-            RETURN properties(r)
+            RETURN n1.id, n2.id, r.relationship_name, r.properties
         """
         nodes, edges = await asyncio.gather(
             self.query(nodes_query, params), self.query(edges_query, params)
         )
-        return ([n[0] for n in nodes], [e[0] for e in edges])
+        formatted_nodes = []
+        for n in nodes:
+            if n[0]:
+                node_id = str(n[0])
+                props = n[1]
+                if props.get("properties"):
+                    try:
+                        additional_props = json.loads(props["properties"])
+                        props.update(additional_props)
+                        del props["properties"]
+                    except json.JSONDecodeError:
+                        logger.warning(f"Failed to parse properties JSON for node {node_id}")
+                formatted_nodes.append((node_id, props))
+        if not formatted_nodes:
+            logger.warning("No nodes found in the database")
+            return [], []
+
+        formatted_edges = []
+        for e in edges:
+            if e and len(e) >= 3:
+                source_id = str(e[0])
+                target_id = str(e[1])
+                rel_type = str(e[2])
+                props = {}
+                if len(e) > 3 and e[3]:
+                    try:
+                        props = json.loads(e[3])
+                    except (json.JSONDecodeError, TypeError):
+                        logger.warning(
+                            f"Failed to parse edge properties for {source_id}->{target_id}"
+                        )
+                formatted_edges.append((source_id, target_id, rel_type, props))
+        return formatted_nodes, formatted_edges
 
     async def get_graph_metrics(self, include_optional=False) -> Dict[str, Any]:
         """
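The new loop's core transformation is merging the JSON-encoded properties column into each node's property dict. A standalone sketch with sample data (format_node_row is illustrative, not part of the adapter):

    import json

    def format_node_row(row):
        # row mirrors what the new nodes_query returns:
        # (n.id, {"properties": "<json string>"})
        node_id = str(row[0])
        props = dict(row[1])
        if props.get("properties"):
            try:
                props.update(json.loads(props["properties"]))
                del props["properties"]
            except json.JSONDecodeError:
                pass  # keep the raw dict if the payload is malformed
        return node_id, props

    print(format_node_row(("n1", {"properties": '{"name": "Ada"}'})))
    # -> ('n1', {'name': 'Ada'})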
@@ -29,6 +29,9 @@ observe = get_observe()
 
 logger = get_logger()
 
+# litellm to drop unsupported params, e.g., reasoning_effort when not supported by the model.
+litellm.drop_params = True
+
 
 class OpenAIAdapter(LLMInterface):
     """
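litellm.drop_params = True makes litellm strip request parameters the target model or provider does not accept instead of raising, which is what lets the manual retry-without-reasoning_effort path below be deleted. A small sketch, with the model name as a placeholder:

    import litellm

    litellm.drop_params = True  # silently drop unsupported params

    async def ask(prompt: str):
        return await litellm.acompletion(
            model="gpt-4o-mini",  # placeholder model
            messages=[{"role": "user", "content": prompt}],
            reasoning_effort="minimal",  # dropped where unsupported
        )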
@@ -132,39 +135,13 @@ class OpenAIAdapter(LLMInterface):
                 api_version=self.api_version,
                 response_model=response_model,
                 max_retries=self.MAX_RETRIES,
-                extra_body={"reasoning_effort": "minimal"},
+                reasoning_effort="minimal",
             )
         except (
             ContentFilterFinishReasonError,
             ContentPolicyViolationError,
             InstructorRetryException,
-        ) as error:
-            if (
-                isinstance(error, InstructorRetryException)
-                and "content management policy" not in str(error).lower()
-            ):
-                logger.debug(
-                    "LLM Model does not support reasoning_effort parameter, trying call without the parameter."
-                )
-                return await self.aclient.chat.completions.create(
-                    model=self.model,
-                    messages=[
-                        {
-                            "role": "user",
-                            "content": f"""{text_input}""",
-                        },
-                        {
-                            "role": "system",
-                            "content": system_prompt,
-                        },
-                    ],
-                    api_key=self.api_key,
-                    api_base=self.endpoint,
-                    api_version=self.api_version,
-                    response_model=response_model,
-                    max_retries=self.MAX_RETRIES,
-                )
-
+        ):
             if not (self.fallback_model and self.fallback_api_key):
                 raise ContentPolicyFilterError(
                     f"The provided input contains content that is not aligned with our content policy: {text_input}"
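The calls above pass response_model straight to self.aclient, which suggests an instructor-patched litellm client. A minimal sketch of that wiring, assuming that setup (the model name and Answer type are illustrative):

    import instructor
    import litellm
    from pydantic import BaseModel

    class Answer(BaseModel):  # illustrative response model
        text: str

    # instructor wraps litellm's async completion so response_model
    # is parsed into a validated Pydantic object.
    aclient = instructor.from_litellm(litellm.acompletion)

    async def structured(prompt: str) -> Answer:
        return await aclient.chat.completions.create(
            model="gpt-4o-mini",  # placeholder
            messages=[{"role": "user", "content": prompt}],
            response_model=Answer,
            max_retries=2,
        )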
@@ -246,6 +223,7 @@ class OpenAIAdapter(LLMInterface):
                 api_base=self.endpoint,
                 api_version=self.api_version,
                 response_model=response_model,
+                reasoning_effort="minimal",
                 max_retries=self.MAX_RETRIES,
             )
 