Compare commits


9 commits

Author SHA1 Message Date
vasilije
97baec5bdb Add tests for responses api 2025-05-09 09:25:42 +02:00
Dmitrii Galkin
bacea87186 Switch OpenAI to AsyncOpenAI and make text field required in default tools schema 2025-05-03 14:47:40 +04:00
Dmitrii Galkin
76d1aee425 Add user authentication dependency to create_response endpoint 2025-05-03 13:55:04 +04:00
Dmitrii Galkin
683af24f9e add OpenAI compatibility demo notebook 2025-05-03 13:54:45 +04:00
Boris
510b277593 Merge branch 'dev' into feat/openai_chat_completion_response 2025-04-30 13:00:40 +02:00
Vasilije
3a87c0bef0 Merge branch 'main' into feat/openai_chat_completion_response 2025-04-30 12:14:46 +02:00
Diego Baptista Theuerkauf
500fa9fb4f feat: Translate README into Portuguese (#762)

## Description 

This branch contains the Portuguese translation of the README
(`README.pt.md`).

Happy to work on any suggestions you may have.

## DCO Affirmation
I affirm that all code in every commit of this pull request conforms to
the terms of the Topoteretes Developer Certificate of Origin.

---------

Signed-off-by: Diego B Theuerkauf <diego.theuerkauf@tuebingen.mpg.de>
Co-authored-by: Hande <159312713+hande-k@users.noreply.github.com>
Co-authored-by: Boris <boris@topoteretes.com>
Co-authored-by: Boris <borisarzentar@gmail.com>
Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>
2025-04-30 11:26:09 +02:00
Dmitrii Galkin
2018850dff Add OpenAI-compatible chat and responses API endpoints with function calling support 2025-04-28 23:53:23 +04:00
Hande
7d7df1876e Add available languages (#776)

## Description

## DCO Affirmation
I affirm that all code in every commit of this pull request conforms to
the terms of the Topoteretes Developer Certificate of Origin.

---------

Co-authored-by: Vasilije <8619304+Vasilije1990@users.noreply.github.com>
2025-04-26 00:03:05 +02:00
15 changed files with 1456 additions and 0 deletions

View file

@@ -0,0 +1,92 @@
name: Reusable Responses API Tests

on:
  workflow_call:
    inputs:
      python-version:
        required: false
        type: string
        default: '3.11.x'
    secrets:
      LLM_PROVIDER:
        required: true
      LLM_MODEL:
        required: true
      LLM_ENDPOINT:
        required: true
      LLM_API_KEY:
        required: true
      LLM_API_VERSION:
        required: true
      EMBEDDING_PROVIDER:
        required: true
      EMBEDDING_MODEL:
        required: true
      EMBEDDING_ENDPOINT:
        required: true
      EMBEDDING_API_KEY:
        required: true
      EMBEDDING_API_VERSION:
        required: true

env:
  RUNTIME__LOG_LEVEL: ERROR
  ENV: 'dev'

jobs:
  responses-api-tests:
    name: Run Responses API Tests
    runs-on: ubuntu-22.04
    env:
      LLM_PROVIDER: ${{ secrets.LLM_PROVIDER }}
      LLM_MODEL: ${{ secrets.LLM_MODEL }}
      LLM_ENDPOINT: ${{ secrets.LLM_ENDPOINT }}
      LLM_API_KEY: ${{ secrets.LLM_API_KEY }}
      LLM_API_VERSION: ${{ secrets.LLM_API_VERSION }}
      EMBEDDING_PROVIDER: ${{ secrets.EMBEDDING_PROVIDER }}
      EMBEDDING_MODEL: ${{ secrets.EMBEDDING_MODEL }}
      EMBEDDING_ENDPOINT: ${{ secrets.EMBEDDING_ENDPOINT }}
      EMBEDDING_API_KEY: ${{ secrets.EMBEDDING_API_KEY }}
      EMBEDDING_API_VERSION: ${{ secrets.EMBEDDING_API_VERSION }}
    steps:
      - name: Check out repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Cognee Setup
        uses: ./.github/actions/cognee_setup
        with:
          python-version: ${{ inputs.python-version }}

      - name: Install httpx
        run: poetry run pip install httpx

      - name: Start Cognee API Server
        run: |
          # Start the API server in the background and save its process ID
          poetry run python run_cognee_api_server.py --env dev &
          echo "API_SERVER_PID=$!" >> $GITHUB_ENV
          # Wait for the server to start
          echo "Waiting for API server to start..."
          sleep 10

      - name: Run Basic API Tests
        run: |
          echo "Running basic responses API tests..."
          poetry run python test_cognee_responses_api.py

      - name: Run Comprehensive API Tests
        run: |
          echo "Running comprehensive responses API tests..."
          poetry run python test_cognee_responses_api_comprehensive.py

      - name: Clean up API server
        if: always()
        run: |
          if [ -n "${{ env.API_SERVER_PID }}" ]; then
            echo "Shutting down API server (PID: ${{ env.API_SERVER_PID }})..."
            kill ${{ env.API_SERVER_PID }} || true
          fi
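A reusable workflow like this is invoked from another workflow via `workflow_call`. A minimal caller sketch, assuming the file lives at `.github/workflows/reusable_responses_api_tests.yml` (the path and caller workflow name are assumptions, not part of this PR):

```yaml
name: Responses API Suite
on: [pull_request]

jobs:
  responses-api-tests:
    # Hypothetical path to the reusable workflow defined above
    uses: ./.github/workflows/reusable_responses_api_tests.yml
    with:
      python-version: '3.11.x'
    # Forward all of the caller's secrets (LLM_*, EMBEDDING_*) to the reusable workflow
    secrets: inherit
```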

View file

@@ -14,6 +14,7 @@ from cognee.api.v1.cognify.routers import get_code_pipeline_router, get_cognify_
from cognee.api.v1.search.routers import get_search_router
from cognee.api.v1.add.routers import get_add_router
from cognee.api.v1.delete.routers import get_delete_router
from cognee.api.v1.responses.routers import get_responses_router
from fastapi import Request
from fastapi.encoders import jsonable_encoder
from fastapi.exceptions import RequestValidationError
@@ -167,6 +168,8 @@ app.include_router(get_visualize_router(), prefix="/api/v1/visualize", tags=["vi
app.include_router(get_delete_router(), prefix="/api/v1/delete", tags=["delete"])

app.include_router(get_responses_router(), prefix="/api/v1/responses", tags=["responses"])

codegraph_routes = get_code_pipeline_router()

if codegraph_routes:
    app.include_router(codegraph_routes, prefix="/api/v1/code-pipeline", tags=["code-pipeline"])

View file

@@ -0,0 +1,3 @@
from cognee.api.v1.responses.routers import get_responses_router
__all__ = ["get_responses_router"]

View file

@@ -0,0 +1,67 @@
DEFAULT_TOOLS = [
    {
        "type": "function",
        "name": "search",
        "description": "Search for information within the knowledge graph",
        "parameters": {
            "type": "object",
            "properties": {
                "search_query": {
                    "type": "string",
                    "description": "The query to search for in the knowledge graph",
                },
                "search_type": {
                    "type": "string",
                    "description": "Type of search to perform",
                    "enum": [
                        "INSIGHTS",
                        "CODE",
                        "GRAPH_COMPLETION",
                        "SEMANTIC",
                        "NATURAL_LANGUAGE",
                    ],
                },
                "top_k": {
                    "type": "integer",
                    "description": "Maximum number of results to return",
                    "default": 10,
                },
                "datasets": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Optional list of dataset names to search within",
                },
            },
            "required": ["search_query"],
        },
    },
    {
        "type": "function",
        "name": "cognify_text",
        "description": "Convert text into a knowledge graph or process all added content",
        "parameters": {
            "type": "object",
            "properties": {
                "text": {
                    "type": "string",
                    "description": "Text content to be converted into a knowledge graph",
                },
                "graph_model_name": {
                    "type": "string",
                    "description": "Name of the graph model to use",
                },
                "graph_model_file": {
                    "type": "string",
                    "description": "Path to a custom graph model file",
                },
            },
            "required": ["text"],
        },
    },
    # Commented out as dangerous
    # {
    #     "type": "function",
    #     "name": "prune",
    #     "description": "Prune memory",
    # },
]
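Given this schema, the JSON-encoded arguments a model emits for the `search` tool would look like the following (a hand-written illustration of the schema above, not captured model output):

```python
# Illustrative only: an arguments payload conforming to the "search" tool schema
example_search_arguments = {
    "search_query": "What do we know about NLP?",  # required
    "search_type": "INSIGHTS",  # optional, must be one of the enum values
    "top_k": 5,  # optional, defaults to 10
    "datasets": ["main"],  # optional dataset filter
}
```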

View file

@@ -0,0 +1,107 @@
import json
import logging
from typing import Any, Dict, Union

from cognee.api.v1.responses.models import ToolCall
from cognee.modules.search.types import SearchType
from cognee.api.v1.add import add
from cognee.api.v1.search import search
from cognee.api.v1.cognify import cognify
from cognee.api.v1.prune import prune
from cognee.modules.users.methods import get_default_user
from cognee.api.v1.responses.default_tools import DEFAULT_TOOLS

logger = logging.getLogger(__name__)


async def dispatch_function(tool_call: Union[ToolCall, Dict[str, Any]]) -> Union[str, list]:
    """
    Dispatch a function call to the appropriate Cognee function.
    """
    if isinstance(tool_call, dict):
        function_data = tool_call.get("function", {})
        function_name = function_data.get("name", "")
        arguments_str = function_data.get("arguments", "{}")
    else:
        function_name = tool_call.function.name
        arguments_str = tool_call.function.arguments

    arguments = json.loads(arguments_str)
    logger.info(f"Dispatching function: {function_name} with args: {arguments}")

    user = await get_default_user()

    if function_name == "search":
        return await handle_search(arguments, user)
    elif function_name == "cognify_text":
        return await handle_cognify(arguments, user)
    elif function_name == "prune":
        return await handle_prune(arguments, user)
    else:
        return f"Error: Unknown function {function_name}"


async def handle_search(arguments: Dict[str, Any], user) -> Union[str, list]:
    """Handle a search function call."""
    search_tool = next((tool for tool in DEFAULT_TOOLS if tool["name"] == "search"), None)
    required_params = (
        search_tool["parameters"].get("required", []) if search_tool else ["search_query"]
    )

    query = arguments.get("search_query")
    if not query and "search_query" in required_params:
        return "Error: Missing required 'search_query' parameter"

    search_type_str = arguments.get("search_type", "GRAPH_COMPLETION")
    valid_search_types = (
        search_tool["parameters"]["properties"]["search_type"]["enum"]
        if search_tool
        else ["INSIGHTS", "CODE", "GRAPH_COMPLETION", "SEMANTIC", "NATURAL_LANGUAGE"]
    )
    if search_type_str not in valid_search_types:
        logger.warning(f"Invalid search_type: {search_type_str}, defaulting to GRAPH_COMPLETION")
        search_type_str = "GRAPH_COMPLETION"

    query_type = SearchType[search_type_str]
    top_k = arguments.get("top_k")
    datasets = arguments.get("datasets")
    system_prompt_path = arguments.get("system_prompt_path", "answer_simple_question.txt")

    results = await search(
        query_text=query,
        query_type=query_type,
        datasets=datasets,
        user=user,
        system_prompt_path=system_prompt_path,
        top_k=top_k if isinstance(top_k, int) else 10,
    )
    return results


async def handle_cognify(arguments: Dict[str, Any], user) -> str:
    """Handle a cognify function call."""
    text = arguments.get("text")
    graph_model_file = arguments.get("graph_model_file")

    if text:
        await add(data=text, user=user)

    await cognify(user=user, ontology_file_path=graph_model_file if graph_model_file else None)
    return (
        "Text successfully converted into knowledge graph."
        if text
        else "Knowledge graph successfully updated with new information."
    )


async def handle_prune(arguments: Dict[str, Any], user) -> str:
    """Handle a prune function call."""
    await prune()
    return "Memory has been pruned successfully."

View file

@@ -0,0 +1,102 @@
import time
import uuid
from typing import Any, Dict, List, Optional, Union

from pydantic import BaseModel, Field
from enum import Enum

from cognee.api.DTO import InDTO, OutDTO


class CogneeModel(str, Enum):
    """Enum for supported model types"""

    COGNEEV1 = "cognee-v1"


class FunctionParameters(BaseModel):
    """JSON Schema for function parameters"""

    type: str = "object"
    properties: Dict[str, Dict[str, Any]]
    required: Optional[List[str]] = None


class Function(BaseModel):
    """Function definition compatible with OpenAI's format"""

    name: str
    description: str
    parameters: FunctionParameters


class ToolFunction(BaseModel):
    """Tool function wrapper (for OpenAI compatibility)"""

    type: str = "function"
    function: Function


class FunctionCall(BaseModel):
    """Function call made by the assistant"""

    name: str
    arguments: str


class ToolCall(BaseModel):
    """Tool call made by the assistant"""

    id: str = Field(default_factory=lambda: f"call_{uuid.uuid4().hex}")
    type: str = "function"
    function: FunctionCall


class ChatUsage(BaseModel):
    """Token usage information"""

    prompt_tokens: int = 0
    completion_tokens: int = 0
    total_tokens: int = 0


class ResponseRequest(InDTO):
    """Request body for the new responses endpoint (OpenAI Responses API format)"""

    model: CogneeModel = CogneeModel.COGNEEV1
    input: str
    tools: Optional[List[ToolFunction]] = None
    tool_choice: Optional[Union[str, Dict[str, Any]]] = "auto"
    user: Optional[str] = None
    temperature: Optional[float] = 1.0
    max_tokens: Optional[int] = None


class ToolCallOutput(BaseModel):
    """Output of a tool call in the responses API"""

    status: str = "success"  # success/error
    data: Optional[Dict[str, Any]] = None


class ResponseToolCall(BaseModel):
    """Tool call in a response"""

    id: str = Field(default_factory=lambda: f"call_{uuid.uuid4().hex}")
    type: str = "function"
    function: FunctionCall
    output: Optional[ToolCallOutput] = None


class ResponseResponse(OutDTO):
    """Response body for the new responses endpoint"""

    id: str = Field(default_factory=lambda: f"resp_{uuid.uuid4().hex}")
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str
    object: str = "response"
    status: str = "completed"
    tool_calls: List[ResponseToolCall]
    usage: Optional[ChatUsage] = None
    metadata: Optional[Dict[str, Any]] = None
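To see the defaults at work, a minimal sketch constructing a response by hand (assuming `OutDTO` permits population by field name; only `model` and `tool_calls` lack defaults):

```python
from cognee.api.v1.responses.models import (
    FunctionCall,
    ResponseResponse,
    ResponseToolCall,
)

response = ResponseResponse(
    model="cognee-v1",
    tool_calls=[
        ResponseToolCall(
            function=FunctionCall(name="search", arguments='{"search_query": "NLP"}')
        )
    ],
)

# id, created, object and status are filled in by the Field default factories
print(response.id, response.object, response.status)
```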

View file

@@ -0,0 +1,3 @@
from cognee.api.v1.responses.routers.get_responses_router import get_responses_router
__all__ = ["get_responses_router"]

View file

@@ -0,0 +1,86 @@
DEFAULT_TOOLS = [
    {
        "type": "function",
        "name": "search",
        "description": "Search for information within the knowledge graph",
        "parameters": {
            "type": "object",
            "properties": {
                "search_query": {
                    "type": "string",
                    "description": "The query to search for in the knowledge graph",
                },
                "search_type": {
                    "type": "string",
                    "description": "Type of search to perform",
                    "enum": [
                        "INSIGHTS",
                        "CODE",
                        "GRAPH_COMPLETION",
                        "SEMANTIC",
                        "NATURAL_LANGUAGE",
                    ],
                },
                "top_k": {
                    "type": "integer",
                    "description": "Maximum number of results to return",
                    "default": 10,
                },
                "datasets": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Optional list of dataset names to search within",
                },
            },
            "required": ["search_query"],
        },
    },
    {
        "type": "function",
        "name": "cognify_text",
        "description": "Convert text into a knowledge graph or process all added content",
        "parameters": {
            "type": "object",
            "properties": {
                "text": {
                    "type": "string",
                    "description": "Text content to be converted into a knowledge graph",
                },
                "graph_model_name": {
                    "type": "string",
                    "description": "Name of the graph model to use",
                },
                "graph_model_file": {
                    "type": "string",
                    "description": "Path to a custom graph model file",
                },
            },
        },
    },
    {
        "type": "function",
        "name": "prune",
        "description": "Remove unnecessary or outdated information from the knowledge graph",
        "parameters": {
            "type": "object",
            "properties": {
                "prune_strategy": {
                    "type": "string",
                    "enum": ["light", "moderate", "aggressive"],
                    "description": "Strategy for pruning the knowledge graph",
                    "default": "moderate",
                },
                "min_confidence": {
                    "type": "number",
                    "description": "Minimum confidence score to retain (0-1)",
                    "minimum": 0,
                    "maximum": 1,
                },
                "older_than": {
                    "type": "string",
                    "description": "ISO date string - prune nodes older than this date",
                },
            },
        },
    },
]
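For illustration, arguments conforming to the `prune` schema above might look like this; note that `handle_prune` in the dispatcher currently ignores all arguments and calls `prune()` unconditionally:

```python
# Illustrative only: an arguments payload matching the "prune" tool schema
example_prune_arguments = {
    "prune_strategy": "light",  # one of: light, moderate, aggressive
    "min_confidence": 0.5,  # retain nodes with confidence >= 0.5
    "older_than": "2025-01-01",  # ISO date; prune nodes older than this
}
```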

View file

@@ -0,0 +1,149 @@
"""
Get router for the OpenAI-compatible responses API.
"""

import logging
import uuid
from typing import Dict, List, Optional, Any

import openai
from fastapi import APIRouter, Depends

from cognee.api.v1.responses.models import (
    ResponseRequest,
    ResponseResponse,
    ResponseToolCall,
    ChatUsage,
    FunctionCall,
    ToolCallOutput,
)
from cognee.api.v1.responses.dispatch_function import dispatch_function
from cognee.api.v1.responses.default_tools import DEFAULT_TOOLS
from cognee.infrastructure.llm.config import get_llm_config
from cognee.modules.users.models import User
from cognee.modules.users.methods import get_authenticated_user


def get_responses_router() -> APIRouter:
    """
    Returns the FastAPI router for OpenAI-compatible responses.

    This implementation follows the new OpenAI Responses API format as described in:
    https://platform.openai.com/docs/api-reference/responses/create
    """
    router = APIRouter()
    logger = logging.getLogger(__name__)

    def _get_model_client():
        """Get an OpenAI client configured from the Cognee LLM settings."""
        llm_config = get_llm_config()
        return openai.AsyncOpenAI(api_key=llm_config.llm_api_key)

    async def call_openai_api_for_model(
        input_text: str,
        model: str,
        tools: Optional[List[Dict[str, Any]]] = DEFAULT_TOOLS,
        tool_choice: Any = "auto",
        temperature: float = 1.0,
    ) -> Dict[str, Any]:
        """Call the appropriate model API based on the model name."""
        # TODO: Support other models (e.g. cognee-v1-openai-gpt-3.5-turbo, etc.)
        model = "gpt-4o"
        client = _get_model_client()
        logger.debug(f"Using model: {model}")

        response = await client.responses.create(
            model=model,
            input=input_text,
            temperature=temperature,
            tools=tools,
            tool_choice=tool_choice,
        )
        logger.info(f"Response: {response}")
        return response.model_dump()

    @router.post("/", response_model=ResponseResponse)
    async def create_response(
        request: ResponseRequest,
        user: User = Depends(get_authenticated_user),
    ) -> ResponseResponse:
        """OpenAI-compatible responses endpoint with function calling support."""
        # Use default tools if none provided
        tools = request.tools or DEFAULT_TOOLS

        # Call the API
        response = await call_openai_api_for_model(
            input_text=request.input,
            model=request.model,
            tools=tools,
            tool_choice=request.tool_choice,
            temperature=request.temperature,
        )

        # Use the response ID from the API or generate a new one
        response_id = response.get("id", f"resp_{uuid.uuid4().hex}")

        # Check for function tool calls in the output
        output = response.get("output", [])
        processed_tool_calls = []

        # Process any function tool calls from the output
        for item in output:
            if isinstance(item, dict) and item.get("type") == "function_call":
                # This is a tool call in the new Responses API format
                function_name = item.get("name", "")
                arguments_str = item.get("arguments", "{}")
                call_id = item.get("call_id", f"call_{uuid.uuid4().hex}")

                # Create a format the dispatcher can handle
                tool_call = {
                    "id": call_id,
                    "function": {"name": function_name, "arguments": arguments_str},
                    "type": "function",
                }

                # Dispatch the function
                try:
                    function_result = await dispatch_function(tool_call)
                    output_status = "success"
                except Exception as e:
                    logger.exception(f"Error executing function {function_name}: {e}")
                    function_result = f"Error executing {function_name}: {str(e)}"
                    output_status = "error"

                processed_call = ResponseToolCall(
                    id=call_id,
                    type="function",
                    function=FunctionCall(name=function_name, arguments=arguments_str),
                    output=ToolCallOutput(status=output_status, data={"result": function_result}),
                )
                processed_tool_calls.append(processed_call)

        # Get usage data from the response if available (the key may be present but None)
        usage = response.get("usage") or {}

        # Create the response object with all processed tool calls
        response_obj = ResponseResponse(
            id=response_id,
            model=request.model,
            tool_calls=processed_tool_calls,
            usage=ChatUsage(
                prompt_tokens=usage.get("input_tokens", 0),
                completion_tokens=usage.get("output_tokens", 0),
                total_tokens=usage.get("total_tokens", 0),
            ),
        )

        return response_obj

    return router
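Because the endpoint mirrors the OpenAI Responses API, it can be exercised through the OpenAI SDK by overriding `base_url`, as the demo notebook later in this PR does (the API key value is a placeholder):

```python
from openai import OpenAI

# Point the SDK at the local Cognee server instead of api.openai.com
client = OpenAI(api_key="COGNEE_API_KEY", base_url="http://localhost:8000/api/v1/")

response = client.responses.create(
    model="cognee-v1",
    input="Search insights about NLP",
)
print(response.id, response.status)
```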

community/README.pt.md (new file, 153 lines)
View file

@@ -0,0 +1,153 @@
<div align="center">
  <a href="https://github.com/topoteretes/cognee">
    <img src="https://raw.githubusercontent.com/topoteretes/cognee/refs/heads/dev/assets/cognee-logo-transparent.png" alt="Cognee Logo" height="60">
  </a>
  <br />

cognee - Memória para Agentes de IA em 5 linhas de código

<p align="center">
  <a href="https://www.youtube.com/watch?v=1bezuvLwJmw&t=2s">Demonstração</a>
  ·
  <a href="https://cognee.ai">Saiba mais</a>
  ·
  <a href="https://discord.gg/NQPKmU5CCg">Participe do Discord</a>
</p>

[![GitHub forks](https://img.shields.io/github/forks/topoteretes/cognee.svg?style=social&label=Fork&maxAge=2592000)](https://GitHub.com/topoteretes/cognee/network/)
[![GitHub stars](https://img.shields.io/github/stars/topoteretes/cognee.svg?style=social&label=Star&maxAge=2592000)](https://GitHub.com/topoteretes/cognee/stargazers/)
[![GitHub commits](https://badgen.net/github/commits/topoteretes/cognee)](https://GitHub.com/topoteretes/cognee/commit/)
[![Github tag](https://badgen.net/github/tag/topoteretes/cognee)](https://github.com/topoteretes/cognee/tags/)
[![Downloads](https://static.pepy.tech/badge/cognee)](https://pepy.tech/project/cognee)
[![License](https://img.shields.io/github/license/topoteretes/cognee?colorA=00C586&colorB=000000)](https://github.com/topoteretes/cognee/blob/main/LICENSE)
[![Contributors](https://img.shields.io/github/contributors/topoteretes/cognee?colorA=00C586&colorB=000000)](https://github.com/topoteretes/cognee/graphs/contributors)

<a href="https://www.producthunt.com/posts/cognee?embed=true&utm_source=badge-top-post-badge&utm_medium=badge&utm_souce=badge-cognee" target="_blank"><img src="https://api.producthunt.com/widgets/embed-image/v1/top-post-badge.svg?post_id=946346&theme=light&period=daily&t=1744472480704" alt="cognee - Memory&#0032;for&#0032;AI&#0032;Agents&#0032;&#0032;in&#0032;5&#0032;lines&#0032;of&#0032;code | Product Hunt" style="width: 250px; height: 54px;" width="250" height="54" /></a>

Crie uma memória dinâmica para Agentes usando pipelines ECL (Extrair, Cognificar, Carregar) escaláveis e modulares.

Saiba mais sobre os [casos de uso](https://docs.cognee.ai/use-cases) e [avaliações](https://github.com/topoteretes/cognee/tree/main/evals)

<div style="text-align: center">
  <img src="https://raw.githubusercontent.com/topoteretes/cognee/refs/heads/main/assets/cognee_benefits.png" alt="Por que cognee?" width="50%" />
</div>
</div>

## Funcionalidades

- Conecte e recupere suas conversas passadas, documentos, imagens e transcrições de áudio
- Reduza alucinações, esforço de desenvolvimento e custos
- Carregue dados em bancos de dados de grafos e vetores usando apenas Pydantic
- Transforme e organize seus dados enquanto os coleta de mais de 30 fontes diferentes

## Primeiros Passos

Dê os primeiros passos com facilidade usando um <a href="https://colab.research.google.com/drive/1g-Qnx6l_ecHZi0IOw23rg0qC4TYvEvWZ?usp=sharing">notebook</a> no Google Colab ou um <a href="https://github.com/topoteretes/cognee-starter">repositório inicial</a>

## Contribuindo

Suas contribuições estão no centro de tornar este um verdadeiro projeto open source. Qualquer contribuição que você fizer será **muito bem-vinda**. Veja o [`CONTRIBUTING.md`](/CONTRIBUTING.md) para mais informações.

## 📦 Instalação

Você pode instalar o Cognee usando **pip**, **poetry**, **uv** ou qualquer outro gerenciador de pacotes Python.

### Com pip

```bash
pip install cognee
```

## 💻 Uso Básico

### Configuração

```python
import os

os.environ["LLM_API_KEY"] = "SUA_OPENAI_API_KEY"
```

Você também pode definir as variáveis criando um arquivo .env, usando o nosso <a href="https://github.com/topoteretes/cognee/blob/main/.env.template">modelo</a>.
Para usar diferentes provedores de LLM, consulte nossa <a href="https://docs.cognee.ai">documentação</a>.

### Exemplo simples

Este script executará o pipeline *default*:

```python
import cognee
import asyncio


async def main():
    # Adiciona texto ao cognee
    await cognee.add("Processamento de linguagem natural (NLP) é um subcampo interdisciplinar da ciência da computação e recuperação de informações.")

    # Gera o grafo de conhecimento
    await cognee.cognify()

    # Consulta o grafo de conhecimento
    results = await cognee.search("Me fale sobre NLP")

    # Exibe os resultados
    for result in results:
        print(result)


if __name__ == '__main__':
    asyncio.run(main())
```

Exemplo do output:

```
O Processamento de Linguagem Natural (NLP) é um campo interdisciplinar e transdisciplinar que envolve ciência da computação e recuperação de informações. Ele se concentra na interação entre computadores e a linguagem humana, permitindo que as máquinas compreendam e processem a linguagem natural.
```

Visualização do grafo:

<a href="https://rawcdn.githack.com/topoteretes/cognee/refs/heads/main/assets/graph_visualization.html"><img src="graph_visualization_pt.png" width="100%" alt="Visualização do Grafo"></a>

Abra no [navegador](https://rawcdn.githack.com/topoteretes/cognee/refs/heads/main/assets/graph_visualization.html).

Para um uso mais avançado, confira nossa <a href="https://docs.cognee.ai">documentação</a>.

## Entenda nossa arquitetura

<div style="text-align: center">
  <img src="https://raw.githubusercontent.com/topoteretes/cognee/refs/heads/main/assets/cognee_diagram.png" alt="diagrama conceitual do cognee" width="100%" />
</div>

## Demonstrações

1. O que é memória de IA:

[Saiba mais sobre o cognee](https://github.com/user-attachments/assets/8b2a0050-5ec4-424c-b417-8269971503f0)

2. Demonstração simples do GraphRAG

[Demonstração simples do GraphRAG](https://github.com/user-attachments/assets/d80b0776-4eb9-4b8e-aa22-3691e2d44b8f)

3. Cognee com Ollama

[cognee com modelos locais](https://github.com/user-attachments/assets/8621d3e8-ecb8-4860-afb2-5594f2ee17db)

## Código de Conduta

Estamos comprometidos em tornar o open source uma experiência agradável e respeitosa para nossa comunidade. Veja o <a href="/CODE_OF_CONDUCT.md"><code>CODE_OF_CONDUCT</code></a> para mais informações.

## 💫 Contribuidores

<a href="https://github.com/topoteretes/cognee/graphs/contributors">
  <img alt="contribuidores" src="https://contrib.rocks/image?repo=topoteretes/cognee"/>
</a>

## Histórico de Estrelas

[![Gráfico de Histórico de Estrelas](https://api.star-history.com/svg?repos=topoteretes/cognee&type=Date)](https://star-history.com/#topoteretes/cognee&Date)

Binary file not shown (new image, 603 KiB).

View file

@@ -0,0 +1,109 @@
{
  "cells": [
    {
      "cell_type": "code",
      "metadata": {
        "ExecuteTime": {
          "end_time": "2025-05-03T09:53:28.509066Z",
          "start_time": "2025-05-03T09:52:53.464324Z"
        }
      },
      "source": [
        "from openai import OpenAI\n",
        "\n",
        "# Use /api/v1/auth/login to get JWT\n",
        "\n",
        "client = OpenAI(api_key=\"COGNEE_API_KEY\", base_url=\"http://localhost:8000/api/v1/\")\n",
        "\n",
        "client.responses.create(\n",
        "    model=\"cognee-v1\",\n",
        "    input=\"Cognify: Natural language processing (NLP) is an interdisciplinary subfield of computer science and information retrieval.\",\n",
        ")"
      ],
      "outputs": [
        {
          "name": "stderr",
          "output_type": "stream",
          "text": [
            "\n",
            "\u001B[1mHTTP Request: POST http://localhost:8000/api/v1/responses \"HTTP/1.1 307 Temporary Redirect\"\u001B[0m\n",
            "\u001B[1mHTTP Request: POST http://localhost:8000/api/v1/responses/ \"HTTP/1.1 200 OK\"\u001B[0m"
          ]
        },
        {
          "data": {
            "text/plain": [
"Response(id='resp_6815e775ed1c8191a7c9e5cd7d18dd6d01678e9c98ad3fea', created_at=None, error=None, incomplete_details=None, instructions=None, metadata=None, model='cognee-v1', object='response', output=None, parallel_tool_calls=None, temperature=None, tool_choice=None, tools=None, top_p=None, max_output_tokens=None, previous_response_id=None, reasoning=None, status='completed', text=None, truncation=None, usage=ResponseUsage(input_tokens=None, output_tokens=None, output_tokens_details=None, total_tokens=242, prompt_tokens=207, completion_tokens=35), user=None, created=1746266008, toolCalls=[{'id': 'call_VPk6HLbJClOEeznVbLe5l5zJ', 'type': 'function', 'function': {'name': 'cognify_text', 'arguments': '{\"text\":\"Natural language processing (NLP) is an interdisciplinary subfield of computer science and information retrieval.\"}'}, 'output': {'status': 'success', 'data': {'result': 'Text successfully converted into knowledge graph.'}}}])"
            ]
          },
          "execution_count": 29,
          "metadata": {},
          "output_type": "execute_result"
        }
      ],
      "execution_count": 29
    },
    {
      "cell_type": "code",
      "metadata": {
        "ExecuteTime": {
          "end_time": "2025-05-03T09:53:35.039433Z",
          "start_time": "2025-05-03T09:53:31.791117Z"
        }
      },
      "source": [
        "from openai import OpenAI\n",
        "\n",
        "client = OpenAI(api_key=\"COGNEE_API_KEY\", base_url=\"http://localhost:8000/api/v1/\")\n",
        "\n",
        "client.responses.create(\n",
        "    model=\"cognee-v1\",\n",
        "    input=\"Search insights about NLP\",\n",
        ")"
      ],
      "outputs": [
        {
          "name": "stderr",
          "output_type": "stream",
          "text": [
            "\n",
            "\u001B[1mHTTP Request: POST http://localhost:8000/api/v1/responses \"HTTP/1.1 307 Temporary Redirect\"\u001B[0m\n",
            "\u001B[1mHTTP Request: POST http://localhost:8000/api/v1/responses/ \"HTTP/1.1 200 OK\"\u001B[0m"
          ]
        },
        {
          "data": {
            "text/plain": [
"Response(id='resp_6815e79c2e5c8191a2b2279408544f560bbe226c4ebfeb1f', created_at=None, error=None, incomplete_details=None, instructions=None, metadata=None, model='cognee-v1', object='response', output=None, parallel_tool_calls=None, temperature=None, tool_choice=None, tools=None, top_p=None, max_output_tokens=None, previous_response_id=None, reasoning=None, status='completed', text=None, truncation=None, usage=ResponseUsage(input_tokens=None, output_tokens=None, output_tokens_details=None, total_tokens=215, prompt_tokens=188, completion_tokens=27), user=None, created=1746266015, toolCalls=[{'id': 'call_lP8Le7A76id2MIKqVYa3gvI8', 'type': 'function', 'function': {'name': 'search', 'arguments': '{\"search_query\":\"NLP\",\"search_type\":\"INSIGHTS\",\"top_k\":5}'}, 'output': {'status': 'success', 'data': {'result': [[{'created_at': 1746265986926, 'updated_at': '2025-05-03T09:53:06.926000Z', 'ontology_valid': False, 'version': 1, 'topological_rank': 0, 'metadata': {'index_fields': ['text']}, 'type': 'DocumentChunk', 'belongs_to_set': None, 'text': 'Natural language processing (NLP) is an interdisciplinary subfield of computer science and information retrieval.', 'chunk_size': 34, 'chunk_index': 0, 'cut_type': 'sentence_end', 'id': '6b85cc38-be93-5ada-bed1-90cddad3c385'}, {'source_node_id': '6b85cc38-be93-5ada-bed1-90cddad3c385', 'target_node_id': 'bc338a39-64d6-549a-acec-da60846dd90d', 'relationship_name': 'contains', 'updated_at': '2025-05-03T09:53:24.988192Z'}, {'created_at': 1746265992781, 'updated_at': '2025-05-03T09:53:12.781000Z', 'ontology_valid': False, 'version': 1, 'topological_rank': 0, 'metadata': {'index_fields': ['name']}, 'type': 'Entity', 'belongs_to_set': None, 'name': 'natural language processing', 'description': 'An interdisciplinary subfield of computer science and information retrieval.', 'id': 'bc338a39-64d6-549a-acec-da60846dd90d'}], [{'created_at': 1746265992781, 'updated_at': '2025-05-03T09:53:12.781000Z', 'ontology_valid': False, 'version': 1, 'topological_rank': 0, 'metadata': {'index_fields': ['name']}, 'type': 'Entity', 'belongs_to_set': None, 'name': 'natural language processing', 'description': 'An interdisciplinary subfield of computer science and information retrieval.', 'id': 'bc338a39-64d6-549a-acec-da60846dd90d'}, {'source_node_id': 'bc338a39-64d6-549a-acec-da60846dd90d', 'target_node_id': 'dd9713b7-dc20-5101-aad0-1c4216811147', 'relationship_name': 'is_a', 'updated_at': '2025-05-03T09:53:24.988195Z'}, {'created_at': 1746265992781, 'updated_at': '2025-05-03T09:53:12.781000Z', 'ontology_valid': False, 'version': 1, 'topological_rank': 0, 'metadata': {'index_fields': ['name']}, 'type': 'EntityType', 'belongs_to_set': None, 'name': 'concept', 'description': 'concept', 'id': 'dd9713b7-dc20-5101-aad0-1c4216811147'}], [{'created_at': 1746265992781, 'updated_at': '2025-05-03T09:53:12.781000Z', 'ontology_valid': False, 'version': 1, 'topological_rank': 0, 'metadata': {'index_fields': ['name']}, 'type': 'Entity', 'belongs_to_set': None, 'name': 'natural language processing', 'description': 'An interdisciplinary subfield of computer science and information retrieval.', 'id': 'bc338a39-64d6-549a-acec-da60846dd90d'}, {'relationship_name': 'is_a_subfield_of', 'source_node_id': 'bc338a39-64d6-549a-acec-da60846dd90d', 'target_node_id': '6218dbab-eb6a-5759-a864-b3419755ffe0', 'ontology_valid': False, 'updated_at': '2025-05-03T09:53:18.000683Z'}, {'created_at': 1746265992782, 'updated_at': '2025-05-03T09:53:12.782000Z', 'ontology_valid': False, 'version': 1, 
'topological_rank': 0, 'metadata': {'index_fields': ['name']}, 'type': 'Entity', 'belongs_to_set': None, 'name': 'computer science', 'description': 'The study of computers and computational systems.', 'id': '6218dbab-eb6a-5759-a864-b3419755ffe0'}], [{'created_at': 1746265992781, 'updated_at': '2025-05-03T09:53:12.781000Z', 'ontology_valid': False, 'version': 1, 'topological_rank': 0, 'metadata': {'index_fields': ['name']}, 'type': 'Entity', 'belongs_to_set': None, 'name': 'natural language processing', 'description': 'An interdisciplinary subfield of computer science and information retrieval.', 'id': 'bc338a39-64d6-549a-acec-da60846dd90d'}, {'relationship_name': 'is_a_subfield_of', 'source_node_id': 'bc338a39-64d6-549a-acec-da60846dd90d', 'target_node_id': '02bdab9a-0981-518c-a0d4-1684e0329447', 'ontology_valid': False, 'updated_at': '2025-05-03T09:53:18.000691Z'}, {'created_at': 1746265992782, 'updated_at': '2025-05-03T09:53:12.782000Z', 'ontology_valid': False, 'version': 1, 'topological_rank': 0, 'metadata': {'index_fields': ['name']}, 'type': 'Entity', 'belongs_to_set': None, 'name': 'information retrieval', 'description': 'The activity of obtaining information system resources that are relevant to an information request.', 'id': '02bdab9a-0981-518c-a0d4-1684e0329447'}]]}}}])"
            ]
          },
          "execution_count": 30,
          "metadata": {},
          "output_type": "execute_result"
        }
      ],
      "execution_count": 30
    }
  ],
  "metadata": {
    "kernelspec": {
      "display_name": "Python 3",
      "language": "python",
      "name": "python3"
    },
    "language_info": {
      "codemirror_mode": {
        "name": "ipython",
        "version": 3
      },
      "file_extension": ".py",
      "mimetype": "text/x-python",
      "name": "python",
      "nbconvert_exporter": "python",
      "pygments_lexer": "ipython3",
      "version": "3.11.8"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 2
}

run_cognee_api_server.py (new file, 49 lines)
View file

@@ -0,0 +1,49 @@
#!/usr/bin/env python3
"""
Script to run the Cognee API server for testing.
"""

import os
import sys
import argparse

from cognee.api.client import start_api_server


def main():
    """Run the Cognee API server with the specified host and port."""
    parser = argparse.ArgumentParser(description="Run the Cognee API server for testing.")
    parser.add_argument(
        "--host",
        default="0.0.0.0",
        help="Host to bind the server to (default: 0.0.0.0)",
    )
    parser.add_argument(
        "--port",
        type=int,
        default=8000,
        help="Port to bind the server to (default: 8000)",
    )
    parser.add_argument(
        "--env",
        choices=["prod", "dev", "local"],
        default="local",
        help="Environment to run the server in (default: local)",
    )
    args = parser.parse_args()

    # Set the environment variable before starting the server
    os.environ["ENV"] = args.env

    print(f"Starting Cognee API server in {args.env} mode on {args.host}:{args.port}")
    try:
        start_api_server(host=args.host, port=args.port)
    except KeyboardInterrupt:
        print("\nServer stopped by user")
        sys.exit(0)
    except Exception as e:
        print(f"Error starting server: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()
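The workflow above launches this script with `--env dev`; locally it can be started the same way:

```bash
poetry run python run_cognee_api_server.py --env dev --host 0.0.0.0 --port 8000
```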

View file

@@ -0,0 +1,180 @@
#!/usr/bin/env python3
"""
Test script for Cognee's OpenAI-compatible Responses API
"""

import os
import json
import asyncio

import httpx

# Configuration
API_BASE_URL = "http://localhost:8000"  # Change to your actual API URL
API_ENDPOINT = "/api/v1/responses/"  # Added trailing slash to match the server's redirection
AUTH_ENDPOINT = "/api/v1/auth/login"

# JWT token generated from get_token.py (valid for 1 hour from generation)
# Replace this with a new token if tests fail due to expiration
JWT_TOKEN = os.getenv(
    "COGNEE_JWT_TOKEN",
    "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyX2lkIjoiNjc2MzU1NGMtOTFiZC00MzJjLWFiYTgtZDQyY2Q3MmVkNjU5IiwidGVuYW50X2lkIjoiNDUyMzU0NGQtODJiZC00MzJjLWFjYTctZDQyY2Q3MmVkNjUxIiwicm9sZXMiOlsiYWRtaW4iXSwiZXhwIjoxNzQ2NzM1NTg3fQ.fZtYlhg-7S8ikCNsjmAnYYpv9FQYWaXWgbYnTFkdek0",
)

# Note: Direct function tests using the tools parameter aren't working due to
# issues with how the OpenAI client is processing the requests. However, we can test
# the API by using prompts that should trigger specific functions.


async def test_with_default_tools(token=None):
    """Test using the default tools provided by the API"""
    print("\n--- Testing the OpenAI-compatible Responses API ---")

    # Define payloads for different types of prompts that should trigger different functions
    payloads = [
        {
            "name": "General API capabilities",
            "payload": {
                "model": "cognee-v1",
                "input": "What can I do with this API?",
                "tool_choice": "auto",
            },
            "expected_function": None,  # We don't expect any function call for this
        },
        {
            "name": "Search query",
            "payload": {
                "model": "cognee-v1",
                "input": "What information do we have about Python's asyncio module?",
                "tool_choice": "auto",
            },
            "expected_function": "search",  # We expect a search function call
        },
        {
            "name": "Cognify request",
            "payload": {
                "model": "cognee-v1",
                "input": "Please add this information to the knowledge graph: Python's asyncio module provides infrastructure for writing single-threaded concurrent code using coroutines.",
                "tool_choice": "auto",
            },
            "expected_function": "cognify_text",  # We expect a cognify_text function call
        },
    ]

    test_results = {}

    for test_case in payloads:
        print(f"\nTesting: {test_case['name']}")
        headers = {"Content-Type": "application/json"}
        if token:
            headers["Authorization"] = f"Bearer {token}"

        try:
            async with httpx.AsyncClient(timeout=60.0) as client:  # Increased timeout
                response = await client.post(
                    f"{API_BASE_URL}{API_ENDPOINT}",
                    json=test_case["payload"],
                    headers=headers,
                )
                print(f"Status code: {response.status_code}")

                if response.status_code == 200:
                    result = response.json()
                    print(json.dumps(result, indent=2))

                    # Check for tool calls - handle both snake_case and camelCase property names
                    tool_calls = result.get("tool_calls", result.get("toolCalls", []))
                    if tool_calls:
                        function_names = [
                            tc["function"]["name"]
                            for tc in tool_calls
                            if "function" in tc and "name" in tc["function"]
                        ]
                        expected_fn = test_case["expected_function"]

                        if expected_fn is None:
                            # No function expected
                            if not function_names:
                                test_pass = True
                                print(f"{test_case['name']} test passed: No tool calls as expected")
                            else:
                                test_pass = False
                                print(f"{test_case['name']} test failed: Expected no function calls, but got {function_names}")
                        else:
                            # A specific function is expected
                            if expected_fn in function_names:
                                test_pass = True
                                print(f"{test_case['name']} test passed: Expected function '{expected_fn}' was called")

                                # If this is a cognify_text function, check for success status
                                if expected_fn == "cognify_text":
                                    for tc in tool_calls:
                                        if tc.get("function", {}).get("name") == "cognify_text":
                                            output = tc.get("output", {})
                                            if output.get("status") == "success":
                                                print("✅ cognify_text operation was successful")
                                            else:
                                                print(f"❌ cognify_text operation failed: {output}")

                                # If this is a search function, check if we got results
                                if expected_fn == "search":
                                    for tc in tool_calls:
                                        if tc.get("function", {}).get("name") == "search":
                                            output = tc.get("output", {})
                                            results = output.get("data", {}).get("result", [])
                                            if results:
                                                print(f"✅ search operation returned {len(results)} results")
                                            else:
                                                print("⚠️ search operation did not return any results")
                            else:
                                test_pass = False
                                print(f"{test_case['name']} test failed: Expected function '{expected_fn}' was not called. Got {function_names}")
                    else:
                        # No tool_calls in result
                        if test_case["expected_function"] is None:
                            test_pass = True
                            print(f"{test_case['name']} test passed: No tool calls as expected")
                        else:
                            test_pass = False
                            print(f"{test_case['name']} test failed: Expected function '{test_case['expected_function']}' but no tool calls were made")
                else:
                    test_pass = False
                    print(f"❌ Request failed: {response.text}")
        except Exception as e:
            test_pass = False
            print(f"❌ Exception during test: {str(e)}")

        test_results[test_case["name"]] = test_pass

    # Print summary
    print("\n=== TEST RESULTS SUMMARY ===")
    passed = sum(1 for result in test_results.values() if result)
    total = len(test_results)
    for test_name, result in test_results.items():
        status = "✅ PASSED" if result else "❌ FAILED"
        print(f"{test_name}: {status}")
    print(f"\nPassed {passed}/{total} tests ({passed/total*100:.0f}%)")

    return passed == total


async def main():
    """Run all tests"""
    print("Starting Cognee Responses API Tests")

    # Use the JWT token for authentication
    token = JWT_TOKEN
    print(f"Using JWT token: {token[:20]}...")

    # Run tests with the token
    success = await test_with_default_tools(token)
    print("\nAll tests completed")

    # Return a proper exit code for CI/CD pipelines
    return 0 if success else 1


if __name__ == "__main__":
    exit_code = asyncio.run(main())
    # Use the exit code to signal test success/failure
    import sys

    sys.exit(exit_code)
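Rather than editing the hardcoded default, a fresh token can be supplied through the environment variable the script already reads:

```bash
COGNEE_JWT_TOKEN="<JWT from /api/v1/auth/login>" poetry run python test_cognee_responses_api.py
```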

View file

@@ -0,0 +1,353 @@
#!/usr/bin/env python3
"""
Comprehensive test script for Cognee's OpenAI-compatible Responses API
"""

import os
import json
import asyncio
import sys
from typing import Dict, Any, Optional, List

import httpx

# Configuration
API_BASE_URL = os.getenv("COGNEE_API_URL", "http://localhost:8000")
API_ENDPOINT = "/api/v1/responses/"
AUTH_ENDPOINT = "/api/v1/auth/login"
EMAIL = os.getenv("COGNEE_EMAIL", "default_user@example.com")  # Default test user
PASSWORD = os.getenv("COGNEE_PASSWORD", "default_password")  # Default test password

# ANSI color constants
GREEN = "\033[92m"
RED = "\033[91m"
YELLOW = "\033[93m"
RESET = "\033[0m"


def log_success(message: str) -> None:
    """Print a success message in green"""
    print(f"{GREEN}{message}{RESET}")


def log_error(message: str) -> None:
    """Print an error message in red"""
    print(f"{RED}{message}{RESET}")


def log_warning(message: str) -> None:
    """Print a warning message in yellow"""
    print(f"{YELLOW}⚠️ {message}{RESET}")


def log_info(message: str) -> None:
    """Print an info message"""
    print(f" {message}")


async def authenticate() -> Optional[str]:
    """Authenticate with the API and return an access token"""
    log_info("Authenticating with the API...")
    auth_payload = {
        "email": EMAIL,
        "password": PASSWORD,
    }
    try:
        async with httpx.AsyncClient() as client:
            response = await client.post(
                f"{API_BASE_URL}{AUTH_ENDPOINT}",
                json=auth_payload,
                headers={"Content-Type": "application/json"},
            )
            if response.status_code == 200:
                auth_data = response.json()
                token = auth_data.get("access_token")
                if token:
                    log_success("Authentication successful")
                    return token
                else:
                    log_error("Authentication response did not contain an access token")
                    return None
            else:
                log_error(f"Authentication failed with status {response.status_code}: {response.text}")
                return None
    except Exception as e:
        log_error(f"Authentication error: {str(e)}")
        return None


async def make_api_request(
    payload: Dict[str, Any],
    token: Optional[str] = None,
    expected_status: int = 200,
) -> Dict[str, Any]:
    """Make a request to the API and return the response"""
    headers = {"Content-Type": "application/json"}
    if token:
        headers["Authorization"] = f"Bearer {token}"

    try:
        async with httpx.AsyncClient() as client:
            response = await client.post(
                f"{API_BASE_URL}{API_ENDPOINT}",
                json=payload,
                headers=headers,
                timeout=60.0,  # Increased timeout for cognify operations
            )
            log_info(f"Response status: {response.status_code}")

            if response.status_code == expected_status:
                if expected_status == 200:
                    result = response.json()
                    return result
                else:
                    return {"status": response.status_code, "text": response.text}
            else:
                log_error(f"Request failed with status {response.status_code}: {response.text}")
                return {"error": response.text, "status_code": response.status_code}
    except Exception as e:
        log_error(f"Request error: {str(e)}")
        return {"error": str(e)}


def validate_response(response: Dict[str, Any]) -> bool:
    """Validate the response structure"""
    required_fields = ["id", "created", "model", "object", "status", "tool_calls"]
    missing_fields = [field for field in required_fields if field not in response]
    if missing_fields:
        log_error(f"Response missing required fields: {', '.join(missing_fields)}")
        return False

    if response["object"] != "response":
        log_error(f"Expected 'object' to be 'response', got '{response['object']}'")
        return False

    if not isinstance(response["tool_calls"], list):
        log_error(f"Expected 'tool_calls' to be a list, got {type(response['tool_calls'])}")
        return False

    for i, tool_call in enumerate(response["tool_calls"]):
        if "id" not in tool_call or "function" not in tool_call or "type" not in tool_call:
            log_error(f"Tool call {i} missing required fields")
            return False
        if "name" not in tool_call["function"] or "arguments" not in tool_call["function"]:
            log_error(f"Tool call {i} function missing required fields")
            return False

    return True


async def test_search_function(token: Optional[str] = None) -> bool:
    """Test the search function via the responses API"""
    log_info("\n--- Testing search function ---")

    # Define the request payload
    payload = {
        "model": "cognee-v1",
        "input": "What information do we have about Python's asyncio module?",
        "tools": [
            {
                "type": "function",
                "function": {
                    "name": "search",
                    "description": "Search for information within the knowledge graph",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "search_query": {
                                "type": "string",
                                "description": "The query to search for in the knowledge graph",
                            },
                            "search_type": {
                                "type": "string",
                                "description": "Type of search to perform",
                                "enum": ["INSIGHTS", "CODE", "GRAPH_COMPLETION", "SEMANTIC", "NATURAL_LANGUAGE"],
                            },
                        },
                        "required": ["search_query"],
                    },
                },
            }
        ],
        "tool_choice": "auto",
    }

    result = await make_api_request(payload, token)
    if "error" in result:
        return False
    if not validate_response(result):
        return False

    # Check if we got tool calls
    if not result["tool_calls"]:
        log_warning("No tool calls found in response")
        return False

    search_tool_calls = [tc for tc in result["tool_calls"] if tc["function"]["name"] == "search"]
    if not search_tool_calls:
        log_error("No search tool calls found in response")
        return False

    log_success("Search function test passed")
    return True


async def test_cognify_function(token: Optional[str] = None) -> bool:
    """Test the cognify_text function via the responses API"""
    log_info("\n--- Testing cognify_text function ---")

    # Define the request payload
    payload = {
        "model": "cognee-v1",
        "input": "Please add this information to the knowledge graph: Python's asyncio module provides infrastructure for writing single-threaded concurrent code using coroutines.",
        "tools": [
            {
                "type": "function",
                "function": {
                    "name": "cognify_text",
                    "description": "Convert text into a knowledge graph",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "text": {
                                "type": "string",
                                "description": "Text content to be converted into a knowledge graph",
                            }
                        },
                        "required": ["text"],
                    },
                },
            }
        ],
        "tool_choice": "auto",
    }

    result = await make_api_request(payload, token)
    if "error" in result:
        return False
    if not validate_response(result):
        return False

    # Check if we got tool calls
    if not result["tool_calls"]:
        log_warning("No tool calls found in response")
        return False

    cognify_tool_calls = [tc for tc in result["tool_calls"] if tc["function"]["name"] == "cognify_text"]
    if not cognify_tool_calls:
        log_error("No cognify_text tool calls found in response")
        return False

    # Check whether the output reports success
    for tool_call in cognify_tool_calls:
        if "output" in tool_call:
            output = tool_call["output"]
            if output.get("status") != "success":
                log_error(f"Cognify operation failed: {output}")
                return False

    log_success("Cognify function test passed")
    return True


async def test_with_default_tools(token: Optional[str] = None) -> bool:
    """Test using the default tools provided by the API"""
    log_info("\n--- Testing with default tools ---")

    # Define the request payload - omitting tools to use the defaults
    payload = {
        "model": "cognee-v1",
        "input": "What can I do with this API?",
        "tool_choice": "auto",
    }

    result = await make_api_request(payload, token)
    if "error" in result:
        return False
    if not validate_response(result):
        return False

    log_success("Default tools test passed")
    return True


async def test_invalid_request(token: Optional[str] = None) -> bool:
    """Test handling of invalid requests"""
    log_info("\n--- Testing invalid request handling ---")

    # Missing required parameter (model)
    payload = {
        "input": "What can I do with this API?"
    }

    result = await make_api_request(payload, token, expected_status=422)
    # make_api_request returns {"status": ...} when the expected non-200 status is
    # received, so check that key (not "status_code", which marks unexpected statuses)
    if result.get("status") == 422 or result.get("status_code") == 422:
        log_success("Invalid request properly rejected")
        return True
    else:
        log_error("Invalid request not properly rejected")
        return False


async def main():
    """Run all tests"""
    log_info("Starting Cognee Responses API Tests")

    # Get an authentication token
    token = await authenticate()

    # Run tests
    results = {}

    # Basic functionality
    results["search_function"] = await test_search_function(token)
    results["cognify_function"] = await test_cognify_function(token)
    results["default_tools"] = await test_with_default_tools(token)

    # Error handling
    results["invalid_request"] = await test_invalid_request(token)

    # Summary
    print("\n" + "=" * 50)
    print("TEST RESULTS SUMMARY")
    print("=" * 50)

    passed = sum(1 for result in results.values() if result)
    total = len(results)

    for test_name, result in results.items():
        status = f"{GREEN}PASSED{RESET}" if result else f"{RED}FAILED{RESET}"
        print(f"{test_name.replace('_', ' ').title()}: {status}")

    print("-" * 50)
    print(f"Tests passed: {passed}/{total} ({100 * passed / total:.1f}%)")

    if passed == total:
        log_success("\nAll tests passed! The OpenAI-compatible Responses API is working correctly.")
        return 0
    else:
        log_error("\nSome tests failed. Please check the logs for details.")
        return 1


if __name__ == "__main__":
    exit_code = asyncio.run(main())
    sys.exit(exit_code)
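All configuration is read from the environment, so the script can be pointed at any deployment (the values shown are the script's own defaults):

```bash
COGNEE_API_URL="http://localhost:8000" \
COGNEE_EMAIL="default_user@example.com" \
COGNEE_PASSWORD="default_password" \
  poetry run python test_cognee_responses_api_comprehensive.py
```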