Merge branch 'dev' into feature/add-health-checks
Commit f304d476d9

5 changed files with 218 additions and 146 deletions
@@ -51,7 +51,7 @@ RUN apt-get update && apt-get install -y \

 WORKDIR /app

-COPY --from=uv /root/.local /root/.local
+COPY --from=uv /usr/local /usr/local
 COPY --from=uv /app /app

 RUN chmod +x /app/entrypoint.sh
@@ -48,27 +48,27 @@ if [ "$ENVIRONMENT" = "dev" ] || [ "$ENVIRONMENT" = "local" ]; then
     if [ "$DEBUG" = "true" ]; then
         echo "Waiting for the debugger to attach..."
         if [ "$TRANSPORT_MODE" = "sse" ]; then
-            exec python -m debugpy --wait-for-client --listen 0.0.0.0:$DEBUG_PORT -m cognee --transport sse
+            exec python -m debugpy --wait-for-client --listen 0.0.0.0:$DEBUG_PORT -m cognee --transport sse --no-migration
        elif [ "$TRANSPORT_MODE" = "http" ]; then
-            exec python -m debugpy --wait-for-client --listen 0.0.0.0:$DEBUG_PORT -m cognee --transport http --host 0.0.0.0 --port $HTTP_PORT
+            exec python -m debugpy --wait-for-client --listen 0.0.0.0:$DEBUG_PORT -m cognee --transport http --host 0.0.0.0 --port $HTTP_PORT --no-migration
        else
-            exec python -m debugpy --wait-for-client --listen 0.0.0.0:$DEBUG_PORT -m cognee --transport stdio
+            exec python -m debugpy --wait-for-client --listen 0.0.0.0:$DEBUG_PORT -m cognee --transport stdio --no-migration
        fi
     else
         if [ "$TRANSPORT_MODE" = "sse" ]; then
-            exec cognee --transport sse
+            exec cognee --transport sse --no-migration
        elif [ "$TRANSPORT_MODE" = "http" ]; then
-            exec cognee --transport http --host 0.0.0.0 --port $HTTP_PORT
+            exec cognee --transport http --host 0.0.0.0 --port $HTTP_PORT --no-migration
        else
-            exec cognee --transport stdio
+            exec cognee --transport stdio --no-migration
        fi
     fi
 else
     if [ "$TRANSPORT_MODE" = "sse" ]; then
-        exec cognee --transport sse
+        exec cognee --transport sse --no-migration
    elif [ "$TRANSPORT_MODE" = "http" ]; then
-        exec cognee --transport http --host 0.0.0.0 --port $HTTP_PORT
+        exec cognee --transport http --host 0.0.0.0 --port $HTTP_PORT --no-migration
    else
-        exec cognee --transport stdio
+        exec cognee --transport stdio --no-migration
    fi
 fi
@@ -8,7 +8,7 @@ requires-python = ">=3.10"
 dependencies = [
     # For local cognee repo usage remove comment below and add absolute path to cognee. Then run `uv sync --reinstall` in the mcp folder on local cognee changes.
     # "cognee[postgres,codegraph,gemini,huggingface,docs,neo4j] @ file:/Users/vasilije/Projects/tiktok/cognee",
-    "cognee[postgres,codegraph,gemini,huggingface,docs,neo4j]>=0.2.0,<1.0.0",
+    "cognee[postgres,codegraph,gemini,huggingface,docs,neo4j]==0.2.1",
     "fastmcp>=2.10.0,<3.0.0",
     "mcp>=1.12.0,<2.0.0",
     "uv>=0.6.3,<1.0.0",
@@ -123,11 +123,34 @@ async def cognee_add_developer_rules(
 @mcp.tool()
 async def cognify(data: str, graph_model_file: str = None, graph_model_name: str = None) -> list:
     """
-    Transform data into a structured knowledge graph in Cognee's memory layer.
+    Transform ingested data into a structured knowledge graph.

-    This function launches a background task that processes the provided text/file location and
-    generates a knowledge graph representation. The function returns immediately while
-    the processing continues in the background due to MCP timeout constraints.
+    This is the core processing step in Cognee that converts raw text and documents
+    into an intelligent knowledge graph. It analyzes content, extracts entities and
+    relationships, and creates semantic connections for enhanced search and reasoning.
+
+    Prerequisites:
+    - **LLM_API_KEY**: Must be configured (required for entity extraction and graph generation)
+    - **Data Added**: Must have data previously added via `cognee.add()`
+    - **Vector Database**: Must be accessible for embeddings storage
+    - **Graph Database**: Must be accessible for relationship storage
+
+    Input Requirements:
+    - **Content Types**: Works with any text-extractable content including:
+      * Natural language documents
+      * Structured data (CSV, JSON)
+      * Code repositories
+      * Academic papers and technical documentation
+      * Mixed multimedia content (with text extraction)
+
+    Processing Pipeline:
+    1. **Document Classification**: Identifies document types and structures
+    2. **Permission Validation**: Ensures user has processing rights
+    3. **Text Chunking**: Breaks content into semantically meaningful segments
+    4. **Entity Extraction**: Identifies key concepts, people, places, organizations
+    5. **Relationship Detection**: Discovers connections between entities
+    6. **Graph Construction**: Builds semantic knowledge graph with embeddings
+    7. **Content Summarization**: Creates hierarchical summaries for navigation

     Parameters
     ----------

@@ -152,11 +175,60 @@ async def cognify(data: str, graph_model_file: str = None, graph_model_name: str
         A list containing a single TextContent object with information about the
         background task launch and how to check its status.

+    Next Steps:
+    After successful cognify processing, use search functions to query the knowledge:
+
+    ```python
+    import cognee
+    from cognee import SearchType
+
+    # Process your data into knowledge graph
+    await cognee.cognify()
+
+    # Query for insights using different search types:
+
+    # 1. Natural language completion with graph context
+    insights = await cognee.search(
+        "What are the main themes?",
+        query_type=SearchType.GRAPH_COMPLETION
+    )
+
+    # 2. Get entity relationships and connections
+    relationships = await cognee.search(
+        "connections between concepts",
+        query_type=SearchType.INSIGHTS
+    )
+
+    # 3. Find relevant document chunks
+    chunks = await cognee.search(
+        "specific topic",
+        query_type=SearchType.CHUNKS
+    )
+    ```
+
+    Environment Variables:
+    Required:
+    - LLM_API_KEY: API key for your LLM provider
+
+    Optional:
+    - LLM_PROVIDER, LLM_MODEL, VECTOR_DB_PROVIDER, GRAPH_DATABASE_PROVIDER
+    - LLM_RATE_LIMIT_ENABLED: Enable rate limiting (default: False)
+    - LLM_RATE_LIMIT_REQUESTS: Max requests per interval (default: 60)
+
     Notes
     -----
     - The function launches a background task and returns immediately
     - The actual cognify process may take significant time depending on text length
     - Use the cognify_status tool to check the progress of the operation
+
+    Raises
+    ------
+    InvalidValueError
+        If LLM_API_KEY is not set
+    ValueError
+        If chunks exceed max token limits (reduce chunk_size)
+    DatabaseNotCreatedError
+        If databases are not properly initialized
     """

 async def cognify_task(
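The docstring above documents an add → cognify → search workflow; below is a minimal sketch of that prerequisite flow, assuming the same `cognee` API that the docstring's own example uses and a configured `LLM_API_KEY` (the sample text is illustrative):

```python
import asyncio

import cognee


async def build_graph():
    # Per the docstring's prerequisites: data must be added before cognify runs,
    # and LLM_API_KEY (plus vector/graph database settings) must be configured.
    await cognee.add("Cognee turns documents into a knowledge graph.")
    # Entity extraction, relationship detection, and graph construction.
    await cognee.cognify()


asyncio.run(build_graph())
```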
@@ -327,17 +399,69 @@ async def codify(repo_path: str) -> list:
 @mcp.tool()
 async def search(search_query: str, search_type: str) -> list:
     """
-    Search the Cognee knowledge graph for information relevant to the query.
+    Search and query the knowledge graph for insights, information, and connections.

-    This function executes a search against the Cognee knowledge graph using the
-    specified query and search type. It returns formatted results based on the
-    search type selected.
+    This is the final step in the Cognee workflow that retrieves information from the
+    processed knowledge graph. It supports multiple search modes optimized for different
+    use cases - from simple fact retrieval to complex reasoning and code analysis.
+
+    Search Prerequisites:
+    - **LLM_API_KEY**: Required for GRAPH_COMPLETION and RAG_COMPLETION search types
+    - **Data Added**: Must have data previously added via `cognee.add()`
+    - **Knowledge Graph Built**: Must have processed data via `cognee.cognify()`
+    - **Vector Database**: Must be accessible for semantic search functionality
+
+    Search Types & Use Cases:
+
+    **GRAPH_COMPLETION** (Recommended):
+        Natural language Q&A using full graph context and LLM reasoning.
+        Best for: Complex questions, analysis, summaries, insights.
+        Returns: Conversational AI responses with graph-backed context.
+
+    **RAG_COMPLETION**:
+        Traditional RAG using document chunks without graph structure.
+        Best for: Direct document retrieval, specific fact-finding.
+        Returns: LLM responses based on relevant text chunks.
+
+    **INSIGHTS**:
+        Structured entity relationships and semantic connections.
+        Best for: Understanding concept relationships, knowledge mapping.
+        Returns: Formatted relationship data and entity connections.
+
+    **CHUNKS**:
+        Raw text segments that match the query semantically.
+        Best for: Finding specific passages, citations, exact content.
+        Returns: Ranked list of relevant text chunks with metadata.
+
+    **SUMMARIES**:
+        Pre-generated hierarchical summaries of content.
+        Best for: Quick overviews, document abstracts, topic summaries.
+        Returns: Multi-level summaries from detailed to high-level.
+
+    **CODE**:
+        Code-specific search with syntax and semantic understanding.
+        Best for: Finding functions, classes, implementation patterns.
+        Returns: Structured code information with context and relationships.
+
+    **CYPHER**:
+        Direct graph database queries using Cypher syntax.
+        Best for: Advanced users, specific graph traversals, debugging.
+        Returns: Raw graph query results.
+
+    **FEELING_LUCKY**:
+        Intelligently selects and runs the most appropriate search type.
+        Best for: General-purpose queries or when you're unsure which search type is best.
+        Returns: The results from the automatically selected search type.

     Parameters
     ----------
     search_query : str
-        The search query in natural language. This can be a question, instruction, or
-        any text that expresses what information is needed from the knowledge graph.
+        Your question or search query in natural language.
+        Examples:
+        - "What are the main themes in this research?"
+        - "How do these concepts relate to each other?"
+        - "Find information about machine learning algorithms"
+        - "What functions handle user authentication?"

     search_type : str
         The type of search to perform. Valid options include:

@@ -346,6 +470,9 @@ async def search(search_query: str, search_type: str) -> list:
         - "CODE": Returns code-related knowledge in JSON format
         - "CHUNKS": Returns raw text chunks from the knowledge graph
         - "INSIGHTS": Returns relationships between nodes in readable format
+        - "SUMMARIES": Returns pre-generated hierarchical summaries
+        - "CYPHER": Direct graph database queries
+        - "FEELING_LUCKY": Automatically selects best search type

         The search_type is case-insensitive and will be converted to uppercase.

@@ -354,16 +481,45 @@ async def search(search_query: str, search_type: str) -> list:
     list
         A list containing a single TextContent object with the search results.
         The format of the result depends on the search_type:
-        - For CODE: JSON-formatted search results
-        - For GRAPH_COMPLETION/RAG_COMPLETION: A single text completion
-        - For CHUNKS: String representation of the raw chunks
-        - For INSIGHTS: Formatted string showing node relationships
-        - For other types: String representation of the search results
+        - **GRAPH_COMPLETION/RAG_COMPLETION**: Conversational AI response strings
+        - **INSIGHTS**: Formatted relationship descriptions and entity connections
+        - **CHUNKS**: Relevant text passages with source metadata
+        - **SUMMARIES**: Hierarchical summaries from general to specific
+        - **CODE**: Structured code information with context
+        - **FEELING_LUCKY**: Results in format of automatically selected search type
+        - **CYPHER**: Raw graph query results
+
+    Performance & Optimization:
+    - **GRAPH_COMPLETION**: Slower but most intelligent, uses LLM + graph context
+    - **RAG_COMPLETION**: Medium speed, uses LLM + document chunks (no graph traversal)
+    - **INSIGHTS**: Fast, returns structured relationships without LLM processing
+    - **CHUNKS**: Fastest, pure vector similarity search without LLM
+    - **SUMMARIES**: Fast, returns pre-computed summaries
+    - **CODE**: Medium speed, specialized for code understanding
+    - **FEELING_LUCKY**: Variable speed, uses LLM + search type selection intelligently
+
+    Environment Variables:
+    Required for LLM-based search types (GRAPH_COMPLETION, RAG_COMPLETION):
+    - LLM_API_KEY: API key for your LLM provider
+
+    Optional:
+    - LLM_PROVIDER, LLM_MODEL: Configure LLM for search responses
+    - VECTOR_DB_PROVIDER: Must match what was used during cognify
+    - GRAPH_DATABASE_PROVIDER: Must match what was used during cognify

     Notes
     -----
     - Different search types produce different output formats
     - The function handles the conversion between Cognee's internal result format and MCP's output format
+
+    Raises
+    ------
+    InvalidValueError
+        If LLM_API_KEY is not set (for LLM-based search types)
+    ValueError
+        If query_text is empty or search parameters are invalid
+    NoDataError
+        If no relevant data found for the search query
     """

 async def search_task(search_query: str, search_type: str) -> str:
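For reference, a short sketch mirroring the docstring's own `cognee.search` examples, contrasting the fastest LLM-free mode (CHUNKS) with the recommended LLM-backed mode (GRAPH_COMPLETION); the queries are illustrative and `LLM_API_KEY` is assumed to be configured:

```python
import asyncio

import cognee
from cognee import SearchType


async def query_graph():
    # CHUNKS: pure vector similarity, no LLM call (fastest per the docstring).
    chunks = await cognee.search("user authentication", query_type=SearchType.CHUNKS)

    # GRAPH_COMPLETION: LLM reasoning over graph context (requires LLM_API_KEY).
    answer = await cognee.search(
        "How do these concepts relate to each other?",
        query_type=SearchType.GRAPH_COMPLETION,
    )
    print(chunks, answer)


asyncio.run(query_graph())
```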
@@ -782,30 +938,38 @@ async def main():
         help="Log level for the HTTP server (default: info)",
     )

-    args = parser.parse_args()
-
-    # Run Alembic migrations from the main cognee directory where alembic.ini is located
-    print("Running database migrations...")
-    migration_result = subprocess.run(
-        ["python", "-m", "alembic", "upgrade", "head"],
-        capture_output=True,
-        text=True,
-        cwd=Path(__file__).resolve().parent.parent.parent,
-    )
-
-    if migration_result.returncode != 0:
-        migration_output = migration_result.stderr + migration_result.stdout
-        # Check for the expected UserAlreadyExists error (which is not critical)
-        if (
-            "UserAlreadyExists" in migration_output
-            or "User default_user@example.com already exists" in migration_output
-        ):
-            print("Warning: Default user already exists, continuing startup...")
-        else:
-            print(f"Migration failed with unexpected error: {migration_output}")
-            sys.exit(1)
-
-    print("Database migrations done.")
+    parser.add_argument(
+        "--no-migration",
+        default=False,
+        action="store_true",
+        help="Argument stops database migration from being attempted",
+    )
+
+    args = parser.parse_args()
+
+    if not args.no_migration:
+        # Run Alembic migrations from the main cognee directory where alembic.ini is located
+        logger.info("Running database migrations...")
+        migration_result = subprocess.run(
+            ["python", "-m", "alembic", "upgrade", "head"],
+            capture_output=True,
+            text=True,
+            cwd=Path(__file__).resolve().parent.parent.parent,
+        )
+
+        if migration_result.returncode != 0:
+            migration_output = migration_result.stderr + migration_result.stdout
+            # Check for the expected UserAlreadyExists error (which is not critical)
+            if (
+                "UserAlreadyExists" in migration_output
+                or "User default_user@example.com already exists" in migration_output
+            ):
+                logger.warning("Warning: Default user already exists, continuing startup...")
+            else:
+                logger.error(f"Migration failed with unexpected error: {migration_output}")
+                sys.exit(1)
+
+        logger.info("Database migrations done.")

     logger.info(f"Starting MCP server with transport: {args.transport}")
     if args.transport == "stdio":
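The new flag follows argparse's standard `store_true` pattern: migrations remain the default, and argparse exposes `--no-migration` as the attribute `no_migration`. A standalone sketch of just that behavior (not the server's full parser):

```python
import argparse

# Minimal reproduction of the flag added above.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--no-migration",
    default=False,
    action="store_true",
    help="Skip the Alembic migration step at startup",
)

assert parser.parse_args([]).no_migration is False  # default: run migrations
assert parser.parse_args(["--no-migration"]).no_migration is True  # skip migrations
```

Note that the updated entrypoint.sh passes --no-migration on every path, so servers started through the container entrypoint skip the in-process migration step.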
cognee-mcp/uv.lock (generated, 106 changed lines)
@@ -621,8 +621,8 @@ wheels = [

 [[package]]
 name = "cognee"
-version = "0.2.1.dev7"
-source = { directory = "../" }
+version = "0.2.1"
+source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "aiofiles" },
     { name = "aiohttp" },
@@ -664,6 +664,10 @@ dependencies = [
     { name = "tiktoken" },
     { name = "typing-extensions" },
 ]
+sdist = { url = "https://files.pythonhosted.org/packages/41/46/e7df1faebc92fa31ef8e33faf81feb435782727a789de5532d178e047224/cognee-0.2.1.tar.gz", hash = "sha256:bf5208383fc841981641c040e5b6588e58111af4d771f9eab6552f441e6a8e6c", size = 15497626, upload-time = "2025-07-25T15:53:57.009Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/b3/0e/b705c6eeb538dcdd8fbbb331be25fe8e0bbc1af7d76e61566ec9845b29d3/cognee-0.2.1-py3-none-any.whl", hash = "sha256:6e9d437e0c58a16233841ebf19b1a3d8b67da069460a4f08d0c0e00301b1d36d", size = 1019851, upload-time = "2025-07-25T15:53:53.488Z" },
+]

 [package.optional-dependencies]
 codegraph = [
@@ -690,102 +694,6 @@ postgres = [
     { name = "psycopg2" },
 ]
-
-[package.metadata]
-requires-dist = [
-    { name = "aiofiles", specifier = ">=23.2.1,<24.0.0" },
-    { name = "aiohttp", specifier = ">=3.11.14,<4.0.0" },
-    { name = "aiosqlite", specifier = ">=0.20.0,<1.0.0" },
-    { name = "alembic", specifier = ">=1.13.3,<2" },
-    { name = "anthropic", marker = "extra == 'anthropic'", specifier = ">=0.26.1,<0.27" },
-    { name = "asyncpg", marker = "extra == 'postgres'", specifier = ">=0.30.0,<1.0.0" },
-    { name = "asyncpg", marker = "extra == 'postgres-binary'", specifier = ">=0.30.0,<1.0.0" },
-    { name = "chromadb", marker = "extra == 'chromadb'", specifier = ">=0.3.0,<0.7" },
-    { name = "coverage", marker = "extra == 'dev'", specifier = ">=7.3.2,<8" },
-    { name = "debugpy", marker = "extra == 'debug'", specifier = ">=1.8.9,<2.0.0" },
-    { name = "deepeval", marker = "extra == 'deepeval'", specifier = ">=2.0.1,<3" },
-    { name = "deptry", marker = "extra == 'dev'", specifier = ">=0.20.0,<0.21" },
-    { name = "dlt", extras = ["sqlalchemy"], specifier = ">=1.9.0,<2" },
-    { name = "falkordb", marker = "extra == 'falkordb'", specifier = ">=1.0.9,<2.0.0" },
-    { name = "fastapi", specifier = ">=0.115.7,<1.0.0" },
-    { name = "fastapi-users", extras = ["sqlalchemy"], specifier = ">=14.0.1,<15.0.0" },
-    { name = "fastembed", marker = "python_full_version < '3.13' and extra == 'codegraph'", specifier = "<=0.6.0" },
-    { name = "filetype", specifier = ">=1.2.0,<2.0.0" },
-    { name = "gdown", marker = "extra == 'evals'", specifier = ">=5.2.0,<6" },
-    { name = "gitpython", marker = "extra == 'dev'", specifier = ">=3.1.43,<4" },
-    { name = "google-generativeai", marker = "extra == 'gemini'", specifier = ">=0.8.4,<0.9" },
-    { name = "graphiti-core", marker = "extra == 'graphiti'", specifier = ">=0.7.0,<0.8" },
-    { name = "groq", marker = "extra == 'groq'", specifier = ">=0.8.0,<1.0.0" },
-    { name = "gunicorn", marker = "extra == 'api'", specifier = ">=20.1.0,<24" },
-    { name = "instructor", specifier = ">=1.9.1,<2.0.0" },
-    { name = "jinja2", specifier = ">=3.1.3,<4" },
-    { name = "kuzu", specifier = "==0.11.0" },
-    { name = "lancedb", specifier = ">=0.24.0,<1.0.0" },
-    { name = "langchain-text-splitters", marker = "extra == 'langchain'", specifier = ">=0.3.2,<1.0.0" },
-    { name = "langfuse", specifier = ">=2.32.0,<3" },
-    { name = "langsmith", marker = "extra == 'langchain'", specifier = ">=0.2.3,<1.0.0" },
-    { name = "limits", specifier = ">=4.4.1,<5" },
-    { name = "litellm", specifier = ">=1.57.4,<1.71.0" },
-    { name = "llama-index-core", marker = "extra == 'llama-index'", specifier = ">=0.12.11,<0.13" },
-    { name = "matplotlib", specifier = ">=3.8.3,<4" },
-    { name = "mistral-common", marker = "extra == 'mistral'", specifier = ">=1.5.2,<2" },
-    { name = "mkdocs-material", marker = "extra == 'dev'", specifier = ">=9.5.42,<10" },
-    { name = "mkdocs-minify-plugin", marker = "extra == 'dev'", specifier = ">=0.8.0,<0.9" },
-    { name = "mkdocstrings", extras = ["python"], marker = "extra == 'dev'", specifier = ">=0.26.2,<0.27" },
-    { name = "modal", marker = "extra == 'distributed'", specifier = ">=1.0.5,<2.0.0" },
-    { name = "mypy", marker = "extra == 'dev'", specifier = ">=1.7.1,<2" },
-    { name = "neo4j", marker = "extra == 'neo4j'", specifier = ">=5.28.0,<6" },
-    { name = "networkx", specifier = ">=3.4.2,<4" },
-    { name = "nltk", specifier = ">=3.9.1,<4.0.0" },
-    { name = "notebook", marker = "extra == 'dev'", specifier = ">=7.1.0,<8" },
-    { name = "notebook", marker = "extra == 'notebook'", specifier = ">=7.1.0,<8" },
-    { name = "numpy", specifier = ">=1.26.4,<=4.0.0" },
-    { name = "onnxruntime", specifier = ">=1.0.0,<2.0.0" },
-    { name = "openai", specifier = ">=1.80.1,<2" },
-    { name = "pandas", specifier = ">=2.2.2,<3.0.0" },
-    { name = "pgvector", marker = "extra == 'postgres'", specifier = ">=0.3.5,<0.4" },
-    { name = "pgvector", marker = "extra == 'postgres-binary'", specifier = ">=0.3.5,<0.4" },
-    { name = "plotly", marker = "extra == 'evals'", specifier = ">=6.0.0,<7" },
-    { name = "posthog", marker = "extra == 'posthog'", specifier = ">=3.5.0,<4" },
-    { name = "pre-commit", specifier = ">=4.0.1,<5" },
-    { name = "psycopg2", marker = "extra == 'postgres'", specifier = ">=2.9.10,<3" },
-    { name = "psycopg2-binary", marker = "extra == 'postgres-binary'", specifier = ">=2.9.10,<3.0.0" },
-    { name = "pydantic", specifier = ">=2.10.5,<3.0.0" },
-    { name = "pydantic-settings", specifier = ">=2.2.1,<3" },
-    { name = "pylance", specifier = ">=0.22.0,<1.0.0" },
-    { name = "pylint", marker = "extra == 'dev'", specifier = ">=3.0.3,<4" },
-    { name = "pympler", specifier = ">=1.1,<2.0.0" },
-    { name = "pypdf", specifier = ">=4.1.0,<6.0.0" },
-    { name = "pypika", marker = "extra == 'chromadb'", specifier = "==0.48.8" },
-    { name = "pyside6", marker = "extra == 'gui'", specifier = ">=6.8.3,<7" },
-    { name = "pytest", marker = "extra == 'dev'", specifier = ">=7.4.0,<8" },
-    { name = "pytest-asyncio", marker = "extra == 'dev'", specifier = ">=0.21.1,<0.22" },
-    { name = "pytest-cov", marker = "extra == 'dev'", specifier = ">=6.1.1,<7.0.0" },
-    { name = "python-dotenv", specifier = ">=1.0.1,<2.0.0" },
-    { name = "python-multipart", specifier = ">=0.0.20,<1.0.0" },
-    { name = "qasync", marker = "extra == 'gui'", specifier = ">=0.27.1,<0.28" },
-    { name = "qdrant-client", marker = "extra == 'qdrant'", specifier = ">=1.14.2,<2" },
-    { name = "rdflib", specifier = ">=7.1.4,<7.2.0" },
-    { name = "ruff", marker = "extra == 'dev'", specifier = ">=0.9.2,<1.0.0" },
-    { name = "s3fs", extras = ["boto3"], specifier = "==2025.3.2" },
-    { name = "s3fs", extras = ["boto3"], marker = "extra == 'aws'", specifier = "==2025.3.2" },
-    { name = "scikit-learn", specifier = ">=1.6.1,<2" },
-    { name = "sentry-sdk", extras = ["fastapi"], specifier = ">=2.9.0,<3" },
-    { name = "sqlalchemy", specifier = ">=2.0.39,<3.0.0" },
-    { name = "structlog", specifier = ">=25.2.0,<26" },
-    { name = "tiktoken", specifier = ">=0.8.0,<1.0.0" },
-    { name = "transformers", marker = "extra == 'codegraph'", specifier = ">=4.46.3,<5" },
-    { name = "transformers", marker = "extra == 'huggingface'", specifier = ">=4.46.3,<5" },
-    { name = "transformers", marker = "extra == 'ollama'", specifier = ">=4.46.3,<5" },
-    { name = "tree-sitter", marker = "extra == 'codegraph'", specifier = ">=0.24.0,<0.25" },
-    { name = "tree-sitter-python", marker = "extra == 'codegraph'", specifier = ">=0.23.6,<0.24" },
-    { name = "tweepy", marker = "extra == 'dev'", specifier = ">=4.14.0,<5.0.0" },
-    { name = "typing-extensions", specifier = ">=4.12.2,<5.0.0" },
-    { name = "unstructured", extras = ["csv", "doc", "docx", "epub", "md", "odt", "org", "ppt", "pptx", "rst", "rtf", "tsv", "xlsx"], marker = "extra == 'docs'", specifier = ">=0.18.1,<19" },
-    { name = "uvicorn", marker = "extra == 'api'", specifier = ">=0.34.0,<1.0.0" },
-    { name = "websockets", marker = "extra == 'api'", specifier = ">=15.0.1,<16.0.0" },
-]
-provides-extras = ["api", "distributed", "qdrant", "neo4j", "postgres", "postgres-binary", "notebook", "langchain", "llama-index", "gemini", "huggingface", "ollama", "mistral", "anthropic", "deepeval", "posthog", "falkordb", "groq", "chromadb", "docs", "codegraph", "evals", "gui", "graphiti", "aws", "dev", "debug"]

 [[package]]
 name = "cognee-mcp"
 version = "0.4.0"
@@ -804,7 +712,7 @@ dev = [

 [package.metadata]
 requires-dist = [
-    { name = "cognee", extras = ["postgres", "codegraph", "gemini", "huggingface", "docs", "neo4j"], directory = "../" },
+    { name = "cognee", extras = ["postgres", "codegraph", "gemini", "huggingface", "docs", "neo4j"], specifier = "==0.2.1" },
     { name = "fastmcp", specifier = ">=2.10.0,<3.0.0" },
     { name = "mcp", specifier = ">=1.12.0,<2.0.0" },
     { name = "uv", specifier = ">=0.6.3,<1.0.0" },