Merge branch 'dev' into feature/cog-3160-redis-session-conversation

Authored by hajdul88 on 2025-10-20 10:05:57 +02:00; committed by GitHub.
commit 4d3ba3f313
15 changed files with 160 additions and 1303 deletions

@@ -110,6 +110,81 @@ jobs:
EMBEDDING_API_VERSION: ${{ secrets.EMBEDDING_API_VERSION }}
run: uv run python ./examples/python/dynamic_steps_example.py
test-temporal-example:
name: Run Temporal Tests
runs-on: ubuntu-22.04
steps:
- name: Check out repository
uses: actions/checkout@v4
- name: Cognee Setup
uses: ./.github/actions/cognee_setup
with:
python-version: '3.11.x'
- name: Run Temporal Example
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
LLM_MODEL: ${{ secrets.LLM_MODEL }}
LLM_ENDPOINT: ${{ secrets.LLM_ENDPOINT }}
LLM_API_KEY: ${{ secrets.LLM_API_KEY }}
LLM_API_VERSION: ${{ secrets.LLM_API_VERSION }}
EMBEDDING_MODEL: ${{ secrets.EMBEDDING_MODEL }}
EMBEDDING_ENDPOINT: ${{ secrets.EMBEDDING_ENDPOINT }}
EMBEDDING_API_KEY: ${{ secrets.EMBEDDING_API_KEY }}
EMBEDDING_API_VERSION: ${{ secrets.EMBEDDING_API_VERSION }}
run: uv run python ./examples/python/temporal_example.py
test-ontology-example:
name: Run Ontology Tests
runs-on: ubuntu-22.04
steps:
- name: Check out repository
uses: actions/checkout@v4
- name: Cognee Setup
uses: ./.github/actions/cognee_setup
with:
python-version: '3.11.x'
- name: Run Ontology Demo Example
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
LLM_MODEL: ${{ secrets.LLM_MODEL }}
LLM_ENDPOINT: ${{ secrets.LLM_ENDPOINT }}
LLM_API_KEY: ${{ secrets.LLM_API_KEY }}
LLM_API_VERSION: ${{ secrets.LLM_API_VERSION }}
EMBEDDING_MODEL: ${{ secrets.EMBEDDING_MODEL }}
EMBEDDING_ENDPOINT: ${{ secrets.EMBEDDING_ENDPOINT }}
EMBEDDING_API_KEY: ${{ secrets.EMBEDDING_API_KEY }}
EMBEDDING_API_VERSION: ${{ secrets.EMBEDDING_API_VERSION }}
run: uv run python ./examples/python/ontology_demo_example.py
test-agentic-reasoning:
name: Run Agentic Reasoning Tests
runs-on: ubuntu-22.04
steps:
- name: Check out repository
uses: actions/checkout@v4
- name: Cognee Setup
uses: ./.github/actions/cognee_setup
with:
python-version: '3.11.x'
- name: Run Agentic Reasoning Example
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
LLM_MODEL: ${{ secrets.LLM_MODEL }}
LLM_ENDPOINT: ${{ secrets.LLM_ENDPOINT }}
LLM_API_KEY: ${{ secrets.LLM_API_KEY }}
LLM_API_VERSION: ${{ secrets.LLM_API_VERSION }}
EMBEDDING_MODEL: ${{ secrets.EMBEDDING_MODEL }}
EMBEDDING_ENDPOINT: ${{ secrets.EMBEDDING_ENDPOINT }}
EMBEDDING_API_KEY: ${{ secrets.EMBEDDING_API_KEY }}
EMBEDDING_API_VERSION: ${{ secrets.EMBEDDING_API_VERSION }}
run: uv run python ./examples/python/agentic_reasoning_procurement_example.py
test-memify:
name: Run Memify Example
runs-on: ubuntu-22.04

@@ -9,7 +9,7 @@ on:
python-versions:
required: false
type: string
default: '["3.10.x", "3.11.x", "3.12.x"]'
default: '["3.10.x", "3.12.x", "3.13.x"]'
secrets:
LLM_PROVIDER:
required: true

@@ -85,7 +85,7 @@ jobs:
needs: [basic-tests, e2e-tests]
uses: ./.github/workflows/test_different_operating_systems.yml
with:
python-versions: '["3.10.x", "3.11.x", "3.12.x"]'
python-versions: '["3.10.x", "3.11.x", "3.12.x", "3.13.x"]'
secrets: inherit
# Matrix-based vector database tests

@@ -41,6 +41,7 @@ async def add(
extraction_rules: Optional[Dict[str, Any]] = None,
tavily_config: Optional[BaseModel] = None,
soup_crawler_config: Optional[BaseModel] = None,
data_per_batch: Optional[int] = 20,
):
"""
Add data to Cognee for knowledge graph processing.
@@ -235,6 +236,7 @@ async def add(
vector_db_config=vector_db_config,
graph_db_config=graph_db_config,
incremental_loading=incremental_loading,
data_per_batch=data_per_batch,
):
pipeline_run_info = run_info
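
For reference, a minimal usage sketch of the new data_per_batch keyword on add(); the file path and dataset name below are illustrative, not taken from this commit:

import asyncio
import cognee

async def main():
    # data_per_batch caps how many data items the pipeline processes
    # concurrently per batch (the default shown above is 20).
    await cognee.add(["./data/report.txt"], "my_dataset", data_per_batch=10)

if __name__ == "__main__":
    asyncio.run(main())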

@@ -51,6 +51,7 @@ async def cognify(
incremental_loading: bool = True,
custom_prompt: Optional[str] = None,
temporal_cognify: bool = False,
data_per_batch: int = 20,
):
"""
Transform ingested data into a structured knowledge graph.
@@ -228,6 +229,7 @@ async def cognify(
graph_db_config=graph_db_config,
incremental_loading=incremental_loading,
pipeline_name="cognify_pipeline",
data_per_batch=data_per_batch,
)
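
cognify() exposes the same knob; a minimal sketch, assuming the dataset was added earlier, of re-running cognify with a smaller batch size (the dataset name is illustrative):

import asyncio
import cognee

async def recognify():
    # Process at most 5 data items concurrently per batch during cognify.
    await cognee.cognify(["my_dataset"], data_per_batch=5)

if __name__ == "__main__":
    asyncio.run(recognify())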

@@ -185,13 +185,6 @@ async def search(
if not datasets:
raise DatasetNotFoundError(message="No datasets found.")
graph_engine = await get_graph_engine()
is_empty = await graph_engine.is_empty()
if is_empty:
logger.warning("Search attempt on an empty knowledge graph")
return []
filtered_search_results = await search_function(
query_text=query_text,
query_type=query_type,

@@ -162,5 +162,5 @@ def create_graph_engine(
raise EnvironmentError(
f"Unsupported graph database provider: {graph_database_provider}. "
f"Supported providers are: {', '.join(list(supported_databases.keys()) + ['neo4j', 'kuzu', 'kuzu-remote', 'memgraph', 'neptune', 'neptune_analytics'])}"
f"Supported providers are: {', '.join(list(supported_databases.keys()) + ['neo4j', 'kuzu', 'kuzu-remote', 'neptune', 'neptune_analytics'])}"
)

@@ -105,7 +105,6 @@ class LoaderEngine:
async def load_file(
self,
file_path: str,
file_stream: Optional[Any],
preferred_loaders: Optional[List[str]] = None,
**kwargs,
):

@@ -35,6 +35,7 @@ async def run_pipeline(
vector_db_config: dict = None,
graph_db_config: dict = None,
incremental_loading: bool = False,
data_per_batch: int = 20,
):
validate_pipeline_tasks(tasks)
await setup_and_check_environment(vector_db_config, graph_db_config)
@@ -50,6 +51,7 @@ async def run_pipeline(
pipeline_name=pipeline_name,
context={"dataset": dataset},
incremental_loading=incremental_loading,
data_per_batch=data_per_batch,
):
yield run_info
@@ -62,6 +64,7 @@ async def run_pipeline_per_dataset(
pipeline_name: str = "custom_pipeline",
context: dict = None,
incremental_loading=False,
data_per_batch: int = 20,
):
# Will only be used if ENABLE_BACKEND_ACCESS_CONTROL is set to True
await set_database_global_context_variables(dataset.id, dataset.owner_id)
@@ -77,7 +80,7 @@ async def run_pipeline_per_dataset(
return
pipeline_run = run_tasks(
tasks, dataset.id, data, user, pipeline_name, context, incremental_loading
tasks, dataset.id, data, user, pipeline_name, context, incremental_loading, data_per_batch
)
async for pipeline_run_info in pipeline_run:

@@ -24,7 +24,6 @@ from cognee.modules.pipelines.operations import (
log_pipeline_run_complete,
log_pipeline_run_error,
)
from .run_tasks_with_telemetry import run_tasks_with_telemetry
from .run_tasks_data_item import run_tasks_data_item
from ..tasks.task import Task
@@ -60,6 +59,7 @@ async def run_tasks(
pipeline_name: str = "unknown_pipeline",
context: dict = None,
incremental_loading: bool = False,
data_per_batch: int = 20,
):
if not user:
user = await get_default_user()
@@ -89,24 +89,29 @@ async def run_tasks(
if incremental_loading:
data = await resolve_data_directories(data)
# Create async tasks per data item that will run the pipeline for the data item
data_item_tasks = [
asyncio.create_task(
run_tasks_data_item(
data_item,
dataset,
tasks,
pipeline_name,
pipeline_id,
pipeline_run_id,
context,
user,
incremental_loading,
# Create and gather batches of async tasks of data items that will run the pipeline for the data item
results = []
for start in range(0, len(data), data_per_batch):
data_batch = data[start : start + data_per_batch]
data_item_tasks = [
asyncio.create_task(
run_tasks_data_item(
data_item,
dataset,
tasks,
pipeline_name,
pipeline_id,
pipeline_run_id,
context,
user,
incremental_loading,
)
)
)
for data_item in data
]
results = await asyncio.gather(*data_item_tasks)
for data_item in data_batch
]
results.extend(await asyncio.gather(*data_item_tasks))
# Remove skipped data items from results
results = [result for result in results if result]
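
For context, the change above swaps a single asyncio.gather over every data item for gathering in fixed-size batches, so at most data_per_batch items are in flight at once. A standalone sketch of the same pattern, using illustrative names rather than the actual pipeline code:

import asyncio

async def process_item(item):
    # Stand-in for run_tasks_data_item(...); returns None for skipped items.
    await asyncio.sleep(0)
    return item

async def run_in_batches(data, data_per_batch=20):
    results = []
    for start in range(0, len(data), data_per_batch):
        batch = data[start : start + data_per_batch]
        # Only this batch's tasks run concurrently before the next batch starts.
        results.extend(await asyncio.gather(*(process_item(item) for item in batch)))
    # Drop skipped (falsy) results, mirroring the filter above.
    return [result for result in results if result]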

@@ -1,105 +0,0 @@
import os
import pathlib
import cognee
from cognee.infrastructure.files.storage import get_storage_config
from cognee.modules.search.operations import get_history
from cognee.modules.users.methods import get_default_user
from cognee.shared.logging_utils import get_logger
from cognee.modules.search.types import SearchType
logger = get_logger()
async def main():
cognee.config.set_graph_database_provider("memgraph")
data_directory_path = str(
pathlib.Path(
os.path.join(pathlib.Path(__file__).parent, ".data_storage/test_memgraph")
).resolve()
)
cognee.config.data_root_directory(data_directory_path)
cognee_directory_path = str(
pathlib.Path(
os.path.join(pathlib.Path(__file__).parent, ".cognee_system/test_memgraph")
).resolve()
)
cognee.config.system_root_directory(cognee_directory_path)
await cognee.prune.prune_data()
await cognee.prune.prune_system(metadata=True)
dataset_name = "cs_explanations"
explanation_file_path_nlp = os.path.join(
pathlib.Path(__file__).parent, "test_data/Natural_language_processing.txt"
)
await cognee.add([explanation_file_path_nlp], dataset_name)
explanation_file_path_quantum = os.path.join(
pathlib.Path(__file__).parent, "test_data/Quantum_computers.txt"
)
await cognee.add([explanation_file_path_quantum], dataset_name)
await cognee.cognify([dataset_name])
from cognee.infrastructure.databases.vector import get_vector_engine
vector_engine = get_vector_engine()
random_node = (await vector_engine.search("Entity_name", "Quantum computer"))[0]
random_node_name = random_node.payload["text"]
search_results = await cognee.search(
query_type=SearchType.GRAPH_COMPLETION, query_text=random_node_name
)
assert len(search_results) != 0, "The search results list is empty."
print("\n\nExtracted sentences are:\n")
for result in search_results:
print(f"{result}\n")
search_results = await cognee.search(query_type=SearchType.CHUNKS, query_text=random_node_name)
assert len(search_results) != 0, "The search results list is empty."
print("\n\nExtracted chunks are:\n")
for result in search_results:
print(f"{result}\n")
search_results = await cognee.search(
query_type=SearchType.SUMMARIES, query_text=random_node_name
)
assert len(search_results) != 0, "Query related summaries don't exist."
print("\nExtracted results are:\n")
for result in search_results:
print(f"{result}\n")
search_results = await cognee.search(
query_type=SearchType.NATURAL_LANGUAGE,
query_text=f"Find nodes connected to node with name {random_node_name}",
)
assert len(search_results) != 0, "Query related natural language don't exist."
print("\nExtracted results are:\n")
for result in search_results:
print(f"{result}\n")
user = await get_default_user()
history = await get_history(user.id)
assert len(history) == 8, "Search history is not correct."
await cognee.prune.prune_data()
data_root_directory = get_storage_config()["data_root_directory"]
assert not os.path.isdir(data_root_directory), "Local data files are not deleted"
await cognee.prune.prune_system(metadata=True)
from cognee.infrastructure.databases.graph import get_graph_engine
graph_engine = await get_graph_engine()
nodes, edges = await graph_engine.get_graph_data()
assert len(nodes) == 0 and len(edges) == 0, "Memgraph graph database is not empty"
if __name__ == "__main__":
import asyncio
asyncio.run(main())

@@ -83,16 +83,16 @@
]
},
{
"metadata": {},
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import pathlib\n",
"from cognee import config, add, cognify, search, SearchType, prune, visualize_graph\n",
"from dotenv import load_dotenv"
],
"outputs": [],
"execution_count": null
]
},
{
"cell_type": "markdown",
@@ -106,7 +106,9 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# load environment variables from file .env\n",
"load_dotenv()\n",
@@ -145,9 +147,7 @@
" \"vector_db_url\": f\"neptune-graph://{graph_identifier}\", # Neptune Analytics endpoint with the format neptune-graph://<GRAPH_ID>\n",
" }\n",
")"
],
"outputs": [],
"execution_count": null
]
},
{
"cell_type": "markdown",
@@ -159,19 +159,19 @@
]
},
{
"metadata": {},
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Prune data and system metadata before running, only if we want \"fresh\" state.\n",
"await prune.prune_data()\n",
"await prune.prune_system(metadata=True)"
],
"outputs": [],
"execution_count": null
]
},
{
"metadata": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## Setup data and cognify\n",
"\n",
@@ -180,7 +180,9 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Add sample text to the dataset\n",
"sample_text_1 = \"\"\"Neptune Analytics is a memory-optimized graph database engine for analytics. With Neptune\n",
@@ -205,9 +207,7 @@
"\n",
"# Cognify the text data.\n",
"await cognify([dataset_name])"
],
"outputs": [],
"execution_count": null
]
},
{
"cell_type": "markdown",
@@ -215,14 +215,16 @@
"source": [
"## Graph Memory visualization\n",
"\n",
"Initialize Memgraph as a Graph Memory store and save to .artefacts/graph_visualization.html\n",
"Initialize Neptune as a Graph Memory store and save to .artefacts/graph_visualization.html\n",
"\n",
"![visualization](./neptune_analytics_demo.png)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Get a graphistry url (Register for a free account at https://www.graphistry.com)\n",
"# url = await render_graph()\n",
@@ -235,9 +237,7 @@
" ).resolve()\n",
")\n",
"await visualize_graph(graph_file_path)"
],
"outputs": [],
"execution_count": null
]
},
{
"cell_type": "markdown",
@@ -250,19 +250,19 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Completion query that uses graph data to form context.\n",
"graph_completion = await search(query_text=\"What is Neptune Analytics?\", query_type=SearchType.GRAPH_COMPLETION)\n",
"print(\"\\nGraph completion result is:\")\n",
"print(graph_completion)"
],
"outputs": [],
"execution_count": null
]
},
{
"metadata": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## SEARCH: RAG Completion\n",
"\n",
@@ -271,19 +271,19 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Completion query that uses document chunks to form context.\n",
"rag_completion = await search(query_text=\"What is Neptune Analytics?\", query_type=SearchType.RAG_COMPLETION)\n",
"print(\"\\nRAG Completion result is:\")\n",
"print(rag_completion)"
],
"outputs": [],
"execution_count": null
]
},
{
"metadata": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## SEARCH: Graph Insights\n",
"\n",
@@ -291,8 +291,10 @@
]
},
{
"metadata": {},
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Search graph insights\n",
"insights_results = await search(query_text=\"Neptune Analytics\", query_type=SearchType.GRAPH_COMPLETION)\n",
@@ -302,13 +304,11 @@
" tgt_node = result[2].get(\"name\", result[2][\"type\"])\n",
" relationship = result[1].get(\"relationship_name\", \"__relationship__\")\n",
" print(f\"- {src_node} -[{relationship}]-> {tgt_node}\")"
],
"outputs": [],
"execution_count": null
]
},
{
"metadata": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## SEARCH: Entity Summaries\n",
"\n",
@@ -316,8 +316,10 @@
]
},
{
"metadata": {},
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Query all summaries related to query.\n",
"summaries = await search(query_text=\"Neptune Analytics\", query_type=SearchType.SUMMARIES)\n",
@@ -326,13 +328,11 @@
" type = summary[\"type\"]\n",
" text = summary[\"text\"]\n",
" print(f\"- {type}: {text}\")"
],
"outputs": [],
"execution_count": null
]
},
{
"metadata": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## SEARCH: Chunks\n",
"\n",
@@ -340,8 +340,10 @@
]
},
{
"metadata": {},
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"chunks = await search(query_text=\"Neptune Analytics\", query_type=SearchType.CHUNKS)\n",
"print(\"\\nChunk results are:\")\n",
@@ -349,9 +351,7 @@
" type = chunk[\"type\"]\n",
" text = chunk[\"text\"]\n",
" print(f\"- {type}: {text}\")"
],
"outputs": [],
"execution_count": null
]
}
],
"metadata": {

poetry.lock (generated, 13 changes)

@@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand.
# This file is automatically @generated by Poetry 2.1.2 and should not be changed by hand.
[[package]]
name = "accelerate"
@@ -6633,7 +6633,7 @@ description = "Fundamental package for array computing in Python"
optional = false
python-versions = ">=3.11"
groups = ["main"]
markers = "python_version == \"3.12\" or python_full_version == \"3.13.0\""
markers = "python_version >= \"3.12\""
files = [
{file = "numpy-2.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0ffc4f5caba7dfcbe944ed674b7eef683c7e94874046454bb79ed7ee0236f59d"},
{file = "numpy-2.3.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e7e946c7170858a0295f79a60214424caac2ffdb0063d4d79cb681f9aa0aa569"},
@@ -8532,7 +8532,6 @@ files = [
{file = "psycopg2-2.9.10-cp311-cp311-win_amd64.whl", hash = "sha256:0435034157049f6846e95103bd8f5a668788dd913a7c30162ca9503fdf542cb4"},
{file = "psycopg2-2.9.10-cp312-cp312-win32.whl", hash = "sha256:65a63d7ab0e067e2cdb3cf266de39663203d38d6a8ed97f5ca0cb315c73fe067"},
{file = "psycopg2-2.9.10-cp312-cp312-win_amd64.whl", hash = "sha256:4a579d6243da40a7b3182e0430493dbd55950c493d8c68f4eec0b302f6bbf20e"},
{file = "psycopg2-2.9.10-cp313-cp313-win_amd64.whl", hash = "sha256:91fd603a2155da8d0cfcdbf8ab24a2d54bca72795b90d2a3ed2b6da8d979dee2"},
{file = "psycopg2-2.9.10-cp39-cp39-win32.whl", hash = "sha256:9d5b3b94b79a844a986d029eee38998232451119ad653aea42bb9220a8c5066b"},
{file = "psycopg2-2.9.10-cp39-cp39-win_amd64.whl", hash = "sha256:88138c8dedcbfa96408023ea2b0c369eda40fe5d75002c0964c78f46f11fa442"},
{file = "psycopg2-2.9.10.tar.gz", hash = "sha256:12ec0b40b0273f95296233e8750441339298e6a572f7039da5b260e3c8b60e11"},
@@ -11203,7 +11202,7 @@ description = "Easily download, build, install, upgrade, and uninstall Python pa
optional = true
python-versions = ">=3.9"
groups = ["main"]
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and (extra == \"docs\" or extra == \"docling\" or extra == \"notebook\" or extra == \"dev\" or extra == \"llama-index\" or extra == \"deepeval\" or extra == \"dlt\") or python_version == \"3.12\" and (extra == \"notebook\" or extra == \"dev\" or extra == \"llama-index\" or extra == \"deepeval\" or extra == \"dlt\" or extra == \"docs\" or extra == \"docling\") or python_full_version == \"3.13.0\" and (extra == \"notebook\" or extra == \"dev\" or extra == \"llama-index\" or extra == \"deepeval\" or extra == \"dlt\" or extra == \"docs\" or extra == \"docling\") or extra == \"notebook\" or extra == \"dev\" or extra == \"llama-index\" or extra == \"deepeval\" or extra == \"dlt\""
markers = "platform_system == \"Linux\" and platform_machine == \"x86_64\" and (extra == \"docs\" or extra == \"docling\" or extra == \"notebook\" or extra == \"dev\" or extra == \"llama-index\" or extra == \"deepeval\" or extra == \"dlt\") or python_version >= \"3.12\" and (extra == \"notebook\" or extra == \"dev\" or extra == \"llama-index\" or extra == \"deepeval\" or extra == \"dlt\" or extra == \"docs\" or extra == \"docling\") or extra == \"notebook\" or extra == \"dev\" or extra == \"llama-index\" or extra == \"deepeval\" or extra == \"dlt\""
files = [
{file = "setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922"},
{file = "setuptools-80.9.0.tar.gz", hash = "sha256:f36b47402ecde768dbfafc46e8e4207b4360c654f1f3bb84475f0a28628fb19c"},
@@ -13510,7 +13509,7 @@ dev = ["coverage", "deptry", "gitpython", "mkdocs-material", "mkdocs-minify-plug
distributed = ["modal"]
dlt = ["dlt"]
docling = ["docling", "transformers"]
docs = ["unstructured"]
docs = ["lxml", "unstructured"]
evals = ["gdown", "matplotlib", "pandas", "plotly", "scikit-learn"]
graphiti = ["graphiti-core"]
groq = ["groq"]
@@ -13531,5 +13530,5 @@ scraping = ["APScheduler", "beautifulsoup4", "lxml", "playwright", "protego", "t
[metadata]
lock-version = "2.1"
python-versions = ">=3.10,<=3.13"
content-hash = "8d8172ac8ddc3c30ca79a1677ecf2a28897d52c0a564d8fb5646c8565c313a0f"
python-versions = ">=3.10,<3.14"
content-hash = "bcab5420339473ec08b89cde588899b60999762fb8ca9a011240d47ea86198e3"

@@ -7,7 +7,7 @@ authors = [
{ name = "Vasilije Markovic" },
{ name = "Boris Arzentar" },
]
requires-python = ">=3.10,<=3.13"
requires-python = ">=3.10,<3.14"
readme = "README.md"
license = "Apache-2.0"
classifiers = [
@@ -65,10 +65,10 @@ distributed = [
"modal>=1.0.5,<2.0.0",
]
scraping = [
"tavily-python>=0.7.0",
"tavily-python>=0.7.12",
"beautifulsoup4>=4.13.1",
"playwright>=1.9.0",
"lxml>=4.9.3,<5.0.0",
"lxml>=4.9.3",
"protego>=0.1",
"APScheduler>=3.10.0,<=3.11.0"
]
@@ -101,7 +101,7 @@ chromadb = [
"chromadb>=0.6,<0.7",
"pypika==0.48.9",
]
docs = ["unstructured[csv, doc, docx, epub, md, odt, org, ppt, pptx, rst, rtf, tsv, xlsx, pdf]>=0.18.1,<19"]
docs = ["lxml<6.0.0", "unstructured[csv, doc, docx, epub, md, odt, org, ppt, pptx, rst, rtf, tsv, xlsx, pdf]>=0.18.1,<19"]
codegraph = [
"fastembed<=0.6.0 ; python_version < '3.13'",
"transformers>=4.46.3,<5",