Merge branch 'main' of github.com:topoteretes/cognee into COG-382-refactor-demo-notebook

This commit is contained in:
Igor Ilic 2024-10-08 13:26:36 +02:00
commit 6f65a332b9
93 changed files with 878 additions and 1515 deletions

1
.gitignore vendored
View file

@ -10,7 +10,6 @@ __pycache__/
*.py[cod]
*$py.class
notebooks/
full_run.ipynb
evals/

View file

@ -65,16 +65,21 @@ You can use different LLM providers, for more info check out our <a href="https:
If you are using Networkx, create an account on Graphistry to visualize results:
```
cognee.config.set_graphistry_username = "YOUR_USERNAME"
cognee.config.set_graphistry_password = "YOUR_PASSWORD"
cognee.config.set_graphistry_config({
"username": "YOUR_USERNAME",
"password": "YOUR_PASSWORD"
})
```
(Optional) To run the UI, run:
(Optional) To run the UI, go to cognee-frontend directory and run:
```
docker-compose up cognee
npm run dev
```
Then navigate to localhost:3000/wizard
or run everything in a docker container:
```
docker-compose up
```
Then navigate to localhost:3000
### Simple example
@ -86,14 +91,14 @@ import cognee
text = """Natural language processing (NLP) is an interdisciplinary
subfield of computer science and information retrieval"""
await cognee.add([text], "example_dataset") # Add a new piece of information
await cognee.add(text) # Add a new piece of information
await cognee.cognify() # Use LLMs and cognee to create a semantic graph
await cognee.cognify() # Use LLMs and cognee to create a knowledge graph
await search_results = cognee.search("SIMILARITY", {'query': 'Tell me about NLP'}) # Query cognee for the knowledge
print(search_results)
search_results = await cognee.search("INSIGHTS", {'query': 'NLP'}) # Query cognee for the insights
for result in search_results:
do_something_with_result(result)
```
@ -115,7 +120,7 @@ Start with:
text = """Natural language processing (NLP) is an interdisciplinary
subfield of computer science and information retrieval"""
await cognee.add([text], "example_dataset") # Add a new piece of information
await cognee.add(text) # Add a new piece of information
```
2. In the next step we make a task. The task can be any business logic we need, but the important part is that it should be encapsulated in one function.

117
alembic.ini Normal file
View file

@ -0,0 +1,117 @@
# alembic.ini — read by Alembic through Python configparser; a literal % must be
# written %% (see file_template below). Blank lines appear stripped in this copy,
# which configparser tolerates.
# A generic, single database configuration.
[alembic]
# path to migration scripts
# Use forward slashes (/) also on windows to provide an os agnostic path
script_location = alembic
# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
# Uncomment the line below if you want the files to be prepended with date and time
# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file
# for all available tokens
# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
# sys.path path, will be prepended to sys.path if present.
# defaults to the current working directory.
prepend_sys_path = .
# timezone to use when rendering the date within the migration file
# as well as the filename.
# If specified, requires the python>=3.9 or backports.zoneinfo library.
# Any required deps can be installed by adding `alembic[tz]` to the pip requirements
# string value is passed to ZoneInfo()
# leave blank for localtime
# timezone =
# max length of characters to apply to the "slug" field
# truncate_slug_length = 40
# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false
# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
# sourceless = false
# version location specification; This defaults
# to alembic/versions. When using multiple version
# directories, initial revisions must be specified with --version-path.
# The path separator used here should be the separator specified by "version_path_separator" below.
# version_locations = %(here)s/bar:%(here)s/bat:alembic/versions
# version path separator; As mentioned above, this is the character used to split
# version_locations. The default within new alembic.ini files is "os", which uses os.pathsep.
# If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas.
# Valid values for version_path_separator are:
#
# version_path_separator = :
# version_path_separator = ;
# version_path_separator = space
# version_path_separator = newline
version_path_separator = os # Use os.pathsep. Default configuration used for new projects.
# set to 'true' to search source files recursively
# in each "version_locations" directory
# new in Alembic version 1.10
# recursive_version_locations = false
# the output encoding used when revision files
# are written from script.py.mako
# output_encoding = utf-8
# The URI is injected at runtime by alembic/env.py via config.set_section_option,
# then resolved here through configparser %(...)s interpolation.
sqlalchemy.url = %(SQLALCHEMY_DATABASE_URI)s
[post_write_hooks]
# post_write_hooks defines scripts or Python functions that are run
# on newly generated revision scripts. See the documentation for further
# detail and examples
# format using "black" - use the console_scripts runner, against the "black" entrypoint
# hooks = black
# black.type = console_scripts
# black.entrypoint = black
# black.options = -l 79 REVISION_SCRIPT_FILENAME
# lint with attempts to fix using "ruff" - use the exec runner, execute a binary
# hooks = ruff
# ruff.type = exec
# ruff.executable = %(here)s/.venv/bin/ruff
# ruff.options = --fix REVISION_SCRIPT_FILENAME
# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARN
handlers = console
qualname =
[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine
[logger_alembic]
level = INFO
handlers =
qualname = alembic
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S

1
alembic/README Normal file
View file

@ -0,0 +1 @@
Generic single-database configuration with an async dbapi.

106
alembic/env.py Normal file
View file

@ -0,0 +1,106 @@
"""Alembic migration environment for cognee.

Resolves the database URI from cognee's relational config at runtime and
injects it into alembic.ini's %(SQLALCHEMY_DATABASE_URI)s placeholder,
then dispatches to offline or online (async) migration mode.
"""
import asyncio
from logging.config import fileConfig

from sqlalchemy import pool
from sqlalchemy.engine import Connection
from sqlalchemy.ext.asyncio import async_engine_from_config

from alembic import context

from cognee.infrastructure.databases.relational import (
    Base,
    get_relational_config,
    get_relational_engine,
)

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
if config.config_file_name is not None:
    fileConfig(config.config_file_name)

# Model metadata used by Alembic for 'autogenerate' support.
target_metadata = Base.metadata


def run_migrations_offline() -> None:
    """Run migrations in 'offline' mode.

    Configures the context with just a URL and not an Engine, so no DBAPI
    needs to be available. Calls to context.execute() emit the given string
    to the script output.
    """
    url = config.get_main_option("sqlalchemy.url")
    context.configure(
        url = url,
        target_metadata = target_metadata,
        literal_binds = True,
        dialect_opts = {"paramstyle": "named"},
    )

    with context.begin_transaction():
        context.run_migrations()


def do_run_migrations(connection: Connection) -> None:
    """Configure the migration context on an open connection and run migrations."""
    context.configure(connection = connection, target_metadata = target_metadata)

    with context.begin_transaction():
        context.run_migrations()


async def run_async_migrations() -> None:
    """Create an async Engine and run migrations over one of its connections."""
    connectable = async_engine_from_config(
        config.get_section(config.config_ini_section, {}),
        prefix = "sqlalchemy.",
        poolclass = pool.NullPool,
    )

    async with connectable.connect() as connection:
        await connection.run_sync(do_run_migrations)

    await connectable.dispose()


def run_migrations_online() -> None:
    """Run migrations in 'online' mode by driving the async engine."""
    asyncio.run(run_async_migrations())


db_engine = get_relational_engine()

if db_engine.engine.dialect.name == "sqlite":
    # SQLite keeps the database in a file; make sure its directory exists
    # before Alembic opens a connection.
    from cognee.infrastructure.files.storage import LocalStorage

    db_config = get_relational_config()
    LocalStorage.ensure_directory_exists(db_config.db_path)

# Inject the runtime URI so alembic.ini's %(SQLALCHEMY_DATABASE_URI)s resolves.
config.set_section_option(
    config.config_ini_section,
    "SQLALCHEMY_DATABASE_URI",
    db_engine.db_uri,
)

if context.is_offline_mode():
    print("OFFLINE MODE")
    run_migrations_offline()
else:
    run_migrations_online()

26
alembic/script.py.mako Normal file
View file

@ -0,0 +1,26 @@
"""${message}
Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}
# revision identifiers, used by Alembic.
revision: str = ${repr(up_revision)}
down_revision: Union[str, None] = ${repr(down_revision)}
branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
def upgrade() -> None:
${upgrades if upgrades else "pass"}
def downgrade() -> None:
${downgrades if downgrades else "pass"}

View file

@ -0,0 +1,26 @@
"""Initial migration
Revision ID: 8057ae7329c2
Revises:
Create Date: 2024-10-02 12:55:20.989372
"""
from typing import Sequence, Union
from sqlalchemy.util import await_only
from cognee.infrastructure.databases.relational import get_relational_engine
# revision identifiers, used by Alembic.
revision: str = "8057ae7329c2"
down_revision: Union[str, None] = None
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
db_engine = get_relational_engine()
await_only(db_engine.create_database())
def downgrade() -> None:
db_engine = get_relational_engine()
await_only(db_engine.delete_database())

View file

@ -25,23 +25,40 @@ export default function SearchView() {
}, []);
const searchOptions = [{
value: 'SIMILARITY',
label: 'Look for similar graph nodes',
value: 'INSIGHTS',
label: 'Query insights from documents',
}, {
value: 'SUMMARY',
label: 'Get a summary related to query',
value: 'SUMMARIES',
label: 'Query document summaries',
}, {
value: 'ADJACENT',
label: 'Look for graph node\'s neighbors',
}, {
value: 'TRAVERSE',
label: 'Traverse through the graph and get knowledge',
value: 'CHUNKS',
label: 'Query document chunks',
}];
const [searchType, setSearchType] = useState(searchOptions[0]);
const scrollToBottom = useCallback(() => {
setTimeout(() => {
const messagesContainerElement = document.getElementById('messages');
if (messagesContainerElement) {
const messagesElements = messagesContainerElement.children[0];
if (messagesElements) {
messagesContainerElement.scrollTo({
top: messagesElements.scrollHeight,
behavior: 'smooth',
});
}
}
}, 300);
}, []);
const handleSearchSubmit = useCallback((event: React.FormEvent<HTMLFormElement>) => {
event.preventDefault();
if (inputValue.trim() === '') {
return;
}
setMessages((currentMessages) => [
...currentMessages,
{
@ -51,16 +68,18 @@ export default function SearchView() {
},
]);
scrollToBottom();
const searchTypeValue = searchType.value;
fetch('/v1/search', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
query_params: {
query: inputValue,
searchType: searchType.value,
},
query: inputValue,
searchType: searchTypeValue,
}),
})
.then((response) => response.json())
@ -70,12 +89,14 @@ export default function SearchView() {
{
id: v4(),
user: 'system',
text: systemMessage,
text: convertToSearchTypeOutput(systemMessage, searchTypeValue),
},
]);
setInputValue('');
scrollToBottom();
})
}, [inputValue, searchType]);
}, [inputValue, scrollToBottom, searchType.value]);
const {
value: isInputExpanded,
@ -83,6 +104,12 @@ export default function SearchView() {
setFalse: contractInput,
} = useBoolean(false);
const handleSubmitOnEnter = (event: React.KeyboardEvent<HTMLTextAreaElement>) => {
if (event.key === 'Enter' && !event.shiftKey) {
handleSearchSubmit(event as unknown as React.FormEvent<HTMLFormElement>);
}
};
return (
<Stack className={styles.searchViewContainer}>
<DropdownSelect<SelectOption>
@ -90,7 +117,7 @@ export default function SearchView() {
options={searchOptions}
onChange={setSearchType}
/>
<div className={styles.messagesContainer}>
<div className={styles.messagesContainer} id="messages">
<Stack gap="2" className={styles.messages} align="end">
{messages.map((message) => (
<Text
@ -108,10 +135,41 @@ export default function SearchView() {
</div>
<form onSubmit={handleSearchSubmit}>
<Stack orientation="horizontal" align="end" gap="2">
<TextArea style={{ height: isInputExpanded ? '128px' : '38px' }} onFocus={expandInput} onBlur={contractInput} value={inputValue} onChange={handleInputChange} name="searchInput" placeholder="Search" />
<TextArea onKeyUp={handleSubmitOnEnter} style={{ transition: 'height 0.3s ease', height: isInputExpanded ? '128px' : '38px' }} onFocus={expandInput} onBlur={contractInput} value={inputValue} onChange={handleInputChange} name="searchInput" placeholder="Search" />
<CTAButton hugContent type="submit">Search</CTAButton>
</Stack>
</form>
</Stack>
);
}
interface Node {
  name: string;
}

interface Relationship {
  relationship_name: string;
}

type InsightMessage = [Node, Relationship, Node];

/**
 * Convert raw search results into a display string for the given search type.
 * Returns an empty string for unknown search types.
 */
function convertToSearchTypeOutput(systemMessages: any[], searchType: string): string {
  switch (searchType) {
    case 'INSIGHTS':
      // Each message is a (node, relationship, node) triple; triples with an
      // unnamed node render as an empty line, matching the API's sparse output.
      return systemMessages.map((message: InsightMessage) => {
        const [node1, relationship, node2] = message;

        if (node1.name && node2.name) {
          return `${node1.name} ${relationship.relationship_name} ${node2.name}.`;
        }

        return '';
      }).join('\n');
    // SUMMARIES and CHUNKS results share the same { text } shape, so the two
    // cases deliberately fall through to one handler (was duplicated code).
    case 'SUMMARIES':
    case 'CHUNKS':
      return systemMessages.map((message: { text: string }) => message.text).join('\n');
    default:
      return '';
  }
}

View file

@ -2,7 +2,7 @@ from .api.v1.config.config import config
from .api.v1.add import add
from .api.v1.cognify import cognify
from .api.v1.datasets.datasets import datasets
from .api.v1.search.search import search, SearchType
from .api.v1.search import search, SearchType
from .api.v1.prune import prune
# Pipelines

View file

@ -7,9 +7,10 @@ import sentry_sdk
from typing import Dict, Any, List, Union, Optional, Literal
from typing_extensions import Annotated
from fastapi import FastAPI, HTTPException, Form, UploadFile, Query, Depends
from fastapi.responses import JSONResponse, FileResponse
from fastapi.responses import JSONResponse, FileResponse, Response
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from cognee.api.v1.search import SearchType
from cognee.modules.users.models import User
from cognee.modules.users.methods import get_authenticated_user
@ -30,20 +31,24 @@ if os.getenv("ENV", "prod") == "prod":
from contextlib import asynccontextmanager
app_environment = os.getenv("ENV", "prod")
@asynccontextmanager
async def lifespan(app: FastAPI):
from cognee.infrastructure.databases.relational import create_db_and_tables
# from cognee.modules.data.deletion import prune_system, prune_data
# await prune_data()
# await prune_system(metadata = True)
if app_environment == "local" or app_environment == "dev":
from cognee.infrastructure.databases.relational import get_relational_engine
db_engine = get_relational_engine()
await db_engine.create_database()
from cognee.modules.data.deletion import prune_system, prune_data
await prune_data()
await prune_system(metadata = True)
from cognee.modules.users.methods import get_default_user
await get_default_user()
# Not needed if you setup a migration system like Alembic
await create_db_and_tables()
yield
app = FastAPI(debug = os.getenv("ENV", "prod") != "prod", lifespan = lifespan)
app = FastAPI(debug = app_environment != "prod", lifespan = lifespan)
app.add_middleware(
CORSMiddleware,
@ -124,7 +129,7 @@ def health_check():
"""
Health check endpoint that returns the server status.
"""
return {"status": "OK"}
return Response(status_code = 200)
@app.get("/api/v1/datasets", response_model = list)
async def get_datasets(user: User = Depends(get_authenticated_user)):
@ -326,20 +331,15 @@ async def cognify(payload: CognifyPayload, user: User = Depends(get_authenticate
)
class SearchPayload(BaseModel):
query_params: Dict[str, Any]
searchType: SearchType
query: str
@app.post("/api/v1/search", response_model=dict)
@app.post("/api/v1/search", response_model=list)
async def search(payload: SearchPayload, user: User = Depends(get_authenticated_user)):
""" This endpoint is responsible for searching for nodes in the graph."""
from cognee.api.v1.search import search as cognee_search
try:
search_type = payload.query_params["searchType"]
params = {
"query": payload.query_params["query"],
}
results = await cognee_search(search_type, params)
results = await cognee_search(payload.searchType, payload.query, user)
return JSONResponse(
status_code = 200,

View file

@ -14,8 +14,7 @@ from cognee.modules.users.models import User
from cognee.modules.users.methods import get_default_user
from cognee.modules.pipelines.operations.get_pipeline_status import get_pipeline_status
from cognee.modules.pipelines.operations.log_pipeline_status import log_pipeline_status
from cognee.tasks import chunk_extract_summary, \
chunk_naive_llm_classifier, \
from cognee.tasks import chunk_naive_llm_classifier, \
chunk_remove_disconnected, \
infer_data_ontology, \
save_chunks_to_store, \
@ -24,6 +23,7 @@ from cognee.tasks import chunk_extract_summary, \
source_documents_to_chunks, \
check_permissions_on_documents, \
classify_documents
from cognee.tasks.summarization import summarize_text
logger = logging.getLogger("cognify.v2")
@ -101,10 +101,10 @@ async def run_cognify_pipeline(dataset: Dataset, user: User):
), # Save the document chunks in vector db and as nodes in graph db (connected to the document node and between each other)
run_tasks_parallel([
Task(
chunk_extract_summary,
summarize_text,
summarization_model = cognee_config.summarization_model,
collection_name = "chunk_summaries",
), # Summarize the document chunks
collection_name = "summaries",
),
Task(
chunk_naive_llm_classifier,
classification_model = cognee_config.classification_model,

View file

@ -107,14 +107,11 @@ class config():
vector_db_config.vector_db_url = db_url
@staticmethod
def set_graphistry_config(graphistry_config: dict[str, str]):
    """Store Graphistry credentials on the base config.

    Args:
        graphistry_config: dict containing "username" and "password" keys.

    Raises:
        ValueError: if either required key is missing.
    """
    base_config = get_base_config()

    if "username" not in graphistry_config or "password" not in graphistry_config:
        raise ValueError("graphistry_config dictionary must contain 'username' and 'password' keys.")

    # BUG FIX: graphistry_config is a plain dict, so the values must be read by
    # subscription — attribute access (graphistry_config.username) raises
    # AttributeError on a dict.
    base_config.graphistry_username = graphistry_config["username"]
    base_config.graphistry_password = graphistry_config["password"]

View file

@ -1 +1 @@
from .search import search, SearchType
from .search_v2 import search, SearchType

View file

@ -0,0 +1,56 @@
from uuid import UUID
from enum import Enum
from typing import Callable, Dict
from cognee.shared.utils import send_telemetry
from cognee.modules.users.models import User
from cognee.modules.users.methods import get_default_user
from cognee.modules.users.permissions.methods import get_document_ids_for_user
from cognee.tasks.chunking import query_chunks
from cognee.tasks.graph import query_graph_connections
from cognee.tasks.summarization import query_summaries
class SearchType(Enum):
    """Identifiers for the supported search modes."""

    SUMMARIES = "SUMMARIES"
    INSIGHTS = "INSIGHTS"
    CHUNKS = "CHUNKS"
async def search(search_type: SearchType, query: str, user: User = None) -> list:
    """Run a search of the given type and return only results the user may see.

    Falls back to the default user when none is supplied.

    Raises:
        PermissionError: if no user is supplied and no default user exists.
    """
    if user is None:
        user = await get_default_user()

    if user is None:
        raise PermissionError("No user found in the system. Please create a user.")

    own_document_ids = await get_document_ids_for_user(user.id)
    search_results = await specific_search(search_type, query, user)

    filtered_search_results = []

    for search_result in search_results:
        # NOTE(review): assumes each result is a mapping; document_id may be absent.
        document_id = search_result.get("document_id")

        # Normalize string ids to UUID so membership against own_document_ids works.
        if isinstance(document_id, str):
            document_id = UUID(document_id)

        # Results without a document_id are not ownership-scoped and pass through.
        if document_id is None or document_id in own_document_ids:
            filtered_search_results.append(search_result)

    return filtered_search_results
async def specific_search(search_type: SearchType, query: str, user) -> list:
    """Dispatch the query to the task registered for the given search type.

    Raises:
        ValueError: if no task is registered for search_type.
    """
    registered_tasks: Dict[SearchType, Callable] = {
        SearchType.SUMMARIES: query_summaries,
        SearchType.INSIGHTS: query_graph_connections,
        SearchType.CHUNKS: query_chunks,
    }

    task = registered_tasks.get(search_type)

    if task is None:
        raise ValueError(f"Unsupported search type: {search_type}")

    send_telemetry("cognee.search EXECUTION STARTED", user.id)

    results = await task(query)

    send_telemetry("cognee.search EXECUTION COMPLETED", user.id)

    return results

View file

@ -7,6 +7,7 @@ from contextlib import asynccontextmanager
from neo4j import AsyncSession
from neo4j import AsyncGraphDatabase
from neo4j.exceptions import Neo4jError
from networkx import predecessor
from cognee.infrastructure.databases.graph.graph_db_interface import GraphDBInterface
logger = logging.getLogger("Neo4jAdapter")
@ -307,12 +308,12 @@ class Neo4jAdapter(GraphDBInterface):
return await self.query(query)
async def get_predecessor_ids(self, node_id: str, edge_label: str = None) -> list[str]:
async def get_predecessors(self, node_id: str, edge_label: str = None) -> list[str]:
if edge_label is not None:
query = """
MATCH (node)<-[r]-(predecessor)
WHERE node.id = $node_id AND type(r) = $edge_label
RETURN predecessor.id AS predecessor_id
RETURN predecessor
"""
results = await self.query(
@ -323,12 +324,12 @@ class Neo4jAdapter(GraphDBInterface):
)
)
return [result["predecessor_id"] for result in results]
return [result["predecessor"] for result in results]
else:
query = """
MATCH (node)<-[r]-(predecessor)
WHERE node.id = $node_id
RETURN predecessor.id AS predecessor_id
RETURN predecessor
"""
results = await self.query(
@ -338,14 +339,14 @@ class Neo4jAdapter(GraphDBInterface):
)
)
return [result["predecessor_id"] for result in results]
return [result["predecessor"] for result in results]
async def get_successor_ids(self, node_id: str, edge_label: str = None) -> list[str]:
async def get_successors(self, node_id: str, edge_label: str = None) -> list[str]:
if edge_label is not None:
query = """
MATCH (node)-[r]->(successor)
WHERE node.id = $node_id AND type(r) = $edge_label
RETURN successor.id AS successor_id
RETURN successor
"""
results = await self.query(
@ -356,12 +357,12 @@ class Neo4jAdapter(GraphDBInterface):
),
)
return [result["successor_id"] for result in results]
return [result["successor"] for result in results]
else:
query = """
MATCH (node)-[r]->(successor)
WHERE node.id = $node_id
RETURN successor.id AS successor_id
RETURN successor
"""
results = await self.query(
@ -371,12 +372,41 @@ class Neo4jAdapter(GraphDBInterface):
)
)
return [result["successor_id"] for result in results]
return [result["successor"] for result in results]
async def get_neighbours(self, node_id: str) -> list[str]:
predecessor_ids, successor_ids = await asyncio.gather(self.get_predecessor_ids(node_id), self.get_successor_ids(node_id))
async def get_neighbours(self, node_id: str) -> List[Dict[str, Any]]:
predecessors, successors = await asyncio.gather(self.get_predecessors(node_id), self.get_successors(node_id))
return [*predecessor_ids, *successor_ids]
return predecessors + successors
async def get_connections(self, node_id: str) -> list:
# Return one (start_node, {"relationship_name": ...}, end_node) triple per edge
# touching node_id — incoming edges first, then outgoing, matching gather order.
predecessors_query = """
MATCH (node)<-[relation]-(neighbour)
WHERE node.id = $node_id
RETURN neighbour, relation, node
"""
successors_query = """
MATCH (node)-[relation]->(neighbour)
WHERE node.id = $node_id
RETURN node, relation, neighbour
"""
# Run both directions concurrently against Neo4j.
predecessors, successors = await asyncio.gather(
self.query(predecessors_query, dict(node_id = node_id)),
self.query(successors_query, dict(node_id = node_id)),
)
connections = []
# NOTE(review): assumes record["relation"] unpacks positionally as
# (start_node, relationship_type, end_node) — confirm against the driver's
# Relationship representation returned by self.query.
for neighbour in predecessors:
neighbour = neighbour["relation"]
connections.append((neighbour[0], { "relationship_name": neighbour[1] }, neighbour[2]))
for neighbour in successors:
neighbour = neighbour["relation"]
connections.append((neighbour[0], { "relationship_name": neighbour[1] }, neighbour[2]))
return connections
async def remove_connection_to_predecessors_of(self, node_ids: list[str], edge_label: str) -> None:
query = f"""

View file

@ -139,29 +139,35 @@ class NetworkXAdapter(GraphDBInterface):
async def extract_nodes(self, node_ids: List[str]) -> List[dict]:
return [self.graph.nodes[node_id] for node_id in node_ids if self.graph.has_node(node_id)]
async def get_predecessor_ids(self, node_id: str, edge_label: str = None) -> list:
async def get_predecessors(self, node_id: str, edge_label: str = None) -> list:
if self.graph.has_node(node_id):
if edge_label is None:
return list(self.graph.predecessors(node_id))
return [
self.graph.nodes[predecessor] for predecessor \
in list(self.graph.predecessors(node_id))
]
nodes = []
for predecessor_id in list(self.graph.predecessors(node_id)):
if self.graph.has_edge(predecessor_id, node_id, edge_label):
nodes.append(predecessor_id)
nodes.append(self.graph.nodes[predecessor_id])
return nodes
async def get_successor_ids(self, node_id: str, edge_label: str = None) -> list:
async def get_successors(self, node_id: str, edge_label: str = None) -> list:
if self.graph.has_node(node_id):
if edge_label is None:
return list(self.graph.successors(node_id))
return [
self.graph.nodes[successor] for successor \
in list(self.graph.successors(node_id))
]
nodes = []
for successor_id in list(self.graph.successors(node_id)):
if self.graph.has_edge(node_id, successor_id, edge_label):
nodes.append(successor_id)
nodes.append(self.graph.nodes[successor_id])
return nodes
@ -169,19 +175,44 @@ class NetworkXAdapter(GraphDBInterface):
if not self.graph.has_node(node_id):
return []
predecessor_ids, successor_ids = await asyncio.gather(
self.get_predecessor_ids(node_id),
self.get_successor_ids(node_id),
predecessors, successors = await asyncio.gather(
self.get_predecessors(node_id),
self.get_successors(node_id),
)
neighbour_ids = predecessor_ids + successor_ids
neighbours = predecessors + successors
if len(neighbour_ids) == 0:
return neighbours
async def get_connections(self, node_id: str) -> list:
if not self.graph.has_node(node_id):
return []
nodes = await self.extract_nodes(neighbour_ids)
node = self.graph.nodes[node_id]
return nodes
if "uuid" not in node:
return []
predecessors, successors = await asyncio.gather(
self.get_predecessors(node_id),
self.get_successors(node_id),
)
connections = []
for neighbor in predecessors:
if "uuid" in neighbor:
edge_data = self.graph.get_edge_data(neighbor["uuid"], node["uuid"])
for edge_properties in edge_data.values():
connections.append((neighbor, edge_properties, node))
for neighbor in successors:
if "uuid" in neighbor:
edge_data = self.graph.get_edge_data(node["uuid"], neighbor["uuid"])
for edge_properties in edge_data.values():
connections.append((node, edge_properties, neighbor))
return connections
async def remove_connection_to_predecessors_of(self, node_ids: list[str], edge_label: str) -> None:
for node_id in node_ids:

View file

@ -1,3 +1,4 @@
from os import path
from typing import AsyncGenerator
from contextlib import asynccontextmanager
from sqlalchemy import text, select
@ -8,6 +9,9 @@ from ..ModelBase import Base
class SQLAlchemyAdapter():
def __init__(self, connection_string: str):
self.db_path: str = None
self.db_uri: str = connection_string
self.engine = create_async_engine(connection_string)
self.sessionmaker = async_sessionmaker(bind=self.engine, expire_on_commit=False)
@ -93,12 +97,24 @@ class SQLAlchemyAdapter():
except Exception as e:
print(f"Error dropping database tables: {e}")
async def create_database(self):
if self.engine.dialect.name == "sqlite":
from cognee.infrastructure.files.storage import LocalStorage
db_directory = path.dirname(self.db_path)
LocalStorage.ensure_directory_exists(db_directory)
async with self.engine.begin() as connection:
if len(Base.metadata.tables.keys()) > 0:
await connection.run_sync(Base.metadata.create_all)
async def delete_database(self):
try:
if self.engine.dialect.name == "sqlite":
from cognee.infrastructure.files.storage import LocalStorage
print(f"DB_PATH: {self.db_path}")
LocalStorage.remove(self.db_path)
self.db_path = None
else:

View file

@ -1,6 +1,6 @@
from uuid import UUID, uuid5, NAMESPACE_OID
from cognee.modules.data.processing.chunk_types.DocumentChunk import DocumentChunk
from cognee.modules.chunking import DocumentChunk
from cognee.tasks.chunking import chunk_by_paragraph
class TextChunker():

View file

@ -0,0 +1,2 @@
from .models.DocumentChunk import DocumentChunk
from .TextChunker import TextChunker

View file

@ -1,6 +1,6 @@
from uuid import UUID, uuid5, NAMESPACE_OID
from cognee.infrastructure.llm.get_llm_client import get_llm_client
from cognee.modules.data.chunking.TextChunker import TextChunker
from cognee.modules.chunking.TextChunker import TextChunker
from .Document import Document
class AudioDocument(Document):

View file

@ -1,6 +1,6 @@
from uuid import UUID, uuid5, NAMESPACE_OID
from cognee.infrastructure.llm.get_llm_client import get_llm_client
from cognee.modules.data.chunking.TextChunker import TextChunker
from cognee.modules.chunking.TextChunker import TextChunker
from .Document import Document

View file

@ -1,6 +1,6 @@
from uuid import UUID, uuid5, NAMESPACE_OID
from pypdf import PdfReader
from cognee.modules.data.chunking.TextChunker import TextChunker
from cognee.modules.chunking.TextChunker import TextChunker
from .Document import Document
class PdfDocument(Document):

View file

@ -1,5 +1,5 @@
from uuid import UUID, uuid5, NAMESPACE_OID
from cognee.modules.data.chunking.TextChunker import TextChunker
from cognee.modules.chunking.TextChunker import TextChunker
from .Document import Document
class TextDocument(Document):

View file

@ -1,5 +1,5 @@
from cognee.infrastructure.databases.vector import get_vector_engine
from .chunk_types import DocumentChunk
from cognee.modules.chunking import DocumentChunk
async def has_new_chunks(data_chunks: list[DocumentChunk], collection_name: str) -> list[DocumentChunk]:
vector_engine = get_vector_engine()

View file

@ -10,7 +10,7 @@ async def search_summary(query: str) -> list:
"""
vector_engine = get_vector_engine()
summaries_results = await vector_engine.search("chunk_summaries", query, limit = 5)
summaries_results = await vector_engine.search("summaries", query, limit = 5)
summaries = [summary.payload for summary in summaries_results]

View file

@ -1,10 +1,10 @@
from .chunk_extract_summary.chunk_extract_summary import chunk_extract_summary
from .summarization.summarize_text import summarize_text
from .chunk_naive_llm_classifier.chunk_naive_llm_classifier import chunk_naive_llm_classifier
from .chunk_remove_disconnected.chunk_remove_disconnected import chunk_remove_disconnected
from .chunk_update_check.chunk_update_check import chunk_update_check
from .chunks_into_graph.chunks_into_graph import chunks_into_graph
from .save_chunks_to_store.save_chunks_to_store import save_chunks_to_store
from .source_documents_to_chunks.source_documents_to_chunks import source_documents_to_chunks
from .infer_data_ontology.infer_data_ontology import infer_data_ontology
from .check_permissions_on_documents.check_permissions_on_documents import check_permissions_on_documents
from .classify_documents.classify_documents import classify_documents
from .graph.chunks_into_graph import chunks_into_graph

View file

@ -5,7 +5,7 @@ from pydantic import BaseModel
from cognee.infrastructure.databases.graph import get_graph_engine
from cognee.infrastructure.databases.vector import get_vector_engine, DataPoint
from cognee.modules.data.extraction.extract_categories import extract_categories
from cognee.modules.data.processing.chunk_types.DocumentChunk import DocumentChunk
from cognee.modules.chunking import DocumentChunk
async def chunk_naive_llm_classifier(data_chunks: list[DocumentChunk], classification_model: Type[BaseModel]):

View file

@ -1,5 +1,5 @@
from cognee.infrastructure.databases.graph import get_graph_engine
from cognee.modules.data.processing.chunk_types.DocumentChunk import DocumentChunk
from cognee.modules.chunking import DocumentChunk
async def chunk_remove_disconnected(data_chunks: list[DocumentChunk]) -> list[DocumentChunk]:
graph_engine = await get_graph_engine()
@ -9,13 +9,13 @@ async def chunk_remove_disconnected(data_chunks: list[DocumentChunk]) -> list[Do
obsolete_chunk_ids = []
for document_id in document_ids:
chunk_ids = await graph_engine.get_successor_ids(document_id, edge_label = "has_chunk")
chunks = await graph_engine.get_successors(document_id, edge_label = "has_chunk")
for chunk_id in chunk_ids:
previous_chunks = await graph_engine.get_predecessor_ids(chunk_id, edge_label = "next_chunk")
for chunk in chunks:
previous_chunks = await graph_engine.get_predecessors(chunk["uuid"], edge_label = "next_chunk")
if len(previous_chunks) == 0:
obsolete_chunk_ids.append(chunk_id)
obsolete_chunk_ids.append(chunk["uuid"])
if len(obsolete_chunk_ids) > 0:
await graph_engine.delete_nodes(obsolete_chunk_ids)

View file

@ -1,5 +1,5 @@
from cognee.infrastructure.databases.vector import get_vector_engine
from cognee.modules.data.processing.chunk_types.DocumentChunk import DocumentChunk
from cognee.modules.chunking import DocumentChunk
async def chunk_update_check(data_chunks: list[DocumentChunk], collection_name: str) -> list[DocumentChunk]:

View file

@ -1,3 +1,4 @@
from .query_chunks import query_chunks
from .chunk_by_word import chunk_by_word
from .chunk_by_sentence import chunk_by_sentence
from .chunk_by_paragraph import chunk_by_paragraph

View file

@ -1,8 +1,6 @@
from uuid import uuid5, NAMESPACE_OID
from .chunk_by_sentence import chunk_by_sentence
from cognee.tasks.chunking.chunking_registry import register_chunking_function
@register_chunking_function("paragraph")
def chunk_by_paragraph(data: str, paragraph_length: int = 1024, batch_paragraphs = True):
paragraph = ""
last_cut_type = None

View file

@ -3,9 +3,7 @@
from uuid import uuid4
from .chunk_by_word import chunk_by_word
from cognee.tasks.chunking.chunking_registry import register_chunking_function
@register_chunking_function("sentence")
def chunk_by_sentence(data: str):
sentence = ""
paragraph_id = uuid4()

View file

@ -1,10 +0,0 @@
# Global mapping of registered chunking strategy names to their functions.
chunking_registry = {}

def register_chunking_function(name):
    """Decorator factory: register the decorated function under `name`."""
    def _wrap(fn):
        chunking_registry[name] = fn
        return fn
    return _wrap

def get_chunking_function(name: str):
    """Look up a chunking function by name; returns None if unregistered."""
    return chunking_registry.get(name)

View file

@ -0,0 +1,17 @@
from cognee.infrastructure.databases.vector import get_vector_engine
async def query_chunks(query: str) -> list[dict]:
    """
    Return the payloads of the document chunks most similar to `query`.

    Parameters:
    - query (str): The query string to match chunks against.

    Returns:
    - list[dict]: Payload dicts describing the chunks related to the query.
    """
    # Vector similarity search over the "chunks" collection, top 5 hits.
    engine = get_vector_engine()
    hits = await engine.search("chunks", query, limit = 5)
    return [hit.payload for hit in hits]

View file

@ -0,0 +1,2 @@
from .chunks_into_graph import chunks_into_graph
from .query_graph_connections import query_graph_connections

View file

@ -7,7 +7,7 @@ from pydantic import BaseModel
from cognee.infrastructure.databases.graph import get_graph_engine
from cognee.infrastructure.databases.vector import DataPoint, get_vector_engine
from cognee.modules.data.extraction.knowledge_graph.extract_content_graph import extract_content_graph
from cognee.modules.data.processing.chunk_types.DocumentChunk import DocumentChunk
from cognee.modules.chunking import DocumentChunk
from cognee.modules.graph.utils import generate_node_id, generate_node_name
@ -53,7 +53,7 @@ async def chunks_into_graph(data_chunks: list[DocumentChunk], graph_model: Type[
processed_nodes[entity_node_id] = True
graph_node_edges = [
(edge.source_node_id, edge.target_node_id, edge.relationship_name) \
(edge.target_node_id, edge.source_node_id, edge.relationship_name) \
for edge in chunk_graph.edges
]

View file

@ -0,0 +1,62 @@
import asyncio
from cognee.infrastructure.databases.graph import get_graph_engine
from cognee.infrastructure.databases.vector import get_vector_engine
async def query_graph_connections(query: str, exploration_levels = 1) -> list[(str, str, str)]:
    """
    Find the neighbours of a given node in the graph and return formed sentences.
    Parameters:
    - query (str): The query string to filter nodes by.
    - exploration_levels (int): The number of jumps through edges to perform.
    Returns:
    - list[(str, str, str)]: A list containing the source and destination nodes and relationship.
    """
    # NOTE(review): exploration_levels is currently unused — only direct
    # connections returned by get_connections are considered.
    if query is None:
        return []
    node_id = query
    graph_engine = await get_graph_engine()
    # First try to resolve the query string as an exact node id in the graph.
    exact_node = await graph_engine.extract_node(node_id)
    if exact_node is not None and "uuid" in exact_node:
        node_connections = await graph_engine.get_connections(exact_node["uuid"])
    else:
        # Fall back to vector similarity: search the entity and classification
        # collections concurrently and merge the hits.
        vector_engine = get_vector_engine()
        results = await asyncio.gather(
            vector_engine.search("entities", query_text = query, limit = 5),
            vector_engine.search("classification", query_text = query, limit = 5),
        )
        results = [*results[0], *results[1]]
        # score < 0.5 — assumes distance-style scores where lower means a
        # closer match; TODO confirm against the configured vector engine.
        relevant_results = [result for result in results if result.score < 0.5][:5]
        if len(relevant_results) == 0:
            return []
        # Fetch graph connections for all relevant nodes concurrently.
        node_connections_results = await asyncio.gather(
            *[graph_engine.get_connections(result.payload["uuid"]) for result in relevant_results]
        )
        node_connections = []
        for neighbours in node_connections_results:
            node_connections.extend(neighbours)
    # Deduplicate (source, relationship, target) triplets keyed by their uuids
    # and relationship name, preserving first-seen order.
    unique_node_connections_map = {}
    unique_node_connections = []
    for node_connection in node_connections:
        # Skip malformed triplets whose endpoints lack a uuid.
        if "uuid" not in node_connection[0] or "uuid" not in node_connection[2]:
            continue
        unique_id = f"{node_connection[0]['uuid']} {node_connection[1]['relationship_name']} {node_connection[2]['uuid']}"
        if unique_id not in unique_node_connections_map:
            unique_node_connections_map[unique_id] = True
            unique_node_connections.append(node_connection)
    return unique_node_connections

View file

@ -1,6 +1,6 @@
from cognee.infrastructure.databases.vector import DataPoint, get_vector_engine
from cognee.infrastructure.databases.graph import get_graph_engine
from cognee.modules.data.processing.chunk_types.DocumentChunk import DocumentChunk
from cognee.modules.chunking import DocumentChunk
async def save_chunks_to_store(data_chunks: list[DocumentChunk], collection_name: str):
if len(data_chunks) == 0:
@ -44,7 +44,7 @@ async def save_chunks_to_store(data_chunks: list[DocumentChunk], collection_name
chunk_nodes.append((
str(chunk.chunk_id),
dict(
id = str(chunk.chunk_id),
uuid = str(chunk.chunk_id),
chunk_id = str(chunk.chunk_id),
document_id = str(chunk.document_id),
word_count = chunk.word_count,

View file

@ -0,0 +1,42 @@
from cognee.infrastructure.databases.vector import get_vector_engine, DataPoint
async def save_to_vector_storage(data_chunks: list, collection_name: str, embed_field: str):
    """
    Store `data_chunks` as data points in the vector collection `collection_name`.

    All chunks must share the same type; that type is used as the payload
    schema for the collection. `embed_field` names the payload attribute
    whose text will be embedded.

    Returns the (unmodified) list of chunks so the task can be chained.
    """
    if len(data_chunks) == 0:
        return data_chunks

    # The payload schema is derived from the first chunk; a mixed batch would
    # produce an inconsistent collection, so reject it up front.
    PayloadSchema = type(data_chunks[0])
    if any(not isinstance(chunk, PayloadSchema) for chunk in data_chunks):
        raise ValueError("All data chunks must be of the same type.")

    vector_engine = get_vector_engine()
    await vector_engine.create_collection(collection_name, payload_schema = PayloadSchema)

    data_points = [
        DataPoint[PayloadSchema](
            id = str(chunk.id),
            payload = parse_data(chunk, index),
            embed_field = embed_field,
        )
        for index, chunk in enumerate(data_chunks)
    ]
    await vector_engine.create_data_points(collection_name, data_points)

    return data_chunks
def parse_data(chunk, chunk_index: int) -> dict:
    """
    Flatten a chunk object's attributes into a plain dict for payload storage.

    UUID attribute values are converted to strings; everything else is copied
    as-is. The chunk's position in its batch is recorded under "chunk_index"
    (overwritten if the chunk itself carries a chunk_index attribute).
    """
    from uuid import UUID

    parsed = { "chunk_index": chunk_index }
    parsed.update(
        (key, str(value) if isinstance(value, UUID) else value)
        for key, value in vars(chunk).items()
    )
    return parsed

View file

@ -0,0 +1,2 @@
from .summarize_text import summarize_text
from .query_summaries import query_summaries

View file

@ -0,0 +1,17 @@
from cognee.infrastructure.databases.vector import get_vector_engine
async def query_summaries(query: str) -> list:
    """
    Return the payloads of the stored text summaries most similar to `query`.

    Parameters:
    - query (str): The query string to match summaries against.

    Returns:
    - list: Payload objects describing the summaries related to the query.
    """
    # Vector similarity search over the "summaries" collection, top 5 hits.
    engine = get_vector_engine()
    hits = await engine.search("summaries", query, limit = 5)
    return [hit.payload for hit in hits]

View file

@ -3,12 +3,12 @@ import asyncio
from typing import Type
from pydantic import BaseModel
from cognee.infrastructure.databases.vector import get_vector_engine, DataPoint
from cognee.tasks.chunk_extract_summary.models.TextSummary import TextSummary
from cognee.modules.data.extraction.extract_summary import extract_summary
from cognee.modules.data.processing.chunk_types.DocumentChunk import DocumentChunk
from cognee.modules.chunking import DocumentChunk
from .models.TextSummary import TextSummary
async def chunk_extract_summary(data_chunks: list[DocumentChunk], summarization_model: Type[BaseModel], collection_name: str = "summaries"):
async def summarize_text(data_chunks: list[DocumentChunk], summarization_model: Type[BaseModel], collection_name: str = "summaries"):
if len(data_chunks) == 0:
return data_chunks

View file

@ -2,6 +2,7 @@ import os
import logging
import pathlib
import cognee
from cognee.api.v1.search import SearchType
logging.basicConfig(level = logging.DEBUG)
@ -19,14 +20,6 @@ async def main():
ai_text_file_path = os.path.join(pathlib.Path(__file__).parent, "test_data/artificial-intelligence.pdf")
await cognee.add([ai_text_file_path], dataset_name)
# text = """A quantum computer is a computer that takes advantage of quantum mechanical phenomena.
# At small scales, physical matter exhibits properties of both particles and waves, and quantum computing leverages this behavior, specifically quantum superposition and entanglement, using specialized hardware that supports the preparation and manipulation of quantum states.
# Classical physics cannot explain the operation of these quantum devices, and a scalable quantum computer could perform some calculations exponentially faster (with respect to input size scaling) than any modern "classical" computer. In particular, a large-scale quantum computer could break widely used encryption schemes and aid physicists in performing physical simulations; however, the current state of the technology is largely experimental and impractical, with several obstacles to useful applications. Moreover, scalable quantum computers do not hold promise for many practical tasks, and for many important tasks quantum speedups are proven impossible.
# The basic unit of information in quantum computing is the qubit, similar to the bit in traditional digital electronics. Unlike a classical bit, a qubit can exist in a superposition of its two "basis" states. When measuring a qubit, the result is a probabilistic output of a classical bit, therefore making quantum computers nondeterministic in general. If a quantum computer manipulates the qubit in a particular way, wave interference effects can amplify the desired measurement results. The design of quantum algorithms involves creating procedures that allow a quantum computer to perform calculations efficiently and quickly.
# Physically engineering high-quality qubits has proven challenging. If a physical qubit is not sufficiently isolated from its environment, it suffers from quantum decoherence, introducing noise into calculations. Paradoxically, perfectly isolating qubits is also undesirable because quantum computations typically need to initialize qubits, perform controlled qubit interactions, and measure the resulting quantum states. Each of those operations introduces errors and suffers from noise, and such inaccuracies accumulate.
# In principle, a non-quantum (classical) computer can solve the same computational problems as a quantum computer, given enough time. Quantum advantage comes in the form of time complexity rather than computability, and quantum complexity theory shows that some quantum algorithms for carefully selected tasks require exponentially fewer computational steps than the best known non-quantum algorithms. Such tasks can in theory be solved on a large-scale quantum computer whereas classical computers would not finish computations in any reasonable amount of time. However, quantum speedup is not universal or even typical across computational tasks, since basic tasks such as sorting are proven to not allow any asymptotic quantum speedup. Claims of quantum supremacy have drawn significant attention to the discipline, but are demonstrated on contrived tasks, while near-term practical use cases remain limited.
# """
text = """A large language model (LLM) is a language model notable for its ability to achieve general-purpose language generation and other natural language processing tasks such as classification. LLMs acquire these abilities by learning statistical relationships from text documents during a computationally intensive self-supervised and semi-supervised training process. LLMs can be used for text generation, a form of generative AI, by taking an input text and repeatedly predicting the next token or word.
LLMs are artificial neural networks. The largest and most capable, as of March 2024, are built with a decoder-only transformer-based architecture while some recent implementations are based on other architectures, such as recurrent neural network variants and Mamba (a state space model).
Up to 2020, fine tuning was the only way a model could be adapted to be able to accomplish specific tasks. Larger sized models, such as GPT-3, however, can be prompt-engineered to achieve similar results.[6] They are thought to acquire knowledge about syntax, semantics and "ontology" inherent in human language corpora, but also inaccuracies and biases present in the corpora.
@ -42,27 +35,21 @@ async def main():
random_node = (await vector_engine.search("entities", "AI"))[0]
random_node_name = random_node.payload["name"]
search_results = await cognee.search("SIMILARITY", params = { "query": random_node_name })
search_results = await cognee.search(SearchType.INSIGHTS, query = random_node_name)
assert len(search_results) != 0, "The search results list is empty."
print("\n\nExtracted sentences are:\n")
for result in search_results:
print(f"{result}\n")
search_results = await cognee.search("TRAVERSE", params = { "query": random_node_name })
search_results = await cognee.search(SearchType.CHUNKS, query = random_node_name)
assert len(search_results) != 0, "The search results list is empty."
print("\n\nExtracted sentences are:\n")
print("\n\nExtracted chunks are:\n")
for result in search_results:
print(f"{result}\n")
search_results = await cognee.search("SUMMARY", params = { "query": random_node_name })
search_results = await cognee.search(SearchType.SUMMARIES, query = random_node_name)
assert len(search_results) != 0, "Query related summaries don't exist."
print("\n\nQuery related summaries exist:\n")
for result in search_results:
print(f"{result}\n")
search_results = await cognee.search("ADJACENT", params = { "query": random_node_name })
assert len(search_results) != 0, "Large language model query found no neighbours."
print("\n\nLarge language model query found neighbours.\n")
print("\n\Extracted summaries are:\n")
for result in search_results:
print(f"{result}\n")

View file

@ -3,6 +3,7 @@ import os
import logging
import pathlib
import cognee
from cognee.api.v1.search import SearchType
logging.basicConfig(level=logging.DEBUG)
@ -38,27 +39,21 @@ async def main():
random_node = (await vector_engine.search("entities", "AI"))[0]
random_node_name = random_node.payload["name"]
search_results = await cognee.search("SIMILARITY", { "query": random_node_name })
search_results = await cognee.search(SearchType.INSIGHTS, query = random_node_name)
assert len(search_results) != 0, "The search results list is empty."
print("\n\nExtracted sentences are:\n")
for result in search_results:
print(f"{result}\n")
search_results = await cognee.search("TRAVERSE", { "query": random_node_name })
search_results = await cognee.search(SearchType.CHUNKS, query = random_node_name)
assert len(search_results) != 0, "The search results list is empty."
print("\n\nExtracted sentences are:\n")
print("\n\nExtracted chunks are:\n")
for result in search_results:
print(f"{result}\n")
search_results = await cognee.search("SUMMARY", { "query": random_node_name })
search_results = await cognee.search(SearchType.SUMMARIES, query = random_node_name)
assert len(search_results) != 0, "Query related summaries don't exist."
print("\n\nQuery related summaries exist:\n")
for result in search_results:
print(f"{result}\n")
search_results = await cognee.search("ADJACENT", { "query": random_node_name })
assert len(search_results) != 0, "Large language model query found no neighbours."
print("\n\Large language model query found neighbours.\n")
print("\n\Extracted summaries are:\n")
for result in search_results:
print(f"{result}\n")

View file

@ -4,6 +4,7 @@ import os
import logging
import pathlib
import cognee
from cognee.api.v1.search import SearchType
logging.basicConfig(level=logging.DEBUG)
@ -39,27 +40,21 @@ async def main():
random_node = (await vector_engine.search("entities", "AI"))[0]
random_node_name = random_node.payload["name"]
search_results = await cognee.search("SIMILARITY", { "query": random_node_name })
search_results = await cognee.search(SearchType.INSIGHTS, query = random_node_name)
assert len(search_results) != 0, "The search results list is empty."
print("\n\nExtracted sentences are:\n")
for result in search_results:
print(f"{result}\n")
search_results = await cognee.search("TRAVERSE", { "query": random_node_name })
search_results = await cognee.search(SearchType.CHUNKS, query = random_node_name)
assert len(search_results) != 0, "The search results list is empty."
print("\n\nExtracted sentences are:\n")
print("\n\nExtracted chunks are:\n")
for result in search_results:
print(f"{result}\n")
search_results = await cognee.search("SUMMARY", { "query": random_node_name })
search_results = await cognee.search(SearchType.SUMMARIES, query = random_node_name)
assert len(search_results) != 0, "Query related summaries don't exist."
print("\n\nQuery related summaries exist:\n")
for result in search_results:
print(f"{result}\n")
search_results = await cognee.search("ADJACENT", { "query": random_node_name })
assert len(search_results) != 0, "Large language model query found no neighbours."
print("\n\Large language model query found neighbours.\n")
print("\n\Extracted summaries are:\n")
for result in search_results:
print(f"{result}\n")

View file

@ -2,6 +2,7 @@ import os
import logging
import pathlib
import cognee
from cognee.api.v1.search import SearchType
logging.basicConfig(level=logging.DEBUG)
@ -37,27 +38,21 @@ async def main():
random_node = (await vector_engine.search("entities", "AI"))[0]
random_node_name = random_node.payload["name"]
search_results = await cognee.search("SIMILARITY", { "query": random_node_name })
search_results = await cognee.search(SearchType.INSIGHTS, query = random_node_name)
assert len(search_results) != 0, "The search results list is empty."
print("\n\nExtracted sentences are:\n")
for result in search_results:
print(f"{result}\n")
search_results = await cognee.search("TRAVERSE", { "query": random_node_name })
search_results = await cognee.search(SearchType.CHUNKS, query = random_node_name)
assert len(search_results) != 0, "The search results list is empty."
print("\n\nExtracted sentences are:\n")
print("\n\nExtracted chunks are:\n")
for result in search_results:
print(f"{result}\n")
search_results = await cognee.search("SUMMARY", { "query": random_node_name })
search_results = await cognee.search(SearchType.SUMMARIES, query = random_node_name)
assert len(search_results) != 0, "Query related summaries don't exist."
print("\n\nQuery related summaries exist:\n")
for result in search_results:
print(f"{result}\n")
search_results = await cognee.search("ADJACENT", { "query": random_node_name })
assert len(search_results) != 0, "Large language model query found no neighbours."
print("\n\Large language model query found neighbours.\n")
print("\n\Extracted summaries are:\n")
for result in search_results:
print(f"{result}\n")

View file

@ -201,30 +201,43 @@ The base URL for all API requests is determined by the server's deployment envir
- **Auth Required**: No
- **Description**: Search for nodes in the graph based on the provided query parameters.
<!-- **Request Body**:
- `query_params`: A dictionary of query parameters. -->
**Request Body**:
```json
{
"query_params": [{
"query": "QUERY_TO_MATCH_DATA",
"searchType": "SIMILARITY", // or TRAVERSE, ADJACENT, SUMMARY
}]
"searchType": "INSIGHTS", # Or "SUMMARIES" or "CHUNKS"
"query": "QUERY_TO_MATCH_DATA"
}
```
**Response**
For "INSIGHTS" search type:
```json
{
"results": [
{
"node_id": "node_id_1",
"attributes": {...},
...
},
[[
{ "name" "source_node_name" },
{ "relationship_name" "between_nodes_relationship_name" },
{ "name" "target_node_name" },
]]
```
For "SUMMARIES" search type:
```json
[
{ "text" "summary_text" },
{ "text" "summary_text" },
{ "text" "summary_text" },
...
]
}
```
For "CHUNKS" search type:
```json
[
{ "text" "chunk_text" },
{ "text" "chunk_text" },
{ "text" "chunk_text" },
...
]
```
### 12. Get Settings

View file

@ -1,6 +0,0 @@
authors:
tricalt:
name: Vasilije Markovic
description: Creator
avatar: https://avatars.githubusercontent.com/u/8619304?v=4
url: https://twitter.com/intent/follow?screen_name=tricalt

View file

@ -1,39 +0,0 @@
# Blog
The goal of the blog is to discuss broader topics around the cognee project, including the motivation behind the project, the technical details, and the future of the project.
## knowledge graphs + rags
1. [LLMOps stack + Graphs](posts/Shiny_new_LLMOps.md)
[//]: # (2. [Where do knowledge graphs fit, and where do they not? A case study with dynamo.fyi]&#40;posts/where-do-knowledge-graphs-fit.md&#41;)
[//]: # (3. [Knowledge Graphs vs basic RAGs, some metrics]&#40;posts/knowledge-graphs-vs-basic-rags.md&#41;)
[//]: # ()
## product announcements
This section covers the release notes for the cognee library. It includes the new features, bug fixes, and improvements in each release.
1. [Cognee - library release](posts/cognee-library-release.md)
[//]: # (2. [Cognee - v0.1.12 announcement]&#40;posts/cognee-v0.1.12.md&#41;)
3. [New website for cognee](posts/cognee-new-website.md)
## Towards deterministic data pipelines for LLMs step by step
This series mostly deals with product discovery, data engineering, and the development of robust AI memory data pipelines.
1. [From demo to production 1](posts/from-demo-to-production-1.md)
2. [From demo to production 2](posts/from-demo-to-production-2.md)
3. [From demo to production 3](posts/from-demo-to-production-3.md)
4. [From demo to production 4](posts/from-demo-to-production-4.md)

Binary file not shown.

Before

Width:  |  Height:  |  Size: 143 KiB

View file

@ -1,89 +0,0 @@
---
draft: False
date: 2024-06-14
tags:
- LLMOps
- Graphs
- Vector Stores
- Feature Storage
authors:
- tricalt
---
# LLMOps stack + Graphs
### **The past: Berlin startup scene**
Machine learning has had its place in the business models of companies for several years, but due to high labor costs, lack of generalizability, and long development cycles, it often did not meet the early days' expectations. With the rise of ChatGPT, however, foundation models and LLMs are reemerging as the next step in the evolution in the Machine Learning stack, democratizing it for the end users.
As a consultant and operator in the Berlin startup scene over the past ten years, I have seen the vocation of “Data Scientist” reflect this trend as well. Its initial popularity, decline, and resurgence easily could have mirrored the rise and fall of the Roman Empire. Humble beginnings, grandiosity, and then the rigidity of thought of early Christianity.
In my early years as a data analyst in tier-II e-commerce businesses, data science was considered a prestigious, cutting-edge title. However,  most of these ventures lacked the experience or maturity to properly productionize their business models.
Often, I would see a data scientist building tons of features for their company's AI models only to slightly improve on their basic KPIs. They were often stuck in the limbo of demoware, and only the businesses in which data was a key operational element would successfully deploy and run data science systems at scale.
---
### **Pandemic and fall out of grace**
Over the years, this low impact-high drain dynamic led to data science falling out of favor. The COVID pandemic seemed to deliver a death blow to the Berlin Data Science community, with many data scientists being made redundant.
This played out differently in larger markets and companies, where I saw more mature setups heavily relying on machine learning. However, from the perspective of most software *Mittelstand* (a German term for medium-sized enterprises), the technology was seen as a nice-to-have, not a must-have.
Suddenly, with the release of ChatGPT, most knowledge previously required to operate machine learning became obsolete,  with the only thing now needed being an API key. This dropped the barrier to entry to the floor and created a need for new tools to be built around these APIs.
Tools like Langchain met this need perfectly, enabling everyone to interact with their data.
### **ML to LLMOps**
A question arises about how we should approach LLM tooling. Ignoring previous knowledge and inferring new paradigms(Agents come to mind)  as if in a vacuum can be counterproductive. Re-inventing categories should be done cautiously; history shows that overdoing it can lead to failure.
A recently published article by angel investor and Professor of Neuroscience at U.C. London, Peter Zhegin, has effectively mapped out the elements of the MLOps system ripe for disruption, suggesting which ones might be impacted:
1. **Vector Stores**: The authors argue that data storage and vector stores will be acquired by large players, but that differentiation still may be possible in the data space. They state that "A realistic way to differentiate might be to push for real-time vectorization while finding the best way to use traditional databases and feature stores (relational/NoSQL) together with vector DBs."
2. **Feature Storage**: The authors note that "Prompt engineering does not involve traditional training but allows one to change the model's behavior during inference by creating appropriate instructions or queries. This training without training presents an interesting opportunity outside the MLOps pipeline."
### **Feature Stores and the Next Steps**
The evolution of the MLOps stack signals a need for a new type of feature store that enables in-context learning.
Since fine-tuning LLMs will start happening at inference time, we need a system to interact with and manage data points fed to the LLM at scale. This implies a need for a feature store that provides more determinism to the LLM outputs, enabling the integration of business processes and practices into data that captures the context of an enterprise or organization.
An example of such a use case would be using a set of documents from different departments, enabling the LLM to understand the relationships between these documents and their individual parts.
This effort often requires humans to provide the base rules for how the LLM should interact with the information, leading to the creation of what is commonly referred to as a RAG (Retrieval Augmented Generation) pipeline.
---
Recently, we've been able to combine graphs and vector data stores to create a semantic layer on top of naive RAGs. This layer has been a major step towards encoding rules into an in-context learning pipeline.
In [his recent blog post](https://medium.com/enterprise-rag/understanding-the-knowledge-graph-rag-opportunity-694b61261a9c), co-founder of WhyHow.AI, Chia Jeng Yang, explained what a typical RAG pipeline looks like. He also introduced Graph Ops and Vector Ops as new elements of the RAG stack which can lead to more stable retrieval patterns.
![Shiny_new_LLMOps/Untitled.png](Shiny_new_LLMOps/Untitled.png)
The argument Zhegin [made a few months ago is now taking shape](https://investingbyapproximation.substack.com/p/the-stunning-rise-of-llmops-navigating?r=2jos7&utm_campaign=post&utm_medium=web&triedRedirect=true). We are seeing feature stores evolve into tools that manage vector and graph stores.
We are still in the early stages, though. As Jon Turrow of Madrona Ventures  [**suggests**](https://www.madrona.com/the-rise-of-ai-agent-infrastructure/), the next generation of AI agent infrastructure—what Chia refers to as Graph Ops—will be a personalization layer.
---
I believe that these terms are interchangeable and that a new in-context Feature Store, Vector and Graph Ops, and personalization layers are essentially the same thing. Moreover, it's my belief that Vector and Graph Ops are not differentiation categories in and of themselves.
The challenge is, thus, not connecting Vector and Graph stores or giving a RAG system a 10% performance boost.
The main issues still remain
![Shiny_new_LLMOps/llm_problems.png](Shiny_new_LLMOps/llm_problems.png)
The challenge and solution lie in creating a new type of probabilistic data engine—one with an interface as simple as SQL, but which can retrieve and structure information in real-time, optimizing what we feed the LLM based on solid evaluations.
---
Striving to make sense of the best computing engine we know of—our mind—cognitive sciences may offer us clues on how to move forward.
After all, we process, store, and retrieve data from our mental lexicon with ease, with inherent personalization and dynamic data enrichment.
I believe that understanding the way our mind carries out these processes may allow us to replicate them in machine learning.
With human language as the new SQL and cognitive theories as inspiration, the next generation of tooling is still on the horizon.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 258 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 56 KiB

View file

@ -1,111 +0,0 @@
---
draft: False
date: 2024-04-02
tags:
- pydantic
- langchain
- llm
- openai
- functions
- pdfs
authors:
- tricalt
---
# Cognee - release v0.1.0
### **Preface**
In a series of posts, we explored issues with RAGs and the ways we can build a new infrastructure stack for the world of agent networks.
To restate the problem, borrowing the phrase Microsoft used:
- Baseline RAG performs poorly when asked to understand summarized semantic concepts holistically over large data collections or even singular large documents.
In the previous blog post we explained how developing a data platform and a memory layer for LLMs was one of our core aims.
To do that more effectively we turned [cognee](https://www.notion.so/Change-button-Submit-appearance-when-clicked-on-www-prometh-ai-13e59427636940598a0fd3938a2d2253?pvs=21) into a python library in order to make it easier to use and get inspiration from the OSS community.
# **Improved memory architecture**
![architecture.png](Cognee%20-%20library%20release%20157322a0aa8346ebbbf8d81943b4ca4f/architecture.png)
With the integration of Keepi.ai, we encountered several challenges that made us reassess our strategy. Among the issues we identified were:
- The decomposition of user prompts into interconnected elements proved overly granular, leading to data management difficulties on load and retrieval.
- A recurring problem was the near-identical decomposition pattern for similar messages, which resulted in content duplication and an enlarged user graph. Our takeaway was that words and their interrelations represent only a fragment of the broader picture. We need to be able to guide the set of logical connections and make the system dynamic so that the data models can be adapted and adjusted to each particular use-case. What works for e-commerce transaction handling might not work for an AI vertical creating PowerPoint slides.
- The data model, encompassing Long-Term, Short-Term, and Buffer memory, proved both limited in scope and rigid, lacking the versatility to accommodate diverse applications and use cases. Just collecting all elements from all memories seemed naive, while getting certain nodes with classifiers did not add enough value.
- The retrieval of the entire buffer highlighted the need for improved buffer content management and a more adaptable buffer structure. We conceptualized the buffer as the analogue of human working memory, and recognize the need to better manage the stored data.
Moving forward, we have adopted several new strategies, features, and design principles:
### Propositions:
Defined as atomic expressions within a text, each proposition encapsulates a unique factoid, conveyed in a succinct, standalone natural language format.
We employ Large Language Models (LLMs) to break down text into propositions and link them, forming graphs with propositions as nodes and their connections as edges.
For example, "Grass is green", and "2 + 5 = 5" are propositions. The first proposition has the truth value of "true" and the second "false".
The inspiration was found in the following paper: https://arxiv.org/pdf/2312.06648.pdf
### Multilayer Graph Network:
A cognitive multilayer network is both a quantitative and interpretive framework for exploring the mental lexicon, the intricate cognitive system that stores information about known words/concepts.
The mental lexicon is a component of the human language faculty that contains information regarding the composition of words.
Utilizing LLMs, we construct layers within the multilayer network to house propositions and their interrelations, enabling the interconnection of different semantic layers and the cross-layer linking of propositions. This facilitates both the segmentation and accurate retrieval of information.
For example, if "John Doe" authored two New York Times cooking articles, we could extract an "ingredients" layer when needed, while also easily accessing all articles by "John Doe".
We used concepts from psycholinguistics described here: https://arxiv.org/abs/1507.08539
### Data Loader:
It's vital that we address the technical challenges associated with Retrieval-Augmented Generation (RAG), such as metadata management, context retrieval, knowledge sanitization, and data enrichment.
The solution lies in a dependable data pipeline capable of efficiently and scalably preparing and loading data in various formats from a range of different sources. For this purpose, we can use 'dlt' as our data loader, gaining access to over 28 supported data sources.
To enhance the Pythonic interface, we streamlined the use of cognee into three primary methods. Users can now execute the following steps:
- **cognee.add(data)**: This method is used to input and normalize the data. It ensures the data is in the correct format and ready for further processing.
- **cognee.cognify()**: This function constructs a multilayer network of propositions, organizing the data into an interconnected, semantic structure that facilitates complex analysis and retrieval.
- **cognee.search(query, method='default')**: The search method enables the user to locate specific nodes, vectors, or generate summaries within the dataset, based on the provided query and chosen search method. We employ a combination of search approaches, each one relying on the technology implemented by vector datastores and graph stores.
# Integration and Workflow
The integration of these three components allows for a cohesive and efficient workflow:
**Data Input and Normalization**:
Initially, Cognee.add is employed to input the data. During this stage, a dlt loader operates behind the scenes to process and store the data, assigning a unique dataset ID for tracking and retrieval purposes. This ensures the data is properly formatted and normalized, laying a solid foundation for the subsequent steps.
**Creation of Multilayer Network**:
Following the data normalization, Cognee.cognify takes the stage, constructing a multilayer network from the propositions derived from the input data. The network is created using an LLM-as-a-judge approach, with specific prompts that ask for the creation of a set of relationships. This approach results in a set of layers and relationships that represent the document.
**Data Retrieval and Analysis**
The final step involves Cognee.search, where the user can query the constructed network to find specific information, analyze patterns, or extract summaries. The flexibility of the search function allows users to search for content labels, summaries, and nodes, and to retrieve data via similarity search. We also enable a combination of methods, which brings together the benefits of different search approaches.
# **What's next**
We're diligently developing our upcoming features, with key objectives including:
1. Adding audio and image support
2. Improving search
3. Adding evals
4. Adding local models
5. Adding dspy
To keep up with the progress, explore our [implementation](https://github.com/topoteretes/cognee) on GitHub and, if you find it valuable, consider starring it to show your support.

View file

@ -1,17 +0,0 @@
---
draft: False
date: 2024-06-14
tags:
- graphs
- GraphRAG
authors:
- tricalt
---
We are excited to announce the launch of Cognee.ai.
Highlights:
Book a Discussion: Schedule a consultation directly through our website.
Check it out at: www.cognee.ai and book your discussion with our experts.
We'd be happy to hear your feedback and discuss GraphRAGs, and more.

View file

@ -1,57 +0,0 @@
---
draft: False
date: 2024-05-08
tags:
- pydantic
- langchain
- llm
- openai
- functions
- pdfs
authors:
- tricalt
---
## Cognee - release v0.1.4
## New Features
### Enhanced Text Processing Capabilities:
Customizable models for classification, summarization, and labeling have been introduced to extend the versatility of our text analytics.
### Better Graph Integration and Visualization:
We have revamped our graph logic. Introduced comprehensive support for multiple graph database types including but not limited to Neo4j and NetworkX, enhancing integration and scalability.
New functions for graph rendering, color palette generation, and dynamic graph visualization to help you better visualize data relationships and structures.
### DSPy Module:
You can now train your graph generation query on a dataset and visualize the results with the dspy module.
### Async Support:
Added asyncio support in various modules to improve performance and system efficiency, reducing latency and resource consumption.
## Enhancements
### Infrastructure Upgrades:
Our infrastructure has been significantly upgraded to support a wider range of models and third-party providers, ensuring compatibility and performance. Improved configuration settings for a more robust and adaptive operational environment.
### Graph and Logic Improvements:
Improved Neo4j Integration: Enhancements to our Neo4j graph database integration for better performance and stability. Semantic Links and Node Logic: We have improved the semantic linkage between nodes and enhanced the node logic for a more intuitive and powerful user experience.
## Refactor
### Asynchronous Module Updates:
Various modules have been updated to use asynchronous operations to enhance the responsiveness and scalability of our systems.
## Documentation
### Enhanced Documentation:
We have extensively added to and reorganized our documentation.
## Bug Fixes
### Data Handling and Logic Improvements:
Fixed several inconsistencies in data handling and improved the logic flow in text input processing.

View file

@ -1,352 +0,0 @@
---
draft: False
date: 2023-10-05
tags:
- pydantic
- langchain
- llm
- openai
- functions
- pdfs
authors:
- tricalt
---
# Going beyond Langchain + Weaviate and towards a production ready modern data platform
### Table of Contents
## **1. Introduction: The Current Generative AI Landscape**
### 1.1. A brief overview
Browsing the [largest AI platform directory](https://theresanaiforthat.com/) available at the moment, we can observe around 7,000 new, mostly semi-finished AI projects — projects whose development is fueled by recent improvements in foundation models and open-source community contributions.
Decades of technological advancements have led to small teams being able to do in 2023 what in 2015 required a team of dozens.
Yet, the AI apps currently being pushed out still mostly feel and perform like demos.
It seems it has never been easier to create a startup, build an AI app, go to market… and fail.
The consensus is, nevertheless, that the AI space is *the* place to be in 2023.
> “The AI Engineer [...] will likely be the **highest-demand engineering job of the [coming] decade.”**
>
**[Swyx](https://www.latent.space/p/ai-engineer)**
The stellar rise of AI engineering as a profession is, perhaps, signaling the need for a unified solution that is not yet there — a platform that is, in its essence, a Large Language Model (LLM), which could be employed as [a powerful general problem solver](https://lilianweng.github.io/posts/2023-06-23-agent/?fbclid=IwAR1p0W-Mg_4WtjOCeE8E6s7pJZlTDCDLmcXqHYVIrEVisz_D_S8LfN6Vv20).
To address this issue, dlthub and [prometh.ai](http://prometh.ai/) will collaborate on productionizing a common use-case, PDF processing, progressing step by step. We will use LLMs, AI frameworks, and services, refining the code until we attain a clearer understanding of what a modern LLM architecture stack might entail.
You can find the code in the [PromethAI-Memory repository](https://github.com/topoteretes/PromethAI-Memory)
### **1.2. The problem of putting code to production**
![infographic (2).png](Going%20beyond%20Langchain%20+%20Weaviate%20and%20towards%20a%20pr%207351d77a1eba40aab4394c24bef3a278/infographic_(2).png)
Despite all the AI startup hype, there's a glaring issue lurking right under the surface: **foundation models do not have production-ready data infrastructure by default**.
Everyone seems to be building simple tools, like “Your Sales Agent” or “Your HR helper,” on top of OpenAI — a so-called  “Thin Wrapper” — and selling them as services.
Our intention, however, is not to merely capitalize on this nascent industry — it's to use a new technology to catalyze a true digital paradigm shift — to [paraphrase investor Marc Andreessen](https://www.youtube.com/watch?v=-hxeDjAxvJ8&t=328s&ab_channel=LexFridman): the content of the new medium is the content of the previous medium.
What Andreessen meant by this is that each new medium for sharing information must encapsulate the content of the prior medium. For example, the internet encapsulates all books, movies, pictures, and stories from previous mediums.
After a unified AI solution is created, only then will AI agents be able to proactively and competently operate the browsers, apps, and devices we operate by ourselves today.
Intelligent agents in AI are programs capable of [perceiving](https://en.wikipedia.org/wiki/Machine_perception) their environment, acting [autonomously](https://en.wikipedia.org/wiki/Autonomous) in order to achieve goals, and may improve their performance by [learning](https://en.wikipedia.org/wiki/Machine_learning) or acquiring [knowledge](https://en.wikipedia.org/wiki/Knowledge_representation).
The reality is that we now have a set of data platforms and AI agents that are becoming available to the general public, whose content and methods were previously inaccessible to anyone not privy to the tech-heavy languages of data scientists and engineers.
As engineering tools move toward the mainstream, they need to become more intuitive and user-friendly, while hiding their complexity with a set of background solutions.
> *Fundamentally, the issue of “Thin wrappers” is not an issue of bad products, but an issue of a lack of robust enough data engineering methods coupled with the general difficulties that come with creating production-ready code that relies on robust data platforms in a new space.*
>
The current lack of production-ready data systems for LLMs and AI Agents opens up a gap we want to fill by introducing robust data engineering practices to solve this issue.
In this series of texts, our aim will thus be to explore what would constitute:
1. Proper data engineering methods for LLMs
2. A production-ready generative AI data platform that unlocks AI assistants/Agent Networks
Each of the coming blog posts will be followed by Python code, to demonstrate the progress made toward building a modern AI data platform, raise important questions, and facilitate an open-source collaboration.
Let's start by setting an attainable goal. As an example, let's conceptualize a production-ready process that can analyze and process hundreds of PDFs for hundreds of users.
<aside>
💡 As a user, I want an AI Data Platform to enable me to extract, organize, and summarize data from PDF invoices so that it's seamlessly updated in the database and available for further processing.
</aside>
Imagine you're a developer, and you've got a stack of digital invoices in PDF format from various vendors. These PDFs are not just simple text files; they contain logos, varying fonts, perhaps some tables, and even handwritten notes or signatures.
Your goal? To extract relevant information, such as vendor names, invoice dates, total amounts, and line-item details, among others.
This task of analyzing PDFs may help us understand and define what a production-ready AI data platform entails. To perform the task, well be drawing a parallel between Data Engineering concepts and those from Cognitive Sciences which tap into our understanding of how human memory works — this should provide the baseline for the evaluation of the POCs in this post.
We assume that Agent Networks of the future would resemble groups of humans with their own memory and unique contexts, all working and contributing toward a set of common objectives.
In our example of data extraction from PDFs — a modern enterprise may have hundreds of thousands, if not millions of such documents stored in different places, with many people hired to make sense of them.
This data is considered unstructured — you cannot handle it easily with current data engineering practices and database technology. The task to structure it is difficult and, to this day, has always needed to be performed manually.
With the advent of Agent Networks, which mimic human cognitive abilities, we could start realistically structuring this kind of information at scale. As this is still data processing — an engineering task — we need to combine those two approaches.
From an engineering standpoint, the next generation Data Platform needs to be built with the following in mind:
- We need to give Agents access to the data at scale.
- We need our Agents to operate like human minds so we need to provide them with tools to execute tasks and various types of memory for reasoning
- We need to keep the systems under control, meaning that we apply good engineering practices to the whole system
- We need to be able to test, sandbox, and roll back what Agents do and we need to observe them and log every action
In order to conceptualize a new model of data structure and relationships that transcends the traditional Data Warehousing approach, we can start perceiving procedural steps in Agent execution flows as thoughts and interpreting them through the prism of human cognitive processes such as the functioning of our memory system and its memory components.
Human memory can be divided into several distinct categories:
- **Sensory Memory (SM)** → Very short term (15-30s) memory storage unit receiving information from our senses.
- **Short Term Memory (STM)** → Short term memory that processes the information, and coordinates work based on information provided.
- **Long-Term Memory (LTM) →** Stores information long term, and retrieves what it needs for daily life.
The general structure of human memory. Note that [Weng](https://lilianweng.github.io/posts/2023-06-23-agent/) doesn't expand on the STM here in the way we did above:
![Untitled](Going%20beyond%20Langchain%20+%20Weaviate%20and%20towards%20a%20pr%207351d77a1eba40aab4394c24bef3a278/Untitled.png)
Broader, more relevant representation of memory for our context, and the corresponding data processing, based on [Atkinson-Schiffrin memory model](https://en.wikipedia.org/wiki/Atkinson%E2%80%93Shiffrin_memory_model) would be:
![Untitled](Going%20beyond%20Langchain%20+%20Weaviate%20and%20towards%20a%20pr%207351d77a1eba40aab4394c24bef3a278/Untitled%201.png)
## **2. Level 0: The Current State of Affairs**
To understand the current LLM production systems, how they handle data input and processing, and their evolution, we start at Level 0 — the LLMs and their APIs as they are currently — and progress toward Level 7 — AI Agents and complex AI Data Platforms and Agent Networks of the future.
### 2.1. Developer Intent at Level 0
![infographic (2).png](Going%20beyond%20Langchain%20+%20Weaviate%20and%20towards%20a%20pr%207351d77a1eba40aab4394c24bef3a278/infographic_(2)%201.png)
In order to extract relevant data from PDF documents, as an engineer you would turn to a powerful AI model like OpenAI, Anthropic, or Cohere (Layer 0 in our XYZ stack). Not all of them support this functionality, so youd use [Bing](https://www.notion.so/Go-to-market-under-construction-04a750a15c264df4be5c6769289b99a2?pvs=21) or a ChatGPT plugin like [AskPDF](https://plugin.askyourpdf.com/), which do.
In order to "extract nuances," you might provide the model with specific examples or more directive prompts. For instance, "Identify the vendor name positioned near the top of the invoice, usually above the billing details."
Next, you'd "prompt it" with various PDFs to see how it reacts. Based on the outputs, you might notice that it misses handwritten dates or gets confused with certain fonts.
This is where "[prompt engineering](https://www.promptingguide.ai/)" comes in. You might adjust your initial prompt to be more specific or provide additional context. Maybe you now say, "Identify the vendor name and, if you find any handwritten text, treat it as the invoice date."
### 2.2 **Toward the production code from the chatbot UX** - POC at level 0
![Untitled](Going%20beyond%20Langchain%20+%20Weaviate%20and%20towards%20a%20pr%207351d77a1eba40aab4394c24bef3a278/Untitled%202.png)
Our POC at this stage consists of simply uploading a PDF and asking it questions until we have better and better answers based on prompt engineering. This exercise shows what is available with the current production systems, to help us set a baseline for the solutions to come.
- If your goal is to understand the content of a PDF, Bing and OpenAI will enable you to upload documents and get explanations of their contents
- Uses basic natural language processing (NLP) prompts without any schema on output data
- Typically “forgets” the data after a query — no notion of storage (LTM)
- In a production environment, data loss can have significant consequences. It can lead to operational disruptions, inaccurate analytics, and loss of valuable insights
- There is no possibility to test the behavior of the system
Let's break down the Data Platform component at this stage:
| Memory type | State | Description |
| --- | --- | --- |
| Sensory Memory | Chatbot interface | Can be interpreted in this context as the interface used for the human input |
| STM | The context window of the chatbot/search. In essence stateless | The processing layer and a storage of the session/user context |
| LTM | Not present at this stage | The information storage |
Lacks:
- Decoupling: Separating components to reduce interdependency.
- Portability: Ability to run in different environments.
- Modularity: Breaking down into smaller, independent parts.
- Extendability: Capability to add features or functionality.
**Next Steps**:
1. Implement a LTM memory component for information retention.
2. Develop an abstraction layer for Sensory Memory input and processing multiple file types.
Addressing these points will enhance flexibility, reusability, and adaptability.
### 2.3 Summary - Ask PDF questions
| Description | Use-Case | Summary | Memory | Maturity | Production readiness |
| --- | --- | --- | --- | --- | --- |
| The Foundational Model | Extract info from your documents | ChatGPT prompt engineering as the only way to optimise outputs | SM, STM are system defined, LTM is not present | Works 15% of time | Lacks Decoupling, Portability, Modularity and Extendability |
### 2.4. Addendum - companies in the space: OpenAI, Anthropic, and Cohere
- A brief on each provider, relevant model and its role in the modern data space.
- The list of models and providers in the [space](https://mindsdb.com/blog/navigating-the-llm-landscape-a-comparative-analysis-of-leading-large-language-models)
| Model | Provider | Structured data | Speed | Params | Fine Tunability |
| --- | --- | --- | --- | --- | --- |
| gpt-4 | OpenAI  | Yes | ★☆☆  |  - | No |
| gpt-3.5-turbo | OpenAI | Yes | ★★☆  |  175B | No |
| gpt-3 | OpenAI | No  |  ★☆☆ |  175B | No |
| ada, babbage, curie |  OpenAI | No | ★★★  |  350M - 7B | Yes |
| claude | Anthropic  | No | ★★☆  |  52B | No  |
| claude-instant | Anthropic  | No | ★★★  |  52B | No |
| command-xlarge | Cohere | No |  ★★☆ |  50B | Yes |
| command-medium | Cohere | No |  ★★★ |  6B | Yes |
| BERT | Google  | No | ★★★  | 345M  | Yes |
|  T5 | Google  | No | ★★☆  |  11B | Yes |
| PaLM  | Google  | No |  ★☆☆ |  540B | Yes |
| LLaMA | Meta AI  | Yes | ★★☆  |  65B | Yes |
|  CTRL | Salesforce  | No | ★★★  | 1.6B  | Yes |
| Dolly 2.0  | Databricks | No | ★★☆  |  12B | Yes  |
## 3**. Level 1: Langchain & Weaviate**
### **3.1.** Developer Intent at Level 1**: Langchain & Weaviate LLM Wrapper**
![infographic (2).png](Going%20beyond%20Langchain%20+%20Weaviate%20and%20towards%20a%20pr%207351d77a1eba40aab4394c24bef3a278/infographic_(2)%202.png)
This step is basically an upgrade to the current state of the art LLM UX/UI where we add:
- Permanent LTM memory (data store)
As a developer, I need to answer questions on large PDFs that I can't simply pass to the LLM due to technical limitations. The primary issue being addressed is the constraint on prompt length. As of now, GPT-4 has a limit of 4k tokens for both the prompt and the response combined. So, if the prompt comprises 3.5k tokens, the response can only be 0.5k tokens long.
- LLM Framework like Langchain to adapt any document type to vector store
Using Langchain provides a neat abstraction for me to get started quickly, connect to VectorDB, and get fast results.
- Some higher level structured storage (dlthub)
![Untitled](Going%20beyond%20Langchain%20+%20Weaviate%20and%20towards%20a%20pr%207351d77a1eba40aab4394c24bef3a278/Untitled%203.png)
### **3.2. Translating Theory into Practice: POC at Level 1**
- LLMs cant process all the data that a large PDF could contain. So, we need a place to store the PDF and a way to retrieve relevant information from it, so it can be passed on to the LLM.
- When trying to build and process documents or user inputs, its important to store them in a Vector Database to be able to retrieve the information when needed, along with the past context.
- A vector database is the optimal solution because it enables efficient storage, retrieval, and processing of high-dimensional data, making it ideal for applications like document search and user input analysis where context and similarity are important.
- For the past several months, there has been a surge of projects that personalize LLMs by storing user settings and information in a VectorDB so they can be easily retrieved and used as input for the LLM.
This can be done by storing data in the Weaviate Vector Database; then, we can process our PDF.
- We start by converting the PDF and translating it
![carbon (5).png](Going%20beyond%20Langchain%20+%20Weaviate%20and%20towards%20a%20pr%207351d77a1eba40aab4394c24bef3a278/carbon_(5).png)
- the next step we store the PDF to Weaviate
![carbon (6).png](Going%20beyond%20Langchain%20+%20Weaviate%20and%20towards%20a%20pr%207351d77a1eba40aab4394c24bef3a278/carbon_(6).png)
- We load the data into some type of database using dlthub
![carbon (9).png](Going%20beyond%20Langchain%20+%20Weaviate%20and%20towards%20a%20pr%207351d77a1eba40aab4394c24bef3a278/carbon_(9).png)
The parallel with our memory components becomes clearer at this stage. We have some way to define inputs which correspond to SM, while STM and LTM are starting to become two separate, clearly distinguishable entities. It becomes evident that we need to separate LTM data according to domains it belongs to but, at this point, a clear structure for how that would work has not yet emerged.
In addition, we can treat GPT as limited working memory and its context size as how much our model can remember during one operation.
It's evident that, if we don't manage the working memory well, we will overload it and fail to retrieve outputs. So, we will need to take a closer look at how humans do the same and how our working memory manages millions of facts, emotions, and senses swirling around our minds.
Let's break down the Data Platform components at this stage:
| Memory type | State | Description |
| --- | --- | --- |
| Sensory Memory | Command line interface + arguments | Can be interpreted in this context as the arguments provided to the script |
| STM | Partially Vector store, partially working memory | The processing layer and a storage of the session/user context |
| LTM | Vector store | The raw document storage |
**Sensory Memory**
Sensory memory can be seen as an input buffer where the information from the environment is stored temporarily. In our case, its the arguments we give to the command line script.
**STM**
STM is often associated with the concept of "working memory," which holds and manipulates information for short periods.
In our case, it is the time during which the process runs.
**LTM**
LTM can be conceptualized as a database in software systems. Databases store, organize, and retrieve data over extended periods. The information in LTM is organized and indexed, similar to how databases use tables, keys, and indexes to categorize and retrieve data efficiently.
**VectorDB: The LTM Storage of Our AI Data Platform**
Unlike traditional relational databases, that store data in tables, and newer NoSQL databases like MongoDB, that use JSON documents, vector databases specifically store and fetch vector embeddings.
Vector databases are crucial for Large Language Models and other modern, resource-hungry applications. They're designed for handling vector data, commonly used in fields like computer graphics, Machine Learning, and Geographic Information Systems.
Vector databases hinge on vector embeddings. These embeddings, packed with semantic details, help AI systems to understand data and retain long-term memory. They're condensed snapshots of training data and act as filters when processing new data in the inference stage of machine learning.
**Problems**:
- Interoperability
- Maintainability
- Fault Tolerance
**Next steps:**
1. Create a standardized data model
2. Dockerize the component
3. Create a FastAPI endpoint
### **3.4. Summary - The thing startup bros pitch to VCs**
| Description | Use-Case | Summary | Knowledge | Maturity | Production readiness |
| --- | --- | --- | --- | --- | --- |
| Interface Endpoint for the Foundational Model | Store data and query it for a particular use-case | Langchain + Weaviate to improve users conversations + prompt engineering to get better outputs | SM is somewhat modifiable, STM is not clearly defined, LTM is a VectorDB | Works 25% of time | Lacks Interoperability, Maintainability, Fault Tolerance Has some: Reusability, Portability, Extendability |
### 3.5. Addendum - Frameworks and Vector DBs in the space: Langchain, Weaviate and others
- A brief on each provider, relevant model and its role in the modern data space.
- The list of models and providers in the space
| Tool/Service | Tool type | Ease of use | Maturity | Docs | Production readiness | |
| --- | --- | --- | --- | --- | --- | --- |
| Langchain | Orchestration framework | ★★☆  | ★☆☆  | ★★☆  | ★☆☆  | |
| Weaviate | VectorDB | ★★☆  | ★★☆  | ★★☆  | ★★☆  | |
| Pinecone | VectorDB | ★★☆  | ★★☆  | ★★☆  | ★★☆  | |
| ChromaDB | VectorDB | ★★☆  | ★☆☆  | ★☆☆  | ★☆☆  | |
| Haystack | Orchestration framework | ★★☆  | ★☆☆  | ★★☆  | ★☆☆  | |
| Huggingface's New Agent System | Orchestration framework | ★★☆  | ★☆☆  | ★★☆  | ★☆☆  | |
| Milvus | VectorDB | ★★☆  | ★☆☆  | ★★☆  | ★☆☆  | |
| https://gpt-index.readthedocs.io/ | Orchestration framework | ★★☆  | ★☆☆  | ★★☆  | ★☆☆  | |
| | | | | | | |
## **Resources**
### **Blog Posts:**
1. **[Large Action Models](https://blog.salesforceairesearch.com/large-action-models/)**
2. **[Making Data Ingestion Production-Ready: A LangChain-Powered Airbyte Destination](https://blog.langchain.dev/making-data-ingestion-production-ready-a-langchain-powered-airbyte-destination/)**
3. **[The Problem with LangChain](https://minimaxir.com/2023/07/langchain-problem/)**
### **Research Papers (ArXiv):**
1. **[Research Paper 1](https://arxiv.org/pdf/2303.17580.pdf)**
2. **[Research Paper 2](https://arxiv.org/abs/2210.03629)**
3. **[Research Paper 3](https://arxiv.org/abs/2302.01560)**
### **Web Comics:**
1. **[xkcd comic](https://xkcd.com/927/)**
### **Reddit Discussions:**
1. **[Reddit Discussion: The Problem with LangChain](https://www.reddit.com/r/MachineLearning/comments/14zlaz6/d_the_problem_with_langchain/)**
### **Developer Blog Posts:**
1. **[Unlocking the Power of Enterprise-Ready LLMS with NeMo](https://developer.nvidia.com/blog/unlocking-the-power-of-enterprise-ready-llms-with-nemo/)**
### **Industry Analysis:**
1. **[Emerging Architectures for LLM Applications](https://a16z.com/2023/06/20/emerging-architectures-for-llm-applications/)**
### **Prompt Engineering:**
1. **[Prompting Guide](https://www.promptingguide.ai/)**
2. **[Tree of Thought Prompting: Walking the Path of Unique Approach to Problem Solving](https://www.promptengineering.org/tree-of-thought-prompting-walking-the-path-of-unique-approach-to-problem-solving/)**
## Conclusion
If you enjoy the content or want to try out `cognee` please check out the [github](https://github.com/topoteretes/cognee) and give us a star!

View file

@ -1,183 +0,0 @@
---
draft: False
date: 2023-10-05
tags:
- pydantic
- langchain
- llm
- openai
- functions
- pdfs
authors:
- tricalt
---
# Going beyond Langchain + Weaviate: Level 2 towards Production
### 1.1. The problem of putting code to production
*This post is a part of a series of texts aiming to discover and understand patterns and practices that would enable building a production-ready AI data infrastructure. The main focus is on how to evolve data modeling and retrieval in order to enable Large Language Model (LLM) apps and Agents to serve millions of users concurrently.*
*For a broad overview of the problem and our understanding of the current state of the LLM landscape, check out [our previous post](https://www.prometh.ai/promethai-memory-blog-post-one)*
![infographic (2).png](Going%20beyond%20Langchain%20+%20Weaviate%20Level%202%20towards%20%2098ad7b915139478992c4c4386b5e5886/infographic_(2).png)
In this text, we continue our inquiry into what would constitute:
1. Proper data engineering methods for LLMs
2. A production-ready generative AI data platform that unlocks AI assistants/Agent Networks
To explore these points, we here at [prometh.ai](http://prometh.ai/) have partnered with dlthub in order to productionize a common use case — complex PDF processing — progressing level by level.
In the previous text, we wrote a simple script that relies on the Weaviate Vector database to turn unstructured data into structured data and help us make sense of it.
In this post, some of the shortcomings from the previous level will be addressed, including:
1. Containerization
2. Data model
3. Data contract
4. Vector Database retrieval strategies
5. LLM context and task generation
6. Dynamic Agent behavior and Agent tooling
## 3. Level 2: Memory Layer + FastAPI + Langchain + Weaviate
### 3.1. Developer Intent at Level 2
This phase enhances the basic script by incorporating:
- Memory Manager
The memory manager facilitates the execution and processing of VectorDB data by:
1. Uniformly applying CRUD (Create, Read, Update, Delete) operations across various classes
2. Representing different business domains or concepts, and
3. Ensuring they adhere to a common data model, which regulates all data points across the system.
- Context Manager
This central component processes and analyzes data from Vector DB, evaluates its significance, and compares the results with user-defined benchmarks.
The primary objective is to establish a mechanism that encourages in-context learning and empowers the Agent's adaptive understanding.
As an example, let's assume we uploaded the book *The Call of the Wild* by Jack London to our Vector DB semantic layer, to give our LLM a better understanding of the life of sled dogs in the early 1900s.
Asking a question about the contents of the book will yield a straightforward answer, provided that the book contains an explicit answer to our question.
To enable better question answering and access to additional information such as historical context, summaries, and other documents, we need to introduce different memory stores and a set of **attention modulators**, which are meant to manage the prioritization of data retrieved for the answers.
- Task Manager
Utilizing the tools at hand and guided by the user's prompt, the task manager determines a sequence of actions and their execution order.
For example, let's assume that the user asks: "When was Buck (one of the dogs from *The Call of the Wild*) kidnapped?" and wants the answer translated to German.
This query would be broken down by the task manager into a set of atomic tasks that can then be handed over to the Agent.
The ordered task list could be:
1. Retrieve information about the PDF from the database.
2. Translate the information to German.
- The Agent
AI agents can use computers independently. They can browse the web, use apps, read and write files, make credit card payments, and even autonomously execute processes on your personal computer.
In our case, the Agent has only a few tools at its disposal, such as tools to translate text or structure data. Using these tools, it processes and executes tasks in the sequence they are provided by the Task Manager and the Context Manager.
### 3.2 **Toward the memory layer** - POC at level 2
![Untitled](Going%20beyond%20Langchain%20+%20Weaviate%20Level%202%20towards%20%2098ad7b915139478992c4c4386b5e5886/Untitled.png)
At this stage, our proof of concept (POC) allows uploading a PDF document and requesting specific actions on it such as "load to database", "translate to German", or "convert to JSON." Prior task resolutions and potential operations are assessed by the Context Manager and Task Manager services.
The following set of steps explains the workflow of the POC at level 2:
- Initially, we specify the parameters for the document we wish to upload and define our objective in the prompt:
![Untitled](Going%20beyond%20Langchain%20+%20Weaviate%20Level%202%20towards%20%2098ad7b915139478992c4c4386b5e5886/Untitled%201.png)
- The memory manager retrieves the parameters and the attention modulators and creates context based on Episodic and Semantic memory stores (previous runs of the job + raw data):
![carbon (23).png](Going%20beyond%20Langchain%20+%20Weaviate%20Level%202%20towards%20%2098ad7b915139478992c4c4386b5e5886/carbon_(23).png)
- To do this, it starts by filtering user input, in the same way our brains filter important from redundant information. As an example, if there are children playing and talking loudly in the background during our Zoom meeting, we can still pool our attention together and focus on what the person on the other side is saying.
The same principle is applied here:
![carbon (19).png](Going%20beyond%20Langchain%20+%20Weaviate%20Level%202%20towards%20%2098ad7b915139478992c4c4386b5e5886/carbon_(19).png)
- In the next step, we apply a set of attention modulators to process the data obtained from the Vector Store.
*NOTE: In cognitive science, attention modulators can be thought of as factors or mechanisms that influence the direction and intensity of attention.*
*As we have many memory stores, we need to prioritize the data points that we retrieve via semantic search.*
*Since semantic search is not enough by itself, scoring data points happens via a set of functions that replicate how attention modulators work in cognitive science.*
Initially, we've implemented a few attention modulators that we thought could improve the document retrieval process:
**Frequency**: This refers to how often a specific stimulus or event is encountered. Stimuli that are encountered more frequently are more likely to be attended to or remembered.
**Recency**: This refers to how recently a stimulus or event was encountered. Items or events that occurred more recently are typically easier to recall than those that occurred a long time ago.
We have implemented many more, and you can find them in our
[repository](https://github.com/topoteretes/PromethAI-Memory). More are still needed and contributions are more than welcome.
Let's see the modulators in action:
![carbon (20).png](Going%20beyond%20Langchain%20+%20Weaviate%20Level%202%20towards%20%2098ad7b915139478992c4c4386b5e5886/carbon_(20).png)
In the code above we fetch the memories from the Semantic Memory bank where our knowledge of the world is stored (the PDFs). We select the relevant documents by using the handle_modulator function.
- The handle_modulator function is defined below and explains how scoring of memories happens.
![carbon (21).png](Going%20beyond%20Langchain%20+%20Weaviate%20Level%202%20towards%20%2098ad7b915139478992c4c4386b5e5886/carbon_(21).png)
We process the data retrieved with OpenAI functions and store the results for the Task Manager to be able to determine what actions the Agent should take.
The Task Manager then sorts and converts user input into a set of actionable steps based on the tools available.
![carbon (22).png](Going%20beyond%20Langchain%20+%20Weaviate%20Level%202%20towards%20%2098ad7b915139478992c4c4386b5e5886/carbon_(22).png)
Finally, the Agent interprets the context and performs the steps using the tools it has available. We see this as the step where the Agents take over the task, executing it in their own way.
Now, let's look back at what constitutes the Data Platform:
| Memory type | State | Description |
| --- | --- | --- |
| Sensory Memory | API | Can be interpreted in this context as the interface used for the human input |
| STM | Weaviate Class with hardcoded contract | The processing layer and a storage of the session/user context |
| LTM | Weaviate Class with hardcoded contract | The information storage |
Lacks:
- Extendability: Capability to add features or functionality.
- Loading flexibility: Ability to apply different chunking strategies
- Testability: How to test the code and make sure it runs
**Next Steps**:
1. Implement different strategies for vector search
2. Add more tools to process PDFs
3. Add more attention modulators
4. Add a solid test framework
## Conclusion
If you enjoy the content or want to try out `cognee` please check out the [github](https://github.com/topoteretes/cognee) and give us a star!

View file

@ -1,193 +0,0 @@
---
draft: False
date: 2023-10-05
tags:
- pydantic
- langchain
- llm
- openai
- functions
- pdfs
authors:
- tricalt
---
# Going beyond Langchain + Weaviate: Level 3 towards production
### **Preface**
This post is part of a series of texts aiming to explore and understand patterns and practices that enable the construction of a production-ready AI data infrastructure. The main focus of the series is on the modeling and retrieval of evolving data, which would empower Large Language Model (LLM) apps and Agents to serve millions of users concurrently.
For a broad overview of the problem and our understanding of the current state of the LLM landscape, check out our initial post [here](https://www.prometh.ai/promethai-memory-blog-post-one).
In this post, we delve into context enrichment and testing in Retrieval Augmented Generation (RAG) applications.
RAG applications can retrieve relevant information from a knowledge base and generate detailed, context-aware answers to user queries.
As we are trying to improve on the base information LLMs are giving us, we need to be able to retrieve and understand more complex data, which can be stored in various data stores, in many formats, and using different techniques.
All of this leads to a lot of opportunities, but also creates a lot of confusion in generating and using RAG applications and extending the existing context of LLMs with new knowledge.
### **1. Context Enrichment and Testing in RAG Applications**
In navigating the complexities of RAG applications, the first challenge we face is the need for robust testing. Determining whether augmenting a LLM's context with additional information will yield better results is far from straightforward and often relies on subjective assessments.
Imagine, for instance, adding the digital version of the book *The Adventures of Tom Sawyer* to the LLM's database in order to enrich its context and obtain more detailed answers about the book's content for a paper we're writing. To evaluate this enhancement, we need a way to measure the accuracy of the responses before and after adding the book while considering the variations of every adjustable parameter.
### **2. Adjustable Parameters in RAG Applications**
The end-to-end process of enhancing RAG applications involves various adjustable parameters, which offer multiple paths toward achieving similar goals with varying outcomes. These parameters include:
1. Number of documents loaded into memory.
2. Size of each sub-document chunk uploaded.
3. Overlap between documents uploaded.
4. Relationship between documents (Parent-Child, etc.)
5. Type of embedding used for data-to-vector conversion (OpenAI, Cohere, or any other embedding method).
6. Metadata structure for data navigation.
7. Indexes and data structures.
8. Search methods (text, semantic, or fusion search).
9. Output retrieval and scoring methods.
10. Integration of outputs with other data for in-context learning.
11. Structure of the final output.
### **3. The Role of Memory Manager at Level 3**
**Memory Layer + FastAPI + Langchain + Weaviate**
**3.1. Developer Intent at Level 3**
The goal we set for our system in our [initial post](https://www.prometh.ai/promethai-memory-blog-post-one) — processing and creating structured data from PDFs — presented an interesting set of problems to solve. OpenAI functions and [dlthub](https://dlthub.com/) allowed us to accomplish this task relatively quickly.
The real issue arises when we try to scale this task — this is what our [second post](https://www.notion.so/Going-beyond-Langchain-Weaviate-Level-2-towards-Production-98ad7b915139478992c4c4386b5e5886?pvs=21) tried to address. In addition, retrieving meaningful data from the Vector Databases turned out to be much more challenging than initially imagined.
In this post, we'll discuss how we can establish a testing method, improve our ability to retrieve the information we've processed, and make the codebase more robust and production-ready.
We'll primarily focus on the following:
1. Memory Manager
The Memory Manager is a set of functions and tools for creating dynamic memory objects. In our previous blog posts, we explored the application of concepts from cognitive science —  Short-Term Memory, Long-Term Memory, and Cognitive Buffer — on Agent Network development.
We might need to add more memory domains to the process, as sticking to just these three can pose limitations. Changes in the codebase now enable real-time creation of dynamic memory objects, which have hierarchical relationships and can relate to each other.
2. RAG test tool
The RAG test tool allows us to control critical parameters for optimizing and testing RAG applications, including chunk size, chunk overlap, search type, metadata structure, and more.
The Memory Manager is a crucial component of any cognitive architecture platform. In our previous posts, we've discussed how to turn unstructured data into structured data, how to relate concepts to each other in the vector store, and which problems can arise when productionizing these systems.
While we've addressed many open questions, many still remain. Based on our surveys and interviews with field experts, applications utilizing Memory components face the following challenges:
1. Inability to reliably link between Memories
Relying solely on semantic search or its derivatives to recognize the similarities between terms like "pair" and "combine" is a step forward. However, actually defining, capturing, and quantifying the relationships between any two objects would aid future memory access.
Solution: Graphs/Traditional DB
2. Failure to structure and organize Memories
We used OpenAI functions to structure and organize different Memory elements and convert them into understandable JSONs. Nevertheless, our surveys indicate that many people struggle with metadata management and the structure of retrievals. Ideally, these aspects should all be managed and organized in one place.
Solution: OpenAI functions/Data contracting/Metadata management
3. Hierarchy, size, and relationships of individual Memory elements
Although semantic search helps us understand the same concepts, we need to add more abstract concepts and ideas and link them. The ultimate goal is to emulate human understanding of the world, which comprises basic concepts that, when combined, create higher complexity objects.
Solution: Graphs/Custom solutions
4. Evaluation possibilities of memory components (can they be distilled to True/False)
Based on the [psycholinguistic theories proposed by Walter Kintsch](https://www.colorado.edu/ics/sites/default/files/attached-files/90-15.pdf), any cognitive system should be able to provide True/False evaluations. Kintsch defines a basic memory component, a proposition, which can be evaluated as True or False and can interlink with other Memory components.
A proposition could be, for example, "The sky is blue," and its evaluation to True/False could lead to actions such as "Do not bring an umbrella" or "Wear a t-shirt."
Potential solution: Particular memory structure
### Testability of Memory components
We should have a reliable method to test Memory components, at scale, for any number of use-cases. We need benchmarks across every level of testing to capture and define predicted behavior.
Suppose we need to test if Memory data from six months ago can be retrieved by our system and measure how much it contributes to a response that spans memories that are years old.
Solution: RAG testing framework
![Dashboard_example.png](Going%20beyond%20Langchain%20+%20Weaviate%20Level%203%20towards%20%20e62946c272bf412584b12fbbf92d35b0/Dashboard_example.png)
Let's look at the RAG testing framework:
It allows you to test and combine all variations of:
1. Number of documents loaded into memory. ✅
2. Size of each sub-document chunk uploaded. ✅
3. Overlap between documents uploaded. ✅
4. Relationship between documents (Parent-Child, etc.) 👷🏻‍♂️
5. Type of embedding used for data-to-vector conversion (OpenAI, Cohere, or any other embedding method). ✅
6. Metadata structure for data navigation. ✅
7. Indexes and data structures. ✅
8. Search methods (text, semantic, or fusion search). ✅
9. Output retrieval and scoring methods. 👷🏻‍♂️
10. Integration of outputs with other data for in-context learning. 👷🏻‍♂️
11. Structure of the final output. ✅
These parameters and the results of the tests will be stored in a Postgres database and can be visualized using Superset.
To try it, navigate to: https://github.com/topoteretes/PromethAI-Memory
Copy the .env.template to .env and fill in the variables
Specify the environment variable in the .env file to "local"
Use the poetry environment:
`poetry shell`
Change the .env file Environment variable to "local"
Launch the postgres DB
`docker compose up postgres`
Launch the superset
`docker compose up superset`
Open the superset in your browser
`http://localhost:8088` Add the Postgres datasource to the Superset with the following connection string:
`postgres://bla:bla@postgres:5432/bubu`
Run the following to initialize the DB tables:
`python scripts/create_database.py`
After that, you can run the RAG test manager from your command line.
```
python rag_test_manager.py \
--file ".data" \
--test_set "example_data/test_set.json" \
--user_id "97980cfea0067" \
--params "chunk_size" "search_type" \
--metadata "example_data/metadata.json" \
--retriever_type "single_document_context"
```
Examples of metadata structure and test set are in the folder "example_data"
## Conclusion
If you enjoy the content or want to try out `cognee` please check out the [github](https://github.com/topoteretes/cognee) and give us a star!

View file

@ -1,120 +0,0 @@
---
draft: False
date: 2023-12-05
tags:
- pydantic
- langchain
- llm
- openai
- functions
- pdfs
authors:
- tricalt
---
# Going beyond Langchain + Weaviate: Level 4 towards production
### **Preface**
This post is part of a series of texts aiming to explore and understand patterns and practices that enable the construction of a production-ready AI data infrastructure. The series mainly focuses on the modeling and retrieval of evolving data, which would empower Large Language Model (LLM) apps and Agents to serve millions of users concurrently.
For a broad overview of the problem and our understanding of the current state of the LLM landscape, check out our initial post [here](https://www.prometh.ai/promethai-memory-blog-post-one).
![infographic (2).png](Topoteretes%20-%20General%20d6a605ab1d8243e489146b82eca935a1/PromethAI%20-%20long-term%20vision%20cf4f1d9b21d04239905d02322f0609c5/Berlin%20meetup%20-%20product%20demo%201283443e7b204c71a3ba8d291cf11f68/Blog%20post%20b6bd59a859fe4b4cb954760c94548ff2/Going%20beyond%20Langchain%20+%20Weaviate%20Level%202%20towards%20%2098ad7b915139478992c4c4386b5e5886/infographic_(2).png)
In this post, we delve into creating an initial data platform that can represent the core component of the future MlOps stack. Building a data platform is a big challenge in itself, and many solutions are available to help automate data tracking, ingestion, data contracting, monitoring, and warehousing.
In the last decade, data analytics and engineering fields have undergone significant transformations, shifting from storing data in centralized, siloed Oracle and SQL Server warehouses to a more agile, modular approach involving real-time data and cloud solutions like BigQuery and Snowflake.
Data processing evolved from an inessential activity, whose value would be inflated to please investors during the startup valuation phase, to a fundamental component of product development.
As we enter a new paradigm of interacting with systems through natural language, it's important to recognize that, while this method promises efficiency, it also comes with the challenges inherent in the imperfections of human language.
Suppose we want to use natural language as a new programming tool. In that case, we will need to either impose more constraints on it or make our systems more flexible so that they can adapt to the equivocal nature of language and information.
Our main goal should be to offer consistency, reproducibility, and more — ideally using language as a basic building block for things to come.
In order to come up with a set of solutions that could enable us to move forward, in this series of posts, we call on theoretical models from cognitive science and try to incorporate them into data engineering practices.
## **Level 4: Memory architecture and a first integration with keepi.ai**
In our [initial post](https://www.notion.so/Going-beyond-Langchain-Weaviate-and-towards-a-production-ready-modern-data-platform-7351d77a1eba40aab4394c24bef3a278?pvs=21)**,** we started out conceptualizing a simple retrieval-augmented generation (RAG) model whose aim was to process and understand PDF documents.
We faced many bottlenecks in scaling these tasks, so in our [second post](https://www.notion.so/Going-beyond-Langchain-Weaviate-Level-2-towards-Production-98ad7b915139478992c4c4386b5e5886?pvs=21), we needed to introduce the concept of memory domains.
In the [next step](https://www.notion.so/Going-beyond-Langchain-Weaviate-Level-3-towards-production-e62946c272bf412584b12fbbf92d35b0?pvs=21), the focus was mainly on understanding what makes a good RAG considering all possible variables.
In this post, we address the fundamental question of the feasibility of extending LLMs beyond the data on which they were trained.
As a Microsoft research team recently [stated](https://www.microsoft.com/en-us/research/blog/graphrag-unlocking-llm-discovery-on-narrative-private-data/):
- Baseline RAG struggles to connect the dots when answering a question requires providing synthesized insights by traversing disparate pieces of information through their shared attributes.
- Baseline RAG performs poorly when asked to understand summarized semantic concepts holistically over large data collections or even singular large documents.
To fill these gaps in RAG performance, we built a new framework—[cognee](https://www.notion.so/Change-button-Submit-appearance-when-clicked-on-www-prometh-ai-13e59427636940598a0fd3938a2d2253?pvs=21).
Cognee *combines human-inspired cognitive processes with efficient data management practices, infusing data points with more meaningful relationships to represent the (often messy) natural world in code more accurately.*
Our observations indicate that systems, agents, and interactions often falter due to overextension and haste.
However, given the extensive demands and expectations surrounding Large Language Models (LLMs), addressing every aspect—agents, actions, integrations, and schedulers—is beyond the scope of the framework's mission.
We've chosen to prioritize data, recognizing that the crux of many issues has already been addressed within the realm of data engineering.
We aim to establish a framework that includes file storage, tracing, and the development of robust AI memory data pipelines to help us manage and structure data more efficiently through its transformation processes.
Subsequently, our goal will be to devise methods for navigating diverse information segments and determine the most effective application of graph databases to store this data.
Our initial hypothesis—enhancing data management in vector stores through manipulative techniques and attention modulators for input and retrieval—proved less effective than anticipated.
Deconstructing and reorganizing data via graph databases emerged as a superior strategy, allowing us to adapt and repurpose existing tools for our needs more effectively.
| AI Memory type | State in Level 2 | State in Level 4 | Description |
| --- | --- | --- | --- |
| Sensory Memory | API | API | Can be interpreted in this context as the interface used for the human input |
| STM | Weaviate Class with hardcoded contract | Neo4j with a connection to a Weaviate class | The processing layer and a storage of the session/user context |
| LTM | Weaviate Class with hardcoded contract | Neo4j with a connection to a Weaviate class | The information storage |
On Level 4, we describe the integration of keepi, a chatGPT-powered WhatsApp bot that collects and summarizes information, via API endpoints.
Then, once we've ensured that we have a robust, scalable infrastructure, we deploy cognee to the cloud.
### **Workflow Overview**
![How_cognee_works.png](Going%20beyond%20Langchain%20+%20Weaviate%20Level%204%20towards%20%20fe90ff40e56e44c4a49f1492d360173c/How_cognee_works.png)
Steps:
1. Users submit queries or documents for storage via the [keepi.ai](http://keepi.ai/) WhatsApp bot. This step integrates with the [keepi.ai](http://keepi.ai/) platform, utilizing Cognee endpoints for processing.
2. The Cognee manager handles the incoming request and collaborates with several components:
1. Relational database: Manages state and metadata related to operations.
2. Classifier: Identifies, organizes, and enhances the content.
3. Loader: Archives data in vector databases.
3. The Graph Manager and Vector Store Manager collaboratively process and organize the input into structured nodes. A key function of the system involves breaking down user input into propositions—basic statements retaining factual content. These propositions are interconnected through relationships and cataloged in the Neo4j database by the Graph Manager, associated with specific user nodes. Users are represented by memory nodes that capture various memory levels, some of which link back to the raw data in vector databases.
### **What's next**
We're diligently developing our upcoming features, with key objectives including:
1. Numerically defining and organizing the strengths of relationships within graphs.
2. Creating a structured data model with opinions to facilitate document structure and data extraction.
3. Converting Cognee into a Python library for easier integration.
4. Broadening our database compatibility to support a broader range of systems.
Make sure to explore our [implementation](https://github.com/topoteretes/cognee) on GitHub, and, if you find it valuable, consider starring it to show your support.
## Conclusion
If you enjoy the content or want to try out `cognee` please check out the [github](https://github.com/topoteretes/cognee) and give us a star!

View file

@ -1,15 +0,0 @@
---
draft: True
date: 2024-04-02
tags:
- pydantic
- langchain
- llm
- openai
- functions
- pdfs
authors:
- tricalt
---
#tbd

View file

@ -1,15 +0,0 @@
---
draft: True
date: 2024-04-02
tags:
- pydantic
- langchain
- llm
- openai
- functions
- pdfs
authors:
- tricalt
---
#tbd

View file

@ -6,15 +6,9 @@
To run cognee, you will need the following:
1. A running postgres instance
2. OpenAI API key (Ollama or Anyscale could work as [well](local_models.md))
1. OpenAI API key (Ollama or Anyscale could work as [well](local_models.md))
Navigate to cognee folder and run
```
docker compose up postgres
```
Add your LLM API key to the enviroment variables
Add your LLM API key to the environment variables
```
import os
@ -28,10 +22,28 @@ cognee.config.llm_api_key = "YOUR_OPENAI_API_KEY"
```
If you are using Networkx, create an account on Graphistry to vizualize results:
```
cognee.config.set_graphistry_username = "YOUR_USERNAME"
cognee.config.set_graphistry_password = "YOUR_PASSWORD"
cognee.config.set_graphistry_config({
"username": "YOUR_USERNAME",
"password": "YOUR_PASSWORD"
})
```
If you want to run Postgres instead of Sqlite, run postgres Docker container.
Navigate to cognee folder and run:
```
docker compose up postgres
```
Add the following environment variables to .env file
```
DB_HOST=127.0.0.1
DB_PORT=5432
DB_USERNAME=cognee # or any username you want
DB_PASSWORD=cognee # or any password you want
DB_NAME=cognee_db # or any db name you want
DB_PROVIDER=postgres
```
## Run
cognee is asynchronous by design, meaning that operations like adding information, processing it, and querying it can run concurrently without blocking the execution of other tasks.
@ -43,15 +55,15 @@ import cognee
text = """Natural language processing (NLP) is an interdisciplinary
subfield of computer science and information retrieval"""
cognee.add(text) # Add a new piece of information
await cognee.add(text) # Add a new piece of information
cognee.cognify() # Use LLMs and cognee to create knowledge
await cognee.cognify() # Use LLMs and cognee to create knowledge
search_results = cognee.search("SIMILARITY", {'query': 'Tell me about NLP'}) # Query cognee for the knowledge
search_results = await cognee.search("INSIGHTS", {'query': 'Tell me about NLP'}) # Query cognee for the knowledge
for result_text in search_results[0]:
for result_text in search_results:
print(result_text)
```
In the example above, we add a piece of information to cognee, use LLMs to create a GraphRAG, and then query cognee for the knowledge.
cognee is composable and you can build your own cognee pipelines using our [templates.](templates.md)
cognee is composable and you can build your own cognee pipelines using our [templates.](templates.md)

View file

@ -6,54 +6,16 @@ This module contains the search function that is used to search for nodes in the
The `SearchType` enum defines the different types of searches that can be performed:
- `ADJACENT`: Search for nodes adjacent to a given node.
- `TRAVERSE`: Traverse the graph to find related nodes.
- `SIMILARITY`: Find nodes similar to a given node.
- `SUMMARY`: Retrieve a summary of the node.
- `SUMMARY_CLASSIFICATION`: Classify the summary of the node.
- `NODE_CLASSIFICATION`: Classify the node.
- `DOCUMENT_CLASSIFICATION`: Classify the document.
- `CYPHER`: Perform a Cypher query on the graph.
- `INSIGHTS`: Search for insights from the knowledge graph.
- `SUMMARIES`: Search for summaries of the texts provided.
- `CHUNKS`: Search for the whole chunks of data.
### Search Parameters
The `SearchParameters` class is a Pydantic model that validates and holds the search parameters:
```python
class SearchParameters(BaseModel):
search_type: SearchType
params: Dict[str, Any]
@field_validator("search_type", mode="before")
def convert_string_to_enum(cls, value):
if isinstance(value, str):
return SearchType.from_str(value)
return value
```
### Search Function
The `search` function is the main entry point for performing a search. It handles user authentication, retrieves document IDs for the user, and filters the search results based on user permissions.
```python
async def search(search_type: str, params: Dict[str, Any], user: User = None) -> List:
if user is None:
user = await get_default_user()
own_document_ids = await get_document_ids_for_user(user.id)
search_params = SearchParameters(search_type=search_type, params=params)
search_results = await specific_search([search_params])
from uuid import UUID
filtered_search_results = []
for search_result in search_results:
document_id = search_result["document_id"] if "document_id" in search_result else None
document_id = UUID(document_id) if type(document_id) == str else document_id
if document_id is None or document_id in own_document_ids:
filtered_search_results.append(search_result)
return filtered_search_results
from cognee import search, SearchType
await search(SearchType.INSIGHTS, "your_query")
```

View file

@ -15,4 +15,5 @@ if [ "$ENVIRONMENT" = "dev" ]; then
fi
else
gunicorn -w 3 -k uvicorn.workers.UvicornWorker -t 30000 --bind=0.0.0.0:8000 --log-level error cognee.api.client:app
# python ./cognee/api/client.py
fi

View file

@ -98,11 +98,10 @@ async def cognify_search_base_rag(content:str, context:str):
return return_
async def cognify_search_graph(content:str, context:str):
from cognee.api.v1.search.search import search
search_type = 'SIMILARITY'
from cognee.api.v1.search import search, SearchType
params = {'query': 'Donald Trump'}
results = await search(search_type, params)
results = await search(SearchType.INSIGHTS, params)
print("results", results)
return results

119
poetry.lock generated
View file

@ -168,13 +168,13 @@ docs = ["sphinx (==7.2.6)", "sphinx-mdinclude (==0.5.3)"]
[[package]]
name = "alembic"
version = "1.13.2"
version = "1.13.3"
description = "A database migration tool for SQLAlchemy."
optional = false
python-versions = ">=3.8"
files = [
{file = "alembic-1.13.2-py3-none-any.whl", hash = "sha256:6b8733129a6224a9a711e17c99b08462dbf7cc9670ba8f2e2ae9af860ceb1953"},
{file = "alembic-1.13.2.tar.gz", hash = "sha256:1ff0ae32975f4fd96028c39ed9bb3c867fe3af956bd7bb37343b54c9fe7445ef"},
{file = "alembic-1.13.3-py3-none-any.whl", hash = "sha256:908e905976d15235fae59c9ac42c4c5b75cfcefe3d27c0fbf7ae15a37715d80e"},
{file = "alembic-1.13.3.tar.gz", hash = "sha256:203503117415561e203aa14541740643a611f641517f0209fcae63e9fa09f1a2"},
]
[package.dependencies]
@ -6599,69 +6599,70 @@ files = [
[[package]]
name = "sqlalchemy"
version = "2.0.21"
version = "2.0.35"
description = "Database Abstraction Library"
optional = false
python-versions = ">=3.7"
files = [
{file = "SQLAlchemy-2.0.21-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1e7dc99b23e33c71d720c4ae37ebb095bebebbd31a24b7d99dfc4753d2803ede"},
{file = "SQLAlchemy-2.0.21-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7f0c4ee579acfe6c994637527c386d1c22eb60bc1c1d36d940d8477e482095d4"},
{file = "SQLAlchemy-2.0.21-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f7d57a7e140efe69ce2d7b057c3f9a595f98d0bbdfc23fd055efdfbaa46e3a5"},
{file = "SQLAlchemy-2.0.21-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ca38746eac23dd7c20bec9278d2058c7ad662b2f1576e4c3dbfcd7c00cc48fa"},
{file = "SQLAlchemy-2.0.21-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3cf229704074bce31f7f47d12883afee3b0a02bb233a0ba45ddbfe542939cca4"},
{file = "SQLAlchemy-2.0.21-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fb87f763b5d04a82ae84ccff25554ffd903baafba6698e18ebaf32561f2fe4aa"},
{file = "SQLAlchemy-2.0.21-cp310-cp310-win32.whl", hash = "sha256:89e274604abb1a7fd5c14867a412c9d49c08ccf6ce3e1e04fffc068b5b6499d4"},
{file = "SQLAlchemy-2.0.21-cp310-cp310-win_amd64.whl", hash = "sha256:e36339a68126ffb708dc6d1948161cea2a9e85d7d7b0c54f6999853d70d44430"},
{file = "SQLAlchemy-2.0.21-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bf8eebccc66829010f06fbd2b80095d7872991bfe8415098b9fe47deaaa58063"},
{file = "SQLAlchemy-2.0.21-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b977bfce15afa53d9cf6a632482d7968477625f030d86a109f7bdfe8ce3c064a"},
{file = "SQLAlchemy-2.0.21-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ff3dc2f60dbf82c9e599c2915db1526d65415be323464f84de8db3e361ba5b9"},
{file = "SQLAlchemy-2.0.21-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44ac5c89b6896f4740e7091f4a0ff2e62881da80c239dd9408f84f75a293dae9"},
{file = "SQLAlchemy-2.0.21-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:87bf91ebf15258c4701d71dcdd9c4ba39521fb6a37379ea68088ce8cd869b446"},
{file = "SQLAlchemy-2.0.21-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b69f1f754d92eb1cc6b50938359dead36b96a1dcf11a8670bff65fd9b21a4b09"},
{file = "SQLAlchemy-2.0.21-cp311-cp311-win32.whl", hash = "sha256:af520a730d523eab77d754f5cf44cc7dd7ad2d54907adeb3233177eeb22f271b"},
{file = "SQLAlchemy-2.0.21-cp311-cp311-win_amd64.whl", hash = "sha256:141675dae56522126986fa4ca713739d00ed3a6f08f3c2eb92c39c6dfec463ce"},
{file = "SQLAlchemy-2.0.21-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:56628ca27aa17b5890391ded4e385bf0480209726f198799b7e980c6bd473bd7"},
{file = "SQLAlchemy-2.0.21-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:db726be58837fe5ac39859e0fa40baafe54c6d54c02aba1d47d25536170b690f"},
{file = "SQLAlchemy-2.0.21-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e7421c1bfdbb7214313919472307be650bd45c4dc2fcb317d64d078993de045b"},
{file = "SQLAlchemy-2.0.21-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:632784f7a6f12cfa0e84bf2a5003b07660addccf5563c132cd23b7cc1d7371a9"},
{file = "SQLAlchemy-2.0.21-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:f6f7276cf26145a888f2182a98f204541b519d9ea358a65d82095d9c9e22f917"},
{file = "SQLAlchemy-2.0.21-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2a1f7ffac934bc0ea717fa1596f938483fb8c402233f9b26679b4f7b38d6ab6e"},
{file = "SQLAlchemy-2.0.21-cp312-cp312-win32.whl", hash = "sha256:bfece2f7cec502ec5f759bbc09ce711445372deeac3628f6fa1c16b7fb45b682"},
{file = "SQLAlchemy-2.0.21-cp312-cp312-win_amd64.whl", hash = "sha256:526b869a0f4f000d8d8ee3409d0becca30ae73f494cbb48801da0129601f72c6"},
{file = "SQLAlchemy-2.0.21-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7614f1eab4336df7dd6bee05bc974f2b02c38d3d0c78060c5faa4cd1ca2af3b8"},
{file = "SQLAlchemy-2.0.21-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d59cb9e20d79686aa473e0302e4a82882d7118744d30bb1dfb62d3c47141b3ec"},
{file = "SQLAlchemy-2.0.21-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a95aa0672e3065d43c8aa80080cdd5cc40fe92dc873749e6c1cf23914c4b83af"},
{file = "SQLAlchemy-2.0.21-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:8c323813963b2503e54d0944813cd479c10c636e3ee223bcbd7bd478bf53c178"},
{file = "SQLAlchemy-2.0.21-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:419b1276b55925b5ac9b4c7044e999f1787c69761a3c9756dec6e5c225ceca01"},
{file = "SQLAlchemy-2.0.21-cp37-cp37m-win32.whl", hash = "sha256:4615623a490e46be85fbaa6335f35cf80e61df0783240afe7d4f544778c315a9"},
{file = "SQLAlchemy-2.0.21-cp37-cp37m-win_amd64.whl", hash = "sha256:cca720d05389ab1a5877ff05af96551e58ba65e8dc65582d849ac83ddde3e231"},
{file = "SQLAlchemy-2.0.21-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b4eae01faee9f2b17f08885e3f047153ae0416648f8e8c8bd9bc677c5ce64be9"},
{file = "SQLAlchemy-2.0.21-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3eb7c03fe1cd3255811cd4e74db1ab8dca22074d50cd8937edf4ef62d758cdf4"},
{file = "SQLAlchemy-2.0.21-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c2d494b6a2a2d05fb99f01b84cc9af9f5f93bf3e1e5dbdafe4bed0c2823584c1"},
{file = "SQLAlchemy-2.0.21-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b19ae41ef26c01a987e49e37c77b9ad060c59f94d3b3efdfdbf4f3daaca7b5fe"},
{file = "SQLAlchemy-2.0.21-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:fc6b15465fabccc94bf7e38777d665b6a4f95efd1725049d6184b3a39fd54880"},
{file = "SQLAlchemy-2.0.21-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:014794b60d2021cc8ae0f91d4d0331fe92691ae5467a00841f7130fe877b678e"},
{file = "SQLAlchemy-2.0.21-cp38-cp38-win32.whl", hash = "sha256:0268256a34806e5d1c8f7ee93277d7ea8cc8ae391f487213139018b6805aeaf6"},
{file = "SQLAlchemy-2.0.21-cp38-cp38-win_amd64.whl", hash = "sha256:73c079e21d10ff2be54a4699f55865d4b275fd6c8bd5d90c5b1ef78ae0197301"},
{file = "SQLAlchemy-2.0.21-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:785e2f2c1cb50d0a44e2cdeea5fd36b5bf2d79c481c10f3a88a8be4cfa2c4615"},
{file = "SQLAlchemy-2.0.21-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c111cd40910ffcb615b33605fc8f8e22146aeb7933d06569ac90f219818345ef"},
{file = "SQLAlchemy-2.0.21-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9cba4e7369de663611ce7460a34be48e999e0bbb1feb9130070f0685e9a6b66"},
{file = "SQLAlchemy-2.0.21-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50a69067af86ec7f11a8e50ba85544657b1477aabf64fa447fd3736b5a0a4f67"},
{file = "SQLAlchemy-2.0.21-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ccb99c3138c9bde118b51a289d90096a3791658da9aea1754667302ed6564f6e"},
{file = "SQLAlchemy-2.0.21-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:513fd5b6513d37e985eb5b7ed89da5fd9e72354e3523980ef00d439bc549c9e9"},
{file = "SQLAlchemy-2.0.21-cp39-cp39-win32.whl", hash = "sha256:f9fefd6298433b6e9188252f3bff53b9ff0443c8fde27298b8a2b19f6617eeb9"},
{file = "SQLAlchemy-2.0.21-cp39-cp39-win_amd64.whl", hash = "sha256:2e617727fe4091cedb3e4409b39368f424934c7faa78171749f704b49b4bb4ce"},
{file = "SQLAlchemy-2.0.21-py3-none-any.whl", hash = "sha256:ea7da25ee458d8f404b93eb073116156fd7d8c2a776d8311534851f28277b4ce"},
{file = "SQLAlchemy-2.0.21.tar.gz", hash = "sha256:05b971ab1ac2994a14c56b35eaaa91f86ba080e9ad481b20d99d77f381bb6258"},
{file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:67219632be22f14750f0d1c70e62f204ba69d28f62fd6432ba05ab295853de9b"},
{file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4668bd8faf7e5b71c0319407b608f278f279668f358857dbfd10ef1954ac9f90"},
{file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb8bea573863762bbf45d1e13f87c2d2fd32cee2dbd50d050f83f87429c9e1ea"},
{file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f552023710d4b93d8fb29a91fadf97de89c5926c6bd758897875435f2a939f33"},
{file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:016b2e665f778f13d3c438651dd4de244214b527a275e0acf1d44c05bc6026a9"},
{file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7befc148de64b6060937231cbff8d01ccf0bfd75aa26383ffdf8d82b12ec04ff"},
{file = "SQLAlchemy-2.0.35-cp310-cp310-win32.whl", hash = "sha256:22b83aed390e3099584b839b93f80a0f4a95ee7f48270c97c90acd40ee646f0b"},
{file = "SQLAlchemy-2.0.35-cp310-cp310-win_amd64.whl", hash = "sha256:a29762cd3d116585278ffb2e5b8cc311fb095ea278b96feef28d0b423154858e"},
{file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e21f66748ab725ade40fa7af8ec8b5019c68ab00b929f6643e1b1af461eddb60"},
{file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8a6219108a15fc6d24de499d0d515c7235c617b2540d97116b663dade1a54d62"},
{file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:042622a5306c23b972192283f4e22372da3b8ddf5f7aac1cc5d9c9b222ab3ff6"},
{file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:627dee0c280eea91aed87b20a1f849e9ae2fe719d52cbf847c0e0ea34464b3f7"},
{file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4fdcd72a789c1c31ed242fd8c1bcd9ea186a98ee8e5408a50e610edfef980d71"},
{file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:89b64cd8898a3a6f642db4eb7b26d1b28a497d4022eccd7717ca066823e9fb01"},
{file = "SQLAlchemy-2.0.35-cp311-cp311-win32.whl", hash = "sha256:6a93c5a0dfe8d34951e8a6f499a9479ffb9258123551fa007fc708ae2ac2bc5e"},
{file = "SQLAlchemy-2.0.35-cp311-cp311-win_amd64.whl", hash = "sha256:c68fe3fcde03920c46697585620135b4ecfdfc1ed23e75cc2c2ae9f8502c10b8"},
{file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:eb60b026d8ad0c97917cb81d3662d0b39b8ff1335e3fabb24984c6acd0c900a2"},
{file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6921ee01caf375363be5e9ae70d08ce7ca9d7e0e8983183080211a062d299468"},
{file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8cdf1a0dbe5ced887a9b127da4ffd7354e9c1a3b9bb330dce84df6b70ccb3a8d"},
{file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93a71c8601e823236ac0e5d087e4f397874a421017b3318fd92c0b14acf2b6db"},
{file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e04b622bb8a88f10e439084486f2f6349bf4d50605ac3e445869c7ea5cf0fa8c"},
{file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1b56961e2d31389aaadf4906d453859f35302b4eb818d34a26fab72596076bb8"},
{file = "SQLAlchemy-2.0.35-cp312-cp312-win32.whl", hash = "sha256:0f9f3f9a3763b9c4deb8c5d09c4cc52ffe49f9876af41cc1b2ad0138878453cf"},
{file = "SQLAlchemy-2.0.35-cp312-cp312-win_amd64.whl", hash = "sha256:25b0f63e7fcc2a6290cb5f7f5b4fc4047843504983a28856ce9b35d8f7de03cc"},
{file = "SQLAlchemy-2.0.35-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f021d334f2ca692523aaf7bbf7592ceff70c8594fad853416a81d66b35e3abf9"},
{file = "SQLAlchemy-2.0.35-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05c3f58cf91683102f2f0265c0db3bd3892e9eedabe059720492dbaa4f922da1"},
{file = "SQLAlchemy-2.0.35-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:032d979ce77a6c2432653322ba4cbeabf5a6837f704d16fa38b5a05d8e21fa00"},
{file = "SQLAlchemy-2.0.35-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:2e795c2f7d7249b75bb5f479b432a51b59041580d20599d4e112b5f2046437a3"},
{file = "SQLAlchemy-2.0.35-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:cc32b2990fc34380ec2f6195f33a76b6cdaa9eecf09f0c9404b74fc120aef36f"},
{file = "SQLAlchemy-2.0.35-cp37-cp37m-win32.whl", hash = "sha256:9509c4123491d0e63fb5e16199e09f8e262066e58903e84615c301dde8fa2e87"},
{file = "SQLAlchemy-2.0.35-cp37-cp37m-win_amd64.whl", hash = "sha256:3655af10ebcc0f1e4e06c5900bb33e080d6a1fa4228f502121f28a3b1753cde5"},
{file = "SQLAlchemy-2.0.35-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4c31943b61ed8fdd63dfd12ccc919f2bf95eefca133767db6fbbd15da62078ec"},
{file = "SQLAlchemy-2.0.35-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a62dd5d7cc8626a3634208df458c5fe4f21200d96a74d122c83bc2015b333bc1"},
{file = "SQLAlchemy-2.0.35-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0630774b0977804fba4b6bbea6852ab56c14965a2b0c7fc7282c5f7d90a1ae72"},
{file = "SQLAlchemy-2.0.35-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d625eddf7efeba2abfd9c014a22c0f6b3796e0ffb48f5d5ab106568ef01ff5a"},
{file = "SQLAlchemy-2.0.35-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ada603db10bb865bbe591939de854faf2c60f43c9b763e90f653224138f910d9"},
{file = "SQLAlchemy-2.0.35-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c41411e192f8d3ea39ea70e0fae48762cd11a2244e03751a98bd3c0ca9a4e936"},
{file = "SQLAlchemy-2.0.35-cp38-cp38-win32.whl", hash = "sha256:d299797d75cd747e7797b1b41817111406b8b10a4f88b6e8fe5b5e59598b43b0"},
{file = "SQLAlchemy-2.0.35-cp38-cp38-win_amd64.whl", hash = "sha256:0375a141e1c0878103eb3d719eb6d5aa444b490c96f3fedab8471c7f6ffe70ee"},
{file = "SQLAlchemy-2.0.35-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ccae5de2a0140d8be6838c331604f91d6fafd0735dbdcee1ac78fc8fbaba76b4"},
{file = "SQLAlchemy-2.0.35-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2a275a806f73e849e1c309ac11108ea1a14cd7058577aba962cd7190e27c9e3c"},
{file = "SQLAlchemy-2.0.35-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:732e026240cdd1c1b2e3ac515c7a23820430ed94292ce33806a95869c46bd139"},
{file = "SQLAlchemy-2.0.35-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:890da8cd1941fa3dab28c5bac3b9da8502e7e366f895b3b8e500896f12f94d11"},
{file = "SQLAlchemy-2.0.35-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c0d8326269dbf944b9201911b0d9f3dc524d64779a07518199a58384c3d37a44"},
{file = "SQLAlchemy-2.0.35-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b76d63495b0508ab9fc23f8152bac63205d2a704cd009a2b0722f4c8e0cba8e0"},
{file = "SQLAlchemy-2.0.35-cp39-cp39-win32.whl", hash = "sha256:69683e02e8a9de37f17985905a5eca18ad651bf592314b4d3d799029797d0eb3"},
{file = "SQLAlchemy-2.0.35-cp39-cp39-win_amd64.whl", hash = "sha256:aee110e4ef3c528f3abbc3c2018c121e708938adeeff9006428dd7c8555e9b3f"},
{file = "SQLAlchemy-2.0.35-py3-none-any.whl", hash = "sha256:2ab3f0336c0387662ce6221ad30ab3a5e6499aab01b9790879b6578fd9b8faa1"},
{file = "sqlalchemy-2.0.35.tar.gz", hash = "sha256:e11d7ea4d24f0a262bccf9a7cd6284c976c5369dac21db237cff59586045ab9f"},
]
[package.dependencies]
greenlet = {version = "!=0.4.17", optional = true, markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\" or extra == \"asyncio\""}
typing-extensions = ">=4.2.0"
greenlet = {version = "!=0.4.17", optional = true, markers = "python_version < \"3.13\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\") or extra == \"asyncio\""}
typing-extensions = ">=4.6.0"
[package.extras]
aiomysql = ["aiomysql (>=0.2.0)", "greenlet (!=0.4.17)"]
aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing-extensions (!=3.10.0.1)"]
aioodbc = ["aioodbc", "greenlet (!=0.4.17)"]
aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing_extensions (!=3.10.0.1)"]
asyncio = ["greenlet (!=0.4.17)"]
asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (!=0.4.17)"]
mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5)"]
@ -6671,7 +6672,7 @@ mssql-pyodbc = ["pyodbc"]
mypy = ["mypy (>=0.910)"]
mysql = ["mysqlclient (>=1.4.0)"]
mysql-connector = ["mysql-connector-python"]
oracle = ["cx-oracle (>=7)"]
oracle = ["cx_oracle (>=8)"]
oracle-oracledb = ["oracledb (>=1.0.1)"]
postgresql = ["psycopg2 (>=2.7)"]
postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"]
@ -6681,7 +6682,7 @@ postgresql-psycopg2binary = ["psycopg2-binary"]
postgresql-psycopg2cffi = ["psycopg2cffi"]
postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"]
pymysql = ["pymysql"]
sqlcipher = ["sqlcipher3-binary"]
sqlcipher = ["sqlcipher3_binary"]
[[package]]
name = "squarify"
@ -7726,4 +7727,4 @@ weaviate = ["weaviate-client"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.9.0,<3.12"
content-hash = "04a941703580f22f94b6bb9d1be1253a35869c2da63109557f122cf9d00aa586"
content-hash = "75d65fd1b99bf9db84fe026d140f6cb05b02afd31d4ad82a6286076256bd7609"

View file

@ -1,6 +1,6 @@
[tool.poetry]
name = "cognee"
version = "0.1.16"
version = "0.1.17"
description = "Cognee - is a library for enriching LLM context with a semantic layer for better understanding and reasoning."
authors = ["Vasilije Markovic", "Boris Arzentar"]
readme = "README.md"
@ -26,7 +26,7 @@ fastapi = "^0.109.2"
uvicorn = "0.22.0"
boto3 = "^1.26.125"
gunicorn = "^20.1.0"
sqlalchemy = "2.0.21"
sqlalchemy = "2.0.35"
instructor = "1.3.5"
networkx = "^3.2.1"
debugpy = "1.8.2"
@ -69,6 +69,7 @@ pdfplumber = "^0.11.1"
sentry-sdk = {extras = ["fastapi"], version = "^2.9.0"}
fastapi-users = { version = "*", extras = ["sqlalchemy"] }
asyncpg = "^0.29.0"
alembic = "^1.13.3"