Added graph interface, added neo4j + networkx structure and updates to the notebook

This commit is contained in:
Vasilije 2024-03-06 21:50:25 +01:00
parent 2433e4ed93
commit 876227e5a0
5 changed files with 16 additions and 10 deletions

View file

@@ -180,11 +180,11 @@ class OpenAIAdapter(LLMInterface):
return embeddings return embeddings
async def acreate_structured_output(self, text_input: str, system_prompt_path: str, response_model: Type[BaseModel], model:str) -> BaseModel: async def acreate_structured_output(self, text_input: str, system_prompt_path: str, response_model: Type[BaseModel]) -> BaseModel:
"""Generate a response from a user query.""" """Generate a response from a user query."""
system_prompt = read_query_prompt(system_prompt_path) system_prompt = read_query_prompt(system_prompt_path)
return self.aclient.chat.completions.create( return self.aclient.chat.completions.create(
model=model, model=self.model,
messages=[ messages=[
{ {
"role": "user", "role": "user",

View file

@@ -1,13 +1,15 @@
from pydantic import BaseModel
from typing import Type
from cognitive_architecture.infrastructure.llm.get_llm_client import get_llm_client from cognitive_architecture.infrastructure.llm.get_llm_client import get_llm_client
from cognitive_architecture.shared.data_models import ContentPrediction
async def content_to_cog_layers(text_input: str,system_prompt_path:str, response_model: Type[BaseModel]):
async def content_to_cog_layers(memory_name: str, payload: list):
llm_client = get_llm_client() llm_client = get_llm_client()
# data_points = list() # data_points = list()
# for point in map(create_data_point, payload): # for point in map(create_data_point, payload):
# data_points.append(await point) # data_points.append(await point)
return await llm_client.acreate_structured_output(memory_name, payload, model="text-embedding-ada-002") return await llm_client.acreate_structured_output(text_input,system_prompt_path, response_model)

View file

@@ -1,14 +1,16 @@
from typing import Type
from pydantic import BaseModel
from cognitive_architecture.infrastructure.llm.get_llm_client import get_llm_client from cognitive_architecture.infrastructure.llm.get_llm_client import get_llm_client
async def content_to_cog_layers(memory_name: str, payload: list): async def content_to_cog_layers(text_input: str,system_prompt_path:str, response_model: Type[BaseModel]):
llm_client = get_llm_client() llm_client = get_llm_client()
# data_points = list() # data_points = list()
# for point in map(create_data_point, payload): # for point in map(create_data_point, payload):
# data_points.append(await point) # data_points.append(await point)
return await llm_client.acreate_structured_output(memory_name, payload, model="text-embedding-ada-002") return await llm_client.acreate_structured_output(text_input,system_prompt_path, response_model)

View file

@@ -1,9 +1,11 @@
""" Content to Propositions""" """ Content to Propositions"""
from typing import Type
from pydantic import BaseModel
from cognitive_architecture.infrastructure.llm.get_llm_client import get_llm_client from cognitive_architecture.infrastructure.llm.get_llm_client import get_llm_client
async def generate_graph(memory_name: str, payload: str): async def generate_graph(text_input:str,system_prompt_path:str, response_model: Type[BaseModel]):
doc_path = "cognitive_architecture/infrastructure/llm/prompts/generate_graph_prompt.txt" doc_path = "cognitive_architecture/infrastructure/llm/prompts/generate_graph_prompt.txt"
llm_client = get_llm_client() llm_client = get_llm_client()
return await llm_client.generate_graph(memory_name, doc_path=doc_path,payload= payload) return await llm_client.generate_graph(text_input,system_prompt_path, response_model)

View file

@@ -161,7 +161,7 @@ class ProceduralContent(ContentType):
type = "PROCEDURAL" type = "PROCEDURAL"
subclass: List[ProceduralSubclass] subclass: List[ProceduralSubclass]
class SinglePrediction(BaseModel): class ContentPrediction(BaseModel):
"""Class for a single class label prediction.""" """Class for a single class label prediction."""
label: Union[TextContent, AudioContent, ImageContent, VideoContent, MultimediaContent, Model3DContent, ProceduralContent] label: Union[TextContent, AudioContent, ImageContent, VideoContent, MultimediaContent, Model3DContent, ProceduralContent]