diff --git a/cognitive_architecture/infrastructure/llm/openai/adapter.py b/cognitive_architecture/infrastructure/llm/openai/adapter.py
index ef8fd8f3b..02c71f51c 100644
--- a/cognitive_architecture/infrastructure/llm/openai/adapter.py
+++ b/cognitive_architecture/infrastructure/llm/openai/adapter.py
@@ -180,11 +180,11 @@ class OpenAIAdapter(LLMInterface):
         return embeddings
 
-    async def acreate_structured_output(self, text_input: str, system_prompt_path: str, response_model: Type[BaseModel], model:str) -> BaseModel:
+    async def acreate_structured_output(self, text_input: str, system_prompt_path: str, response_model: Type[BaseModel]) -> BaseModel:
         """Generate a response from a user query."""
         system_prompt = read_query_prompt(system_prompt_path)
 
         return self.aclient.chat.completions.create(
-            model=model,
+            model=self.model,
             messages=[
                 {
                     "role": "user",
diff --git a/cognitive_architecture/modules/cognify/llm/classify_content.py b/cognitive_architecture/modules/cognify/llm/classify_content.py
index 65f8a4c41..7561eabfe 100644
--- a/cognitive_architecture/modules/cognify/llm/classify_content.py
+++ b/cognitive_architecture/modules/cognify/llm/classify_content.py
@@ -1,13 +1,15 @@
+from pydantic import BaseModel
+from typing import Type
 from cognitive_architecture.infrastructure.llm.get_llm_client import get_llm_client
+from cognitive_architecture.shared.data_models import ContentPrediction
 
-
-async def content_to_cog_layers(memory_name: str, payload: list):
+async def content_to_cog_layers(text_input: str,system_prompt_path:str, response_model: Type[BaseModel]):
     llm_client = get_llm_client()
 
     # data_points = list()
     # for point in map(create_data_point, payload):
     #     data_points.append(await point)
 
-    return await llm_client.acreate_structured_output(memory_name, payload, model="text-embedding-ada-002")
+    return await llm_client.acreate_structured_output(text_input,system_prompt_path, response_model)
diff --git a/cognitive_architecture/modules/cognify/llm/content_to_cog_layers.py b/cognitive_architecture/modules/cognify/llm/content_to_cog_layers.py
index 0a1d6a548..6ceada36e 100644
--- a/cognitive_architecture/modules/cognify/llm/content_to_cog_layers.py
+++ b/cognitive_architecture/modules/cognify/llm/content_to_cog_layers.py
@@ -1,14 +1,16 @@
+from typing import Type
+from pydantic import BaseModel
 from cognitive_architecture.infrastructure.llm.get_llm_client import get_llm_client
 
 
-async def content_to_cog_layers(memory_name: str, payload: list):
+async def content_to_cog_layers(text_input: str,system_prompt_path:str, response_model: Type[BaseModel]):
     llm_client = get_llm_client()
 
     # data_points = list()
     # for point in map(create_data_point, payload):
     #     data_points.append(await point)
 
-    return await llm_client.acreate_structured_output(memory_name, payload, model="text-embedding-ada-002")
+    return await llm_client.acreate_structured_output(text_input,system_prompt_path, response_model)
diff --git a/cognitive_architecture/modules/cognify/llm/content_to_propositions.py b/cognitive_architecture/modules/cognify/llm/content_to_propositions.py
index cc94340c3..b91a1ed71 100644
--- a/cognitive_architecture/modules/cognify/llm/content_to_propositions.py
+++ b/cognitive_architecture/modules/cognify/llm/content_to_propositions.py
@@ -1,9 +1,11 @@
 """ Content to Propositions"""
+from typing import Type
+from pydantic import BaseModel
 from cognitive_architecture.infrastructure.llm.get_llm_client import get_llm_client
 
-async def generate_graph(memory_name: str, payload: str):
+async def generate_graph(text_input:str,system_prompt_path:str, response_model: Type[BaseModel]):
     doc_path = "cognitive_architecture/infrastructure/llm/prompts/generate_graph_prompt.txt"
     llm_client = get_llm_client()
 
-    return await llm_client.generate_graph(memory_name, doc_path=doc_path,payload= payload)
+    return await llm_client.generate_graph(text_input,system_prompt_path, response_model)
diff --git a/cognitive_architecture/shared/data_models.py b/cognitive_architecture/shared/data_models.py
index 8ed374908..928fcf26a 100644
--- a/cognitive_architecture/shared/data_models.py
+++ b/cognitive_architecture/shared/data_models.py
@@ -161,7 +161,7 @@ class ProceduralContent(ContentType):
     type = "PROCEDURAL"
     subclass: List[ProceduralSubclass]
 
-class SinglePrediction(BaseModel):
+class ContentPrediction(BaseModel):
     """Class for a single class label prediction."""
     label: Union[TextContent, AudioContent, ImageContent, VideoContent, MultimediaContent, Model3DContent, ProceduralContent]