diff --git a/cognitive_architecture/modules/cognify/llm/classify_content.py b/cognitive_architecture/modules/cognify/llm/classify_content.py
index 7561eabfe..8b9a30eb1 100644
--- a/cognitive_architecture/modules/cognify/llm/classify_content.py
+++ b/cognitive_architecture/modules/cognify/llm/classify_content.py
@@ -13,3 +13,8 @@ async def content_to_cog_layers(text_input: str,system_prompt_path:str, response
 
     return await llm_client.acreate_structured_output(text_input,system_prompt_path, response_model)
 
+if __name__ == "__main__":
+    import asyncio
+
+    # Smoke test with placeholder inputs; the coroutine must be driven by an event loop.
+    asyncio.run(content_to_cog_layers("test", "test", ContentPrediction))
diff --git a/cognitive_architecture/modules/cognify/llm/content_to_cog_layers.py b/cognitive_architecture/modules/cognify/llm/content_to_cog_layers.py
index 6ceada36e..5aa1ce5e6 100644
--- a/cognitive_architecture/modules/cognify/llm/content_to_cog_layers.py
+++ b/cognitive_architecture/modules/cognify/llm/content_to_cog_layers.py
@@ -1,7 +1,7 @@
 from typing import Type
 from pydantic import BaseModel
 from cognitive_architecture.infrastructure.llm.get_llm_client import get_llm_client
-
+from cognitive_architecture.shared.data_models import CognitiveLayer
 
 async def content_to_cog_layers(text_input: str,system_prompt_path:str, response_model: Type[BaseModel]):
     llm_client = get_llm_client()
@@ -12,6 +12,10 @@ async def content_to_cog_layers(text_input: str,system_prompt_path:str, response
 
     return await llm_client.acreate_structured_output(text_input,system_prompt_path, response_model)
 
+if __name__ == "__main__":
+    import asyncio
+
+    asyncio.run(content_to_cog_layers("test", "test", response_model=CognitiveLayer))
diff --git a/cognitive_architecture/modules/cognify/llm/content_to_propositions.py b/cognitive_architecture/modules/cognify/llm/content_to_propositions.py
index b91a1ed71..6e3c36a4f 100644
--- a/cognitive_architecture/modules/cognify/llm/content_to_propositions.py
+++ b/cognitive_architecture/modules/cognify/llm/content_to_propositions.py
@@ -2,10 +2,15 @@
 from typing import Type
 from pydantic import BaseModel
 from cognitive_architecture.infrastructure.llm.get_llm_client import get_llm_client
-
+from cognitive_architecture.shared.data_models import KnowledgeGraph
 
 async def generate_graph(text_input:str,system_prompt_path:str, response_model: Type[BaseModel]):
     doc_path = "cognitive_architecture/infrastructure/llm/prompts/generate_graph_prompt.txt"
     llm_client = get_llm_client()
+
     return await llm_client.generate_graph(text_input,system_prompt_path, response_model)
 
+if __name__ == "__main__":
+    import asyncio
+
+    asyncio.run(generate_graph("test", "test", response_model=KnowledgeGraph))
diff --git a/cognitive_architecture/shared/data_models.py b/cognitive_architecture/shared/data_models.py
index 928fcf26a..2cfc7e92e 100644
--- a/cognitive_architecture/shared/data_models.py
+++ b/cognitive_architecture/shared/data_models.py
@@ -166,3 +166,16 @@ class ContentPrediction(BaseModel):
 
     label: Union[TextContent, AudioContent, ImageContent, VideoContent, MultimediaContent, Model3DContent, ProceduralContent]
 
+
+class CognitiveLayerSubgroup(BaseModel):
+    """A CognitiveLayerSubgroup in a general layer."""
+    id: int
+    name: str
+    description: str
+
+
+class CognitiveLayer(BaseModel):
+    """A cognitive layer holding its subgroups."""
+    category_name: str
+    # default_factory alone suffices; combining it with ... raises a TypeError in pydantic.
+    cognitive_layers: List[CognitiveLayerSubgroup] = Field(default_factory=list)
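
A minimal sketch, not part of the diff above, of how the two new Pydantic models compose. Only the model definitions come from the data_models.py hunk; the category and subgroup values are made-up placeholders:

    from cognitive_architecture.shared.data_models import CognitiveLayer, CognitiveLayerSubgroup

    layer = CognitiveLayer(
        category_name="biology",
        cognitive_layers=[
            CognitiveLayerSubgroup(id=1, name="concepts", description="Core domain concepts"),
            CognitiveLayerSubgroup(id=2, name="relations", description="Links between concepts"),
        ],
    )

    # With default_factory=list the subgroups field can be omitted entirely:
    empty = CognitiveLayer(category_name="misc")
    assert empty.cognitive_layers == []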
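
Likewise, a sketch of driving one of the new __main__ smoke tests end to end. It assumes, per the hunks above, that acreate_structured_output returns an instance of the given response_model; the input text and prompt path are placeholders, and a real run needs a configured LLM client behind get_llm_client():

    import asyncio

    from cognitive_architecture.modules.cognify.llm.content_to_cog_layers import content_to_cog_layers
    from cognitive_architecture.shared.data_models import CognitiveLayer

    async def main():
        # Placeholder text and prompt path, for illustration only.
        layer = await content_to_cog_layers(
            "Photosynthesis converts light energy into chemical energy.",
            "prompts/classify_into_layers.txt",
            response_model=CognitiveLayer,
        )
        print(layer.category_name)
        for subgroup in layer.cognitive_layers:
            print(subgroup.id, subgroup.name)

    if __name__ == "__main__":
        asyncio.run(main())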