From 3d6d0a0a89889337c645293bb54202de1ab014fc Mon Sep 17 00:00:00 2001
From: Vasilije <8619304+Vasilije1990@users.noreply.github.com>
Date: Sun, 14 Jan 2024 08:59:32 +0100
Subject: [PATCH] Reduce size of context, hack for now

---
 main.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/main.py b/main.py
index a392dfa6f..13389f98f 100644
--- a/main.py
+++ b/main.py
@@ -470,8 +470,8 @@ async def user_context_enrichment(session, user_id:str, query:str, generative_re
     context = f"""
     You are a memory system that uses cognitive architecture to enrich the LLM context and provide better query response.
     You have access to the following information:
-    EPISODIC MEMORY: {episodic_mem[:500]}
-    SEMANTIC MEMORY: {semantic_mem[:500]}
+    EPISODIC MEMORY: {episodic_mem[:1000]}
+    SEMANTIC MEMORY: {semantic_mem[:1000]}
     PROCEDURAL MEMORY: NULL
     SEARCH CONTEXT: The following documents provided with sources they were extracted from could be used to provide an answer {search_context}
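
Below is a minimal, self-contained sketch of the prompt-assembly pattern this hunk touches, so the effect of raising the slice cap is easier to see in isolation. Only episodic_mem, semantic_mem, search_context, and the prompt text come from the diff; the MAX_MEMORY_CHARS constant, the build_enrichment_context helper, and the dummy inputs are illustrative assumptions, not code from this repository.

# Sketch of the character-cap truncation pattern shown in the patch;
# names other than the memory variables and prompt text are hypothetical.
MAX_MEMORY_CHARS = 1000  # the patch raises this hard-coded cap from 500 to 1000


def build_enrichment_context(episodic_mem: str, semantic_mem: str, search_context: str) -> str:
    """Assemble the enrichment prompt, capping each memory block by character count."""
    return f"""
    You are a memory system that uses cognitive architecture to enrich the LLM context and provide better query response.
    You have access to the following information:
    EPISODIC MEMORY: {episodic_mem[:MAX_MEMORY_CHARS]}
    SEMANTIC MEMORY: {semantic_mem[:MAX_MEMORY_CHARS]}
    PROCEDURAL MEMORY: NULL
    SEARCH CONTEXT: The following documents provided with sources they were extracted from could be used to provide an answer {search_context}
    """


if __name__ == "__main__":
    # Dummy inputs; in the real code these come from the memory stores and search results.
    print(build_enrichment_context("episodic notes " * 100, "semantic facts " * 100, "retrieved documents"))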