From eee6049e225e1c8eccca28b911225abf2dd6da22 Mon Sep 17 00:00:00 2001
From: Vasilije <8619304+Vasilije1990@users.noreply.github.com>
Date: Sun, 14 Jan 2024 22:11:55 +0100
Subject: [PATCH] Reduce size of context, hack for now

---
 main.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/main.py b/main.py
index 920d7d90b..0cd2fac57 100644
--- a/main.py
+++ b/main.py
@@ -473,8 +473,8 @@ async def user_context_enrichment(session, user_id:str, query:str, generative_re
     context = f"""
     You are a memory system that uses cognitive architecture to enrich the LLM context and provide better query response.
     You have access to the following information:
-    EPISODIC MEMORY: {episodic_mem[:1000]}
-    SEMANTIC MEMORY: {semantic_mem[:1000]}
+    EPISODIC MEMORY: {episodic_mem[:200]}
+    SEMANTIC MEMORY: {semantic_mem[:200]}
     PROCEDURAL MEMORY: NULL
     SEARCH CONTEXT: The following documents provided with sources they were extracted from could be used to provide an answer
     {search_context}
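
Note (not part of the patch): the change above simply shrinks the inline slices from 1000 to 200 characters per memory segment. A minimal Python sketch of the same idea, with the limit factored into one place, is shown below; the truncate_memory helper, MAX_MEMORY_CHARS constant, and 200-character limit are illustrative assumptions, not code from this repository.

    from typing import Optional

    # Mirrors the [:200] slices introduced by this patch (assumed value).
    MAX_MEMORY_CHARS = 200

    def truncate_memory(memory: Optional[str], limit: int = MAX_MEMORY_CHARS) -> str:
        """Return at most `limit` characters of a memory string, tolerating None."""
        if not memory:
            return "NULL"
        return memory[:limit]

    # Hypothetical usage when assembling the prompt:
    # context = f"""
    # EPISODIC MEMORY: {truncate_memory(episodic_mem)}
    # SEMANTIC MEMORY: {truncate_memory(semantic_mem)}
    # """

This keeps the per-segment cap in a single constant, so a later, less "hacky" fix only needs to touch one line.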