Merge pull request #6 from topoteretes/feature/improve_actions

Feature/improve actions
This commit is contained in:
Vasilije 2023-08-27 20:12:18 +02:00 committed by GitHub
commit 3983cbbf62
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
8 changed files with 1155 additions and 667 deletions

View file

@ -0,0 +1,36 @@
name: 'Build Docker images for PromethAI'
description: 'Build PromethAI-related Docker images and push to the Docker registry (AWS ECR)'

inputs:
  stage:
    description: 'The stage of the pipeline, such as "dev" or "prd", for the PromethAI app'
    required: true
  aws_account_id:
    description: 'The AWS account ID for the PromethAI app'
    required: true
  should_publish:
    description: 'Whether to publish the PromethAI Docker image to AWS ECR; should be either "true" or "false"'
    required: true
  ecr_image_repo_name:
    description: 'The Docker image ECR repository name for the PromethAI app, such as "workflows"'
    required: true
  dockerfile_location:
    description: 'The directory location of the Dockerfile for the PromethAI app'
    required: true

runs:
  using: "composite"
  steps:
    - name: Build PromethAI App Docker image
      shell: bash
      # Pass every input through `env:` instead of interpolating `${{ }}`
      # directly into the script body: a crafted input value can otherwise be
      # executed as shell (script injection), and env vars also avoid the
      # redundant re-export of STAGE the script previously did.
      env:
        STAGE: ${{ inputs.stage }}
        ACCOUNT_ID: ${{ inputs.aws_account_id }}
        SHOULD_PUBLISH: ${{ inputs.should_publish }}
        IMAGE_REPO_NAME: ${{ inputs.ecr_image_repo_name }}
        DOCKERFILE_LOCATION: ${{ inputs.dockerfile_location }}
      run: |
        # Image tag layout: <stage>-<timestamp>-<short sha>, e.g. dev-20230827201218-3983cbb
        SHA_SHORT="$(git rev-parse --short HEAD)"
        CUR_DATE="$(date +%Y%m%d%H%M%S)"
        VERSION="$STAGE-$CUR_DATE-$SHA_SHORT"
        APP_DIR="$PWD/$DOCKERFILE_LOCATION"
        image_name="$IMAGE_REPO_NAME" docker_login="true" version="$VERSION" account="$ACCOUNT_ID" app_dir="$APP_DIR" publish="$SHOULD_PUBLISH" ./bin/dockerize
        echo "Docker tag is: $VERSION"
        # Persist the tag so later workflow steps can read it back.
        echo "$VERSION" > /tmp/.DOCKER_IMAGE_VERSION

76
.github/workflows/cd.yaml vendored Normal file
View file

@ -0,0 +1,76 @@
name: Publishing promethai-backend Docker image

on:
  push:
    branches:
      - dev
      - feature/*
    paths-ignore:
      - '**.md'

env:
  AWS_ROLE_DEV_CICD: "arn:aws:iam::463722570299:role/promethai-dev-base-role-github-ci-cd"
  AWS_ACCOUNT_ID_DEV: "463722570299"

jobs:
  publish_docker_to_ecr:
    name: Publish Docker PromethAI image
    runs-on: ubuntu-latest
    permissions:
      id-token: write  # required for OIDC federation with AWS
      contents: read
    steps:
      - name: Take code from repo
        uses: actions/checkout@v3
      - name: Set environment variable for stage
        id: set-env
        # `::set-output` is deprecated and disabled on current runners;
        # step outputs must be appended to $GITHUB_OUTPUT instead.
        run: |
          if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then
            echo "STAGE=prd" >> $GITHUB_ENV
            echo "stage=prd" >> $GITHUB_OUTPUT
          else
            echo "STAGE=dev" >> $GITHUB_ENV
            echo "stage=dev" >> $GITHUB_OUTPUT
          fi
      - name: Use output
        run: echo "The stage is ${{ steps.set-env.outputs.stage }}"
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v1
        with:
          role-to-assume: ${{ env.AWS_ROLE_DEV_CICD }}
          aws-region: eu-west-1
      - name: Create Docker image and push to ECR
        uses: ./.github/actions/image_builder
        id: generate-promethai-docker
        with:
          # This workflow only triggers for dev/feature branches, so the stage
          # is always "dev" here (matching the hard-coded dev ECR repo below).
          stage: dev
          aws_account_id: ${{ env.AWS_ACCOUNT_ID_DEV }}
          should_publish: "true"  # action inputs are strings; quote boolean-looking values
          ecr_image_repo_name: promethai-dev-backend-promethai-backend-memory
          dockerfile_location: ./level_2
      - name: Export Docker image tag
        id: export-promethai-docker-tag
        run: |
          DOCKER_TAG=$(cat /tmp/.DOCKER_IMAGE_VERSION)
          echo "Docker tag is: $DOCKER_TAG"
          echo "promethai_docker_tag_backend=$DOCKER_TAG" >> $GITHUB_OUTPUT
    outputs:
      promethai_docker_tag_backend: ${{ steps.export-promethai-docker-tag.outputs.promethai_docker_tag_backend }}
#  apply_tf:
#    name: Trigger terraform apply workflow
#    runs-on: ubuntu-latest
#    needs: publish_docker_to_ecr
#    steps:
#      - name: TF apply workflow triggers step
#        uses: actions/github-script@v6
#        with:
#          github-token: ${{ secrets.PAT_FOR_CROSS_REPOS_CICD_TRIGGERING }}
#          script: |
#            await github.rest.actions.createWorkflowDispatch({
#              owner: 'topoteretes',
#              repo: 'PromethAI-Infra',
#              workflow_id: 'terraform.apply.yml',
#              ref: 'main'
#            })

99
.github/workflows/cd_prd.yaml vendored Normal file
View file

@ -0,0 +1,99 @@
name: Publishing promethai-backend Docker image to prd ECR

on:
  push:
    branches:
      - main
    paths-ignore:
      - '**.md'
      - 'examples/**'

env:
  AWS_ROLE_DEV_CICD: "arn:aws:iam::463722570299:role/promethai-dev-base-role-github-ci-cd"
  AWS_ACCOUNT_ID_DEV: "463722570299"
  ENVIRONMENT: prd

jobs:
  publish_docker_to_ecr:
    name: Publish Docker PromethAI image
    runs-on: ubuntu-latest
    permissions:
      id-token: write  # required for OIDC federation with AWS
      contents: read
    steps:
      - name: Take code from repo
        uses: actions/checkout@v3
      - name: Set environment variable for stage
        id: set-env
        # `::set-output` is deprecated and disabled on current runners;
        # step outputs must be appended to $GITHUB_OUTPUT instead.
        run: |
          if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then
            echo "STAGE=prd" >> $GITHUB_ENV
            echo "stage=prd" >> $GITHUB_OUTPUT
          else
            echo "STAGE=dev" >> $GITHUB_ENV
            echo "stage=dev" >> $GITHUB_OUTPUT
          fi
      - name: Use output
        run: echo "The stage is ${{ steps.set-env.outputs.stage }}"
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v1
        with:
          role-to-assume: ${{ env.AWS_ROLE_DEV_CICD }}
          aws-region: eu-west-1
      - name: Create Docker image and push to ECR
        uses: ./.github/actions/image_builder
        id: generate-promethai-docker
        with:
          stage: prd
          aws_account_id: ${{ env.AWS_ACCOUNT_ID_DEV }}
          should_publish: "true"  # action inputs are strings; quote boolean-looking values
          ecr_image_repo_name: promethai-prd-backend-promethai-backend-memory
          dockerfile_location: ./level_2
      - name: Export Docker image tag
        id: export-promethai-docker-tag
        run: |
          DOCKER_TAG=$(cat /tmp/.DOCKER_IMAGE_VERSION)
          echo "Docker tag is: $DOCKER_TAG"
          echo "promethai_docker_tag_backend=$DOCKER_TAG" >> $GITHUB_OUTPUT
#      - name: Create Tag and Release
#        runs-on: ubuntu-latest
#        uses: actions/checkout@v3
#        needs: publish_docker_to_ecr  # ensure this job runs after Docker image is pushed
#        steps:
#          - name: Check out code
#            uses: actions/checkout@v3
#          - name: Bump version and push tag
#            id: bump_version_and_push_tag
#            uses: anothrNick/github-tag-action@1.34.0
#            env:
#              GITHUB_TOKEN: ${{ secrets.PAT_FOR_CROSS_REPOS_CICD_TRIGGERING }}
#              WITH_V: true
#              DEFAULT_BUMP: 'minor'  # or 'minor' or 'major'
#          - name: Create Release
#            id: create_release
#            uses: actions/create-release@v1
#            env:
#              GITHUB_TOKEN: ${{ secrets.PAT_FOR_CROSS_REPOS_CICD_TRIGGERING }}
#            with:
#              tag_name: ${{ steps.bump_version_and_push_tag.outputs.tag }}
#              release_name: Release ${{ steps.bump_version_and_push_tag.outputs.tag }}
    outputs:
      promethai_docker_tag_backend: ${{ steps.export-promethai-docker-tag.outputs.promethai_docker_tag_backend }}
#  apply_tf:
#    name: Trigger terraform apply workflow
#    runs-on: ubuntu-latest
#    needs: publish_docker_to_ecr
#    steps:
#      - name: TF apply workflow triggers step
#        uses: actions/github-script@v6
#        with:
#          github-token: ${{ secrets.PAT_FOR_CROSS_REPOS_CICD_TRIGGERING }}
#          script: |
#            await github.rest.actions.createWorkflowDispatch({
#              owner: 'topoteretes',
#              repo: 'PromethAI-Infra',
#              workflow_id: 'terraform.apply.yml',
#              ref: 'main'
#            })

25
.github/workflows/ci.yaml vendored Normal file
View file

@ -0,0 +1,25 @@
name: Test build docker image for PromethAI backend app

on: pull_request

env:
  AWS_ACCOUNT_ID_DEV: "463722570299"

jobs:
  build_docker:
    name: Build PromethAI Backend Docker App Image
    runs-on: ubuntu-latest
    steps:
      - name: Check out PromethAI code
        uses: actions/checkout@v3
      - name: Build PromethAI backend Docker image tag
        id: backend-docker-tag
        run: |
          # Tag layout: dev-<timestamp>-<short sha>; built but never pushed
          # (docker_login/publish are "false" for PR validation builds).
          export SHA_SHORT="$(git rev-parse --short HEAD)"
          export CUR_DATE="$(date +%Y%m%d%H%M%S)"
          export VERSION="dev-$CUR_DATE-$SHA_SHORT"
          image_name="backend-memory" docker_login="false" version="$VERSION" account="${{ env.AWS_ACCOUNT_ID_DEV }}" app_dir="level_2" publish="false" ./bin/dockerize
          # bin/dockerize writes the tag to the absolute path /tmp/.DOCKER_IMAGE_VERSION;
          # the previous relative path level_2/tmp/... never exists, so the readback failed.
          export DOCKER_TAG=$(cat /tmp/.DOCKER_IMAGE_VERSION)
          echo "Successfully built PromethAI backend Docker tag is: $DOCKER_TAG"

30
bin/dockerize Executable file
View file

@ -0,0 +1,30 @@
#!/usr/bin/env bash
# Build (and optionally push) a PromethAI Docker image.
#
# Caller contract (all via environment variables):
#   stage        - pipeline stage, defaults to "dev" (falls back to $STAGE if exported)
#   version      - image tag to use; computed as <stage>-<date>-<sha> when absent
#   image_name   - ECR repository name, defaults to promethai-<stage>-promethai-backend-memory
#   app_dir      - directory containing the Dockerfile, defaults to ./level_2
#   publish      - "true" to push the image to ECR
#   docker_login - "true" to run `aws ecr get-login-password` before pushing
#   AWS_REPOSITORY / AWS_REGION / AWS_DEPLOYMENT_ACCOUNT - registry coordinates
#     (presumably set by the CI environment - TODO confirm; left non-fatal when empty)
set -eo pipefail  # fail the pipeline when the build or push fails instead of exiting 0

# Honor the uppercase STAGE the composite action exports; previously only the
# (never-set) lowercase `stage` was read, silently defaulting prd builds to "dev".
STAGE="${stage:-${STAGE:-dev}}"
SHA_SHORT="$(git rev-parse --short HEAD)"
CUR_DATE="$(date +%Y%m%d%H%M%S)"
# Prefer the caller-supplied version so the tag we build matches the tag the
# workflow records; previously `version` was ignored and recomputed locally.
VERSION="${version:-$STAGE-$CUR_DATE-$SHA_SHORT}"
IMAGE_NAME="${image_name:-promethai-${STAGE}-promethai-backend-memory}"
REPO_NAME="${AWS_REPOSITORY}/${IMAGE_NAME}"
FULL_IMAGE_NAME="${REPO_NAME}:${VERSION}"
APP_DIR="${app_dir:-./level_2}"
PUBLISH="${publish:-false}"
DOCKER_LOGIN="${docker_login:-false}"

echo "Building docker image ${FULL_IMAGE_NAME} located in dir ${APP_DIR}"
pushd "${APP_DIR}" &&
  docker buildx build --platform linux/amd64 \
    --build-arg STAGE="${STAGE}" \
    -t "${FULL_IMAGE_NAME}" . &&
  echo "${VERSION}" >/tmp/.DOCKER_IMAGE_VERSION &&
  echo "Successfully built docker image ${FULL_IMAGE_NAME}"

if [ "${PUBLISH}" = true ]; then
  echo "Pushing docker image ${FULL_IMAGE_NAME} to ECR repository to AWS account ${AWS_DEPLOYMENT_ACCOUNT}"
  # The inner guard previously re-tested PUBLISH (always true here) while the
  # docker_login flag every caller passes was ignored; honor docker_login.
  if [ "${DOCKER_LOGIN}" = true ]; then
    echo "logging in"
    aws ecr get-login-password --region "${AWS_REGION}" | docker login --username AWS --password-stdin "${AWS_REPOSITORY}"
  fi
  docker push "${FULL_IMAGE_NAME}" &&
    echo "Successfully pushed docker image ${FULL_IMAGE_NAME} to ECR repository"
fi

View file

@ -9,7 +9,8 @@ Initial code lets you do three operations:
1. Add to memory
2. Retrieve from memory
3. Structure the data to schema and load to duckdb
3. Structure the data to schema
4. Load to a database
# How to use
@ -38,16 +39,17 @@ The Memory API provides the following endpoints:
- /run-buffer (POST)
- /buffer/create-context (POST)
Here is a payload example:
## How To Get Started
1. We do a post request to add-memory endpoint with the following payload:
This will upload Jack London's "Call of the Wild" to SEMANTIC memory.
```
{
curl -X POST http://localhost:8000/semantic/add-memory -H "Content-Type: application/json" -d '{
"payload": {
"user_id": "681",
"session_id": "471",
"model_speed": "slow",
"prompt": "I want ",
"pdf_url": "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf",
"prompt": "I am adding docs",
"params": {
"version": "1.0",
"agreement_id": "AG123456",
@ -60,7 +62,75 @@ Here is a payload example:
"license": "MIT",
"validity_start": "2023-08-01",
"validity_end": "2024-07-31"
},
"loader_settings": {
"format": "PDF",
"source": "url",
"path": "https://www.ibiblio.org/ebooks/London/Call%20of%20Wild.pdf"
}
}
}
```
}'
```
2. We run the buffer with the prompt "I want to know how does Buck adapt to life in the wild and then have that info translated to German "
```
curl -X POST http://localhost:8000/run-buffer -H "Content-Type: application/json" -d '{
"payload": {
"user_id": "681",
"prompt": "I want to know how does Buck adapt to life in the wild and then have that info translated to German ",
"params": {
"version": "1.0",
"agreement_id": "AG123456",
"privacy_policy": "https://example.com/privacy",
"terms_of_service": "https://example.com/terms",
"format": "json",
"schema_version": "1.1",
"checksum": "a1b2c3d4e5f6",
"owner": "John Doe",
"license": "MIT",
"validity_start": "2023-08-01",
"validity_end": "2024-07-31"
},
"attention_modulators": {
"relevance": 0.0,
"saliency": 0.1
}
}
}'
```
Other attention modulators that could be implemented:
"frequency": 0.5,
"repetition": 0.5,
"length": 0.5,
"position": 0.5,
"context": 0.5,
"emotion": 0.5,
"sentiment": 0.5,
"perspective": 0.5,
"style": 0.5,
"grammar": 0.5,
"spelling": 0.5,
"logic": 0.5,
"coherence": 0.5,
"cohesion": 0.5,
"plausibility": 0.5,
"consistency": 0.5,
"informativeness": 0.5,
"specificity": 0.5,
"detail": 0.5,
"accuracy": 0.5,
"topicality": 0.5,
"focus": 0.5,
"clarity": 0.5,
"simplicity": 0.5,
"naturalness": 0.5,
"fluency": 0.5,
"variety": 0.5,
"vividness": 0.5,
"originality": 0.5,
"creativity": 0.5,
"humor": 0.5,

View file

@ -1,23 +1,14 @@
from io import BytesIO
import logging
import os
from typing import Dict, Any
from langchain.document_loaders import PyPDFLoader
from level_2_pdf_vectorstore__dlt_contracts import Memory
import uvicorn
from fastapi import FastAPI
from fastapi.responses import JSONResponse
from pydantic import BaseModel
from typing import Dict, Any
import re
import json
import logging
import os
import uvicorn
from fastapi import Request
import yaml
from fastapi import HTTPException
from fastapi import FastAPI, UploadFile, File
from typing import List
import requests
from level_2_pdf_vectorstore__dlt_contracts import Memory
from dotenv import load_dotenv
# Set up logging
logging.basicConfig(
level=logging.INFO, # Set the logging level (e.g., DEBUG, INFO, WARNING, ERROR, CRITICAL)
@ -25,26 +16,21 @@ logging.basicConfig(
)
logger = logging.getLogger(__name__)
from dotenv import load_dotenv
load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
app = FastAPI(debug=True)
from fastapi import Depends
class ImageResponse(BaseModel):
success: bool
message: str
@app.get("/", )
@app.get(
"/",
)
async def root():
"""
Root endpoint that returns a welcome message.
@ -57,187 +43,99 @@ def health_check():
Health check endpoint that returns the server status.
"""
return {"status": "OK"}
#curl -X POST -H "Content-Type: application/json" -d '{"data": "YourPayload"}' -F "files=@/path/to/your/pdf/file.pdf" http://127.0.0.1:8000/upload/
class Payload(BaseModel):
payload: Dict[str, Any]
# @app.post("/upload/", response_model=dict)
# async def upload_pdf_and_payload(
# payload: Payload,
# # files: List[UploadFile] = File(...),
# ):
# try:
# # Process the payload
# decoded_payload = payload.payload
# # except:
# # pass
# #
# # return JSONResponse(content={"response": decoded_payload}, status_code=200)
#
# # Download the remote PDF if URL is provided
# if 'pdf_url' in decoded_payload:
# pdf_response = requests.get(decoded_payload['pdf_url'])
# pdf_content = pdf_response.content
#
# logging.info("Downloaded PDF from URL")
#
# # Create an in-memory file-like object for the PDF content
# pdf_stream = BytesIO(pdf_content)
#
# contents = pdf_stream.read()
#
# tmp_location = os.path.join('/tmp', "tmp.pdf")
# with open(tmp_location, 'wb') as tmp_file:
# tmp_file.write(contents)
#
# logging.info("Wrote PDF from URL")
#
# # Process the PDF using PyPDFLoader
# loader = PyPDFLoader(tmp_location)
# pages = loader.load_and_split()
# logging.info(" PDF split into pages")
# Memory_ = Memory(index_name="my-agent", user_id='555' )
# await Memory_.async_init()
# Memory_._add_episodic_memory(user_input="I want to get a schema for my data", content =pages)
#
#
# # Run the buffer
# response = Memory_._run_buffer(user_input="I want to get a schema for my data")
# return JSONResponse(content={"response": response}, status_code=200)
#
# #to do: add the user id to the payload
# #to do add the raw pdf to payload
# # bb = await Memory_._run_buffer(user_input=decoded_payload['prompt'])
# # print(bb)
#
#
# except Exception as e:
#
# return {"error": str(e)}
# # Here you can perform your processing on the PDF contents
# # results.append({"filename": file.filename, "size": len(contents)})
#
# # Append the in-memory file to the files list
# # files.append(UploadFile(pdf_stream, filename="downloaded.pdf"))
#
def memory_factory(memory_type):
load_dotenv()
class Payload(BaseModel):
payload: Dict[str, Any]
@app.post("/{memory_type}/add-memory", response_model=dict)
async def add_memory(
payload: Payload,
# files: List[UploadFile] = File(...),
payload: Payload,
# files: List[UploadFile] = File(...),
):
try:
logging.info(" Init PDF processing")
decoded_payload = payload.payload
if 'pdf_url' in decoded_payload:
pdf_response = requests.get(decoded_payload['pdf_url'])
pdf_content = pdf_response.content
Memory_ = Memory(user_id=decoded_payload["user_id"])
logging.info("Downloaded PDF from URL")
await Memory_.async_init()
# Create an in-memory file-like object for the PDF content
pdf_stream = BytesIO(pdf_content)
contents = pdf_stream.read()
tmp_location = os.path.join('/tmp', "tmp.pdf")
with open(tmp_location, 'wb') as tmp_file:
tmp_file.write(contents)
logging.info("Wrote PDF from URL")
# Process the PDF using PyPDFLoader
loader = PyPDFLoader(tmp_location)
# pages = loader.load_and_split()
logging.info(" PDF split into pages")
Memory_ = Memory(user_id=decoded_payload['user_id'])
await Memory_.async_init()
memory_class = getattr(Memory_, f"_add_{memory_type}_memory", None)
output= await memory_class(observation=str(loader), params =decoded_payload['params'])
return JSONResponse(content={"response": output}, status_code=200)
memory_class = getattr(Memory_, f"_add_{memory_type}_memory", None)
output = await memory_class(
observation=decoded_payload["prompt"],
loader_settings=decoded_payload["loader_settings"],
params=decoded_payload["params"],
)
return JSONResponse(content={"response": output}, status_code=200)
except Exception as e:
return JSONResponse(content={"response": {"error": str(e)}}, status_code=503)
return JSONResponse(
content={"response": {"error": str(e)}}, status_code=503
)
@app.post("/{memory_type}/fetch-memory", response_model=dict)
async def fetch_memory(
payload: Payload,
# files: List[UploadFile] = File(...),
payload: Payload,
# files: List[UploadFile] = File(...),
):
try:
decoded_payload = payload.payload
Memory_ = Memory(user_id=decoded_payload['user_id'])
Memory_ = Memory(user_id=decoded_payload["user_id"])
await Memory_.async_init()
memory_class = getattr(Memory_, f"_fetch_{memory_type}_memory", None)
output = memory_class(observation=decoded_payload['prompt'])
output = memory_class(observation=decoded_payload["prompt"])
return JSONResponse(content={"response": output}, status_code=200)
except Exception as e:
return JSONResponse(content={"response": {"error": str(e)}}, status_code=503)
return JSONResponse(
content={"response": {"error": str(e)}}, status_code=503
)
@app.post("/{memory_type}/delete-memory", response_model=dict)
async def delete_memory(
payload: Payload,
# files: List[UploadFile] = File(...),
payload: Payload,
# files: List[UploadFile] = File(...),
):
try:
decoded_payload = payload.payload
Memory_ = Memory(user_id=decoded_payload['user_id'])
Memory_ = Memory(user_id=decoded_payload["user_id"])
await Memory_.async_init()
memory_class = getattr(Memory_, f"_delete_{memory_type}_memory", None)
output = memory_class(observation=decoded_payload['prompt'])
output = memory_class(observation=decoded_payload["prompt"])
return JSONResponse(content={"response": output}, status_code=200)
except Exception as e:
return JSONResponse(
content={"response": {"error": str(e)}}, status_code=503
)
return JSONResponse(content={"response": {"error": str(e)}}, status_code=503)
memory_list = ["episodic", "buffer", "semantic"]
for memory_type in memory_list:
memory_factory(memory_type)
@app.get("/available-buffer-actions", response_model=dict)
async def available_buffer_actions(
payload: Payload,
# files: List[UploadFile] = File(...),
payload: Payload,
# files: List[UploadFile] = File(...),
):
try:
decoded_payload = payload.payload
Memory_ = Memory(user_id=decoded_payload['user_id'])
Memory_ = Memory(user_id=decoded_payload["user_id"])
await Memory_.async_init()
@ -246,126 +144,73 @@ async def available_buffer_actions(
return JSONResponse(content={"response": output}, status_code=200)
except Exception as e:
return JSONResponse(content={"response": {"error": str(e)}}, status_code=503)
@app.post("/run-buffer", response_model=dict)
async def available_buffer_actions(
payload: Payload,
# files: List[UploadFile] = File(...),
async def run_buffer(
payload: Payload,
# files: List[UploadFile] = File(...),
):
try:
decoded_payload = payload.payload
Memory_ = Memory(user_id=decoded_payload['user_id'])
Memory_ = Memory(user_id=decoded_payload["user_id"])
await Memory_.async_init()
# memory_class = getattr(Memory_, f"_delete_{memory_type}_memory", None)
output = await Memory_._run_buffer(user_input=decoded_payload['prompt'], params=decoded_payload['params'])
output = await Memory_._run_main_buffer(
user_input=decoded_payload["prompt"], params=decoded_payload["params"], attention_modulators=decoded_payload["attention_modulators"]
)
return JSONResponse(content={"response": output}, status_code=200)
except Exception as e:
return JSONResponse(content={"response": {"error": str(e)}}, status_code=503)
@app.post("/buffer/create-context", response_model=dict)
async def available_buffer_actions(
payload: Payload,
# files: List[UploadFile] = File(...),
async def create_context(
payload: Payload,
# files: List[UploadFile] = File(...),
):
try:
decoded_payload = payload.payload
Memory_ = Memory(user_id=decoded_payload['user_id'])
Memory_ = Memory(user_id=decoded_payload["user_id"])
await Memory_.async_init()
# memory_class = getattr(Memory_, f"_delete_{memory_type}_memory", None)
output = await Memory_._create_buffer_context(user_input=decoded_payload['prompt'], params=decoded_payload['params'])
output = await Memory_._create_buffer_context(
user_input=decoded_payload["prompt"], params=decoded_payload["params"], attention_modulators=decoded_payload["attention_modulators"]
)
return JSONResponse(content={"response": output}, status_code=200)
except Exception as e:
return JSONResponse(content={"response": {"error": str(e)}}, status_code=503)
#
# # Process each uploaded PDF file
# results = []
# for file in files:
# contents = await file.read()
# tmp_location = os.path.join('/tmp', "tmp.pdf")
# with open(tmp_location, 'wb') as tmp_file:
# tmp_file.write(contents)
# loader = PyPDFLoader(tmp_location)
# pages = loader.load_and_split()
#
# stm = ShortTermMemory(user_id=decoded_payload['user_id'])
# stm.episodic_buffer.main_buffer(prompt=decoded_payload['prompt'], pages=pages)
# # Here you can perform your processing on the PDF contents
# results.append({"filename": file.filename, "size": len(contents)})
#
# return {"message": "Upload successful", "results": results}
#
# except Exception as e:
# return {"error": str(e)}
@app.post("/buffer/get-tasks", response_model=dict)
async def create_context(
payload: Payload,
# files: List[UploadFile] = File(...),
):
try:
decoded_payload = payload.payload
Memory_ = Memory(user_id=decoded_payload["user_id"])
# @app.post("/clear-cache", response_model=dict)
# async def clear_cache(request_data: Payload) -> dict:
# """
# Endpoint to clear the cache.
#
# Parameters:
# request_data (Payload): The request data containing the user and session IDs.
#
# Returns:
# dict: A dictionary with a message indicating the cache was cleared.
# """
# json_payload = request_data.payload
# agent = Agent()
# agent.set_user_session(json_payload["user_id"], json_payload["session_id"])
# try:
# agent.clear_cache()
# return JSONResponse(content={"response": "Cache cleared"}, status_code=200)
# except Exception as e:
# raise HTTPException(status_code=500, detail=str(e))
#
# @app.post("/correct-prompt-grammar", response_model=dict)
# async def prompt_to_correct_grammar(request_data: Payload) -> dict:
# json_payload = request_data.payload
# agent = Agent()
# agent.set_user_session(json_payload["user_id"], json_payload["session_id"])
# logging.info("Correcting grammar %s", json_payload["prompt_source"])
#
# output = agent.prompt_correction(json_payload["prompt_source"], model_speed= json_payload["model_speed"])
# return JSONResponse(content={"response": {"result": json.loads(output)}})
# @app.post("/action-add-zapier-calendar-action", response_model=dict,dependencies=[Depends(auth)])
# async def action_add_zapier_calendar_action(
# request: Request, request_data: Payload
# ) -> dict:
# json_payload = request_data.payload
# agent = Agent()
# agent.set_user_session(json_payload["user_id"], json_payload["session_id"])
# # Extract the bearer token from the header
# auth_header = request.headers.get("Authorization")
# if auth_header:
# bearer_token = auth_header.replace("Bearer ", "")
# else:
# bearer_token = None
# outcome = agent.add_zapier_calendar_action(
# prompt_base=json_payload["prompt_base"],
# token=bearer_token,
# model_speed=json_payload["model_speed"],
# )
# return JSONResponse(content={"response": outcome})
await Memory_.async_init()
# memory_class = getattr(Memory_, f"_delete_{memory_type}_memory", None)
output = await Memory_._get_task_list(
user_input=decoded_payload["prompt"], params=decoded_payload["params"], attention_modulators=decoded_payload["attention_modulators"]
)
return JSONResponse(content={"response": output}, status_code=200)
except Exception as e:
return JSONResponse(content={"response": {"error": str(e)}}, status_code=503)
def start_api_server(host: str = "0.0.0.0", port: int = 8000):
"""

File diff suppressed because it is too large Load diff