Add new Langflow endpoint

This commit is contained in:
phact 2025-07-16 03:54:46 -04:00
parent 1561d504f6
commit 3777e61496
4 changed files with 48 additions and 14 deletions

View file

@@ -5,7 +5,7 @@ description = "Add your description here"
readme = "README.md"
requires-python = ">=3.13"
dependencies = [
"agentd>=0.2.0.post2",
"agentd>=0.2.0.post3",
"aiofiles>=24.1.0",
"docling>=2.41.0",
"opensearch-py[async]>=3.0.0",

View file

@@ -1,4 +1,4 @@
messages = [{"role": "system", "content": "You are a helpful assistant. use your tools to answer questions."}]
messages = [{"role": "system", "content": "You are a helpful assistant. Always use the search_tools to answer questions."}]
# Async version for web server
async def async_chat(async_client, prompt: str, model: str = "gpt-4.1-mini", previous_response_id: str = None) -> str:

View file

@@ -11,9 +11,6 @@ os.environ['USE_CPU_ONLY'] = 'true'
import hashlib
import tempfile
import asyncio
import time
import json
import httpx
from starlette.applications import Starlette
from starlette.requests import Request
@@ -27,6 +24,10 @@ from docling.document_converter import DocumentConverter
from agentd.patch import patch_openai_with_mcp
from openai import AsyncOpenAI
from agentd.tool_decorator import tool
from dotenv import load_dotenv
load_dotenv()
load_dotenv("../")
# Initialize Docling converter
converter = DocumentConverter() # basic converter; tweak via PipelineOptions if you need OCR, etc.
@@ -35,7 +36,12 @@ converter = DocumentConverter() # basic converter; tweak via PipelineOptions if
opensearch_host = os.getenv("OPENSEARCH_HOST", "localhost")
opensearch_port = int(os.getenv("OPENSEARCH_PORT", "9200"))
opensearch_username = os.getenv("OPENSEARCH_USERNAME", "admin")
opensearch_password = os.getenv("OPENSEARCH_PASSWORD", "OSisgendb1!")
opensearch_password = os.getenv("OPENSEARCH_PASSWORD")
langflow_url = os.getenv("LANGFLOW_URL", "http://localhost:7860")
flow_id = os.getenv("FLOW_ID")
langflow_key = os.getenv("LANGFLOW_SECRET_KEY")
es = AsyncOpenSearch(
hosts=[{"host": opensearch_host, "port": opensearch_port}],
@@ -81,7 +87,11 @@ index_body = {
}
}
async_client = patch_openai_with_mcp(AsyncOpenAI()) # Get the patched client back
langflow_client = AsyncOpenAI(
base_url=f"{langflow_url}/api/v1",
api_key=langflow_key
)
patched_async_client = patch_openai_with_mcp(AsyncOpenAI()) # Get the patched client back
async def wait_for_opensearch():
"""Wait for OpenSearch to be ready with retries"""
@@ -175,7 +185,7 @@ async def process_file_common(file_path: str, file_hash: str = None):
slim_doc = extract_relevant(full_doc)
texts = [c["text"] for c in slim_doc["chunks"]]
resp = await async_client.embeddings.create(model=EMBED_MODEL, input=texts)
resp = await patched_async_client.embeddings.create(model=EMBED_MODEL, input=texts)
embeddings = [d.embedding for d in resp.data]
# Index each chunk as a separate document
@@ -264,7 +274,7 @@ async def search_tool(query: str)-> dict[str, Any]:
- {"results": [chunks]} on success
"""
# Embed the query
resp = await async_client.embeddings.create(model=EMBED_MODEL, input=[query])
resp = await patched_async_client.embeddings.create(model=EMBED_MODEL, input=[query])
query_embedding = resp.data[0].embedding
# Search using vector similarity on individual chunks
search_body = {
@@ -299,14 +309,38 @@ async def chat_endpoint(request):
if not prompt:
return JSONResponse({"error": "Prompt is required"}, status_code=400)
response = await async_chat(async_client, prompt)
response = await async_chat(patched_async_client, prompt)
return JSONResponse({"response": response})
async def langflow_endpoint(request):
    """Proxy a chat prompt to a Langflow flow via its OpenAI-compatible API.

    Expects a JSON body with a "prompt" key. Returns ``{"response": text}``
    on success; ``{"error": ...}`` with 400 for a missing prompt or 500 for
    missing configuration / upstream failure.
    """
    data = await request.json()
    prompt = data.get("prompt", "")
    if not prompt:
        return JSONResponse({"error": "Prompt is required"}, status_code=400)
    # langflow_url has a localhost default, but flow_id and langflow_key are
    # only populated from the environment at module load.
    if not langflow_url or not flow_id or not langflow_key:
        # BUG FIX: the message previously said LANGFLOW_KEY, but the variable
        # actually read at startup is LANGFLOW_SECRET_KEY — name the real one
        # so operators set the correct env var.
        return JSONResponse(
            {"error": "LANGFLOW_URL, FLOW_ID, and LANGFLOW_SECRET_KEY environment variables are required"},
            status_code=500,
        )
    try:
        # On Langflow's OpenAI-compatible /responses endpoint the flow id
        # plays the role of the model name.
        response = await langflow_client.responses.create(
            model=flow_id,
            input=prompt
        )
        response_text = response.output_text
        return JSONResponse({"response": response_text})
    except Exception as e:
        # Boundary handler: surface upstream failures (connection refused,
        # bad flow id, auth) as a JSON error instead of crashing the server.
        return JSONResponse({"error": f"Langflow request failed: {str(e)}"}, status_code=500)
# HTTP surface of the service: file ingestion, vector search, and the two
# chat backends (patched OpenAI client on /chat, Langflow flow on /langflow).
_route_table = [
    Route("/upload", upload, methods=["POST"]),
    Route("/upload_path", upload_path, methods=["POST"]),
    Route("/search", search, methods=["POST"]),
    Route("/chat", chat_endpoint, methods=["POST"]),
    Route("/langflow", langflow_endpoint, methods=["POST"]),
]
app = Starlette(debug=True, routes=_route_table)
if __name__ == "__main__":

8
uv.lock generated
View file

@@ -9,7 +9,7 @@ resolution-markers = [
[[package]]
name = "agentd"
version = "0.2.0.post2"
version = "0.2.0.post3"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "litellm" },
@@ -18,9 +18,9 @@ dependencies = [
{ name = "openai-agents" },
{ name = "pyyaml" },
]
sdist = { url = "https://files.pythonhosted.org/packages/91/fb/177ce5c7e8f8e8c4a4771b1da26e09e62780bf6f4042622654d05b101534/agentd-0.2.0.post2.tar.gz", hash = "sha256:b4cf8f5b727c1f0c0c9685762415e5affbc501758c0641eb9bd9c7d972c3ef30", size = 114513, upload-time = "2025-07-16T05:13:30.646Z" }
sdist = { url = "https://files.pythonhosted.org/packages/6e/89/2bc397c80764d6acfeb6de7ac6d3ce8914f6f37f57307d1e2b6f7e8b0923/agentd-0.2.0.post3.tar.gz", hash = "sha256:765cb51798791eed32687b44305b20dd4130990471f0f1914afa2b292d09cb5e", size = 114530, upload-time = "2025-07-16T06:13:11.423Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/e4/5d/232b01286225d5e6eff6e4e741411950d60002eeeceb7b2ab1e84d96cc66/agentd-0.2.0.post2-py3-none-any.whl", hash = "sha256:000a058758843739061c93503db5977c9734f2e690545dd21923bf9ae8a8a161", size = 13266, upload-time = "2025-07-16T05:13:29.389Z" },
{ url = "https://files.pythonhosted.org/packages/8c/f8/ced474722557f11e0a2f7e691371cf25c6a823507cc297971baa71bfbaac/agentd-0.2.0.post3-py3-none-any.whl", hash = "sha256:d05c6123a33d9f0b466fba4f7b378618c352ca367c65cd2c5a54f867af7b3cff", size = 13299, upload-time = "2025-07-16T06:13:10.034Z" },
]
[[package]]
@@ -434,7 +434,7 @@ dependencies = [
[package.metadata]
requires-dist = [
{ name = "agentd", specifier = ">=0.2.0.post2" },
{ name = "agentd", specifier = ">=0.2.0.post3" },
{ name = "aiofiles", specifier = ">=24.1.0" },
{ name = "docling", specifier = ">=2.41.0" },
{ name = "opensearch-py", extras = ["async"], specifier = ">=3.0.0" },