conversation history fix
parent 8503e0f661
commit 2bdfd3f49d

3 changed files with 133 additions and 24 deletions
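The change threads an OpenAI-style previous_response_id through the whole stack so follow-up turns keep their conversation history: the React chat page remembers the id returned with each reply (one per endpoint), sends it back on the next request, and the Python backend passes it through to the agent and returns the new id. Below is a minimal sketch, outside the commit itself, of the non-streaming request/response contract; the /api/chat path and JSON field names come from the diff, while the base URL and the use of the requests library are assumptions for illustration.

    # Minimal sketch of the non-streaming contract, assuming a dev server at
    # http://localhost:3000 that serves the same /api/chat route the React page calls.
    import requests

    BASE_URL = "http://localhost:3000"  # assumption, not part of the commit

    def send_turn(prompt, previous_response_id=None):
        body = {"prompt": prompt, "stream": False}
        if previous_response_id:
            body["previous_response_id"] = previous_response_id
        data = requests.post(f"{BASE_URL}/api/chat", json=body, timeout=60).json()
        # The backend returns {"response": ..., "response_id": ...} when an id is available.
        return data["response"], data.get("response_id")

    # Two turns: the second call threads the id from the first, so the agent
    # treats it as a continuation rather than a fresh conversation.
    reply, rid = send_turn("What is Langflow?")
    reply, rid = send_turn("Summarize that in one sentence.", previous_response_id=rid)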
@@ -36,12 +36,23 @@ export default function ChatPage() {
     timestamp: Date
   } | null>(null)
   const [expandedFunctionCalls, setExpandedFunctionCalls] = useState<Set<string>>(new Set())
+  const [previousResponseIds, setPreviousResponseIds] = useState<{
+    chat: string | null
+    langflow: string | null
+  }>({ chat: null, langflow: null })
   const messagesEndRef = useRef<HTMLDivElement>(null)

   const scrollToBottom = () => {
     messagesEndRef.current?.scrollIntoView({ behavior: "smooth" })
   }

+  const handleEndpointChange = (newEndpoint: EndpointType) => {
+    setEndpoint(newEndpoint)
+    // Clear the conversation when switching endpoints to avoid response ID conflicts
+    setMessages([])
+    setPreviousResponseIds({ chat: null, langflow: null })
+  }
+
   useEffect(() => {
     scrollToBottom()
   }, [messages, streamingMessage])
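The page now keeps one previous-response id per endpoint and wipes both the transcript and the ids when the user switches endpoints, since an id minted by the chat backend means nothing to the Langflow backend. A hedged, language-agnostic sketch of that bookkeeping, written in Python purely for illustration and not part of the commit:

    # Hypothetical mirror of the TypeScript state above.
    previous_response_ids = {"chat": None, "langflow": None}
    messages = []

    def handle_endpoint_change(new_endpoint):
        # Clearing both pieces of state avoids sending the other backend's response id.
        messages.clear()
        previous_response_ids["chat"] = None
        previous_response_ids["langflow"] = None
        return new_endpoint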
@@ -50,15 +61,23 @@ export default function ChatPage() {
     const apiEndpoint = endpoint === "chat" ? "/api/chat" : "/api/langflow"

     try {
+      const requestBody: any = {
+        prompt: userMessage.content,
+        stream: true
+      }
+
+      // Add previous_response_id if we have one for this endpoint
+      const currentResponseId = previousResponseIds[endpoint]
+      if (currentResponseId) {
+        requestBody.previous_response_id = currentResponseId
+      }
+
       const response = await fetch(apiEndpoint, {
         method: "POST",
         headers: {
           "Content-Type": "application/json",
         },
-        body: JSON.stringify({
-          prompt: userMessage.content,
-          stream: true
-        }),
+        body: JSON.stringify(requestBody),
       })

       if (!response.ok) {
@@ -74,6 +93,7 @@ export default function ChatPage() {
       let buffer = ""
       let currentContent = ""
       const currentFunctionCalls: FunctionCall[] = []
+      let newResponseId: string | null = null

       // Initialize streaming message
       setStreamingMessage({
@@ -100,6 +120,13 @@ export default function ChatPage() {
             const chunk = JSON.parse(line)
             console.log("Received chunk:", chunk.type || chunk.object, chunk)
+
+            // Extract response ID if present
+            if (chunk.id) {
+              newResponseId = chunk.id
+            } else if (chunk.response_id) {
+              newResponseId = chunk.response_id
+            }

             // Handle OpenAI Chat Completions streaming format
             if (chunk.object === "response.chunk" && chunk.delta) {
               // Handle function calls in delta
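While streaming, the client now inspects every parsed chunk for an id (chunk.id or chunk.response_id) and remembers the last one it sees, alongside the delta text it already accumulates. A hedged Python sketch of the same folding logic, useful as a reference for what the chunk stream is expected to contain; field names come from the diff, everything else is an assumption:

    import json

    def fold_stream(lines):
        # Folds newline-delimited JSON chunks into (text, response_id),
        # mirroring the client-side handling above.
        response_id = None
        text = ""
        for line in lines:
            line = line.strip()
            if not line:
                continue
            chunk = json.loads(line)
            response_id = chunk.get("id") or chunk.get("response_id") or response_id
            delta = chunk.get("delta") or {}
            if isinstance(delta, dict) and "content" in delta:
                text += delta["content"]
        return text, response_id

    # fold_stream(['{"id": "resp_1", "delta": {"content": "Hi"}}']) -> ("Hi", "resp_1")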
@@ -336,6 +363,14 @@ export default function ChatPage() {
         setMessages(prev => [...prev, finalMessage])
         setStreamingMessage(null)
+
+        // Store the response ID for the next request for this endpoint
+        if (newResponseId) {
+          setPreviousResponseIds(prev => ({
+            ...prev,
+            [endpoint]: newResponseId
+          }))
+        }

       } catch (error) {
         console.error("SSE Stream error:", error)
         setStreamingMessage(null)
@@ -369,12 +404,21 @@ export default function ChatPage() {
       // Original non-streaming logic
       try {
         const apiEndpoint = endpoint === "chat" ? "/api/chat" : "/api/langflow"
+
+        const requestBody: any = { prompt: userMessage.content }
+
+        // Add previous_response_id if we have one for this endpoint
+        const currentResponseId = previousResponseIds[endpoint]
+        if (currentResponseId) {
+          requestBody.previous_response_id = currentResponseId
+        }
+
         const response = await fetch(apiEndpoint, {
           method: "POST",
           headers: {
             "Content-Type": "application/json",
           },
-          body: JSON.stringify({ prompt: userMessage.content }),
+          body: JSON.stringify(requestBody),
         })

         const result = await response.json()
@@ -386,6 +430,14 @@ export default function ChatPage() {
           timestamp: new Date()
         }
         setMessages(prev => [...prev, assistantMessage])
+
+        // Store the response ID if present for this endpoint
+        if (result.response_id) {
+          setPreviousResponseIds(prev => ({
+            ...prev,
+            [endpoint]: result.response_id
+          }))
+        }
       } else {
         console.error("Chat failed:", result.error)
         const errorMessage: Message = {
@@ -526,7 +578,7 @@ export default function ChatPage() {
               <Button
                 variant={endpoint === "chat" ? "default" : "ghost"}
                 size="sm"
-                onClick={() => setEndpoint("chat")}
+                onClick={() => handleEndpointChange("chat")}
                 className="h-7 text-xs"
               >
                 Chat
@@ -534,7 +586,7 @@ export default function ChatPage() {
               <Button
                 variant={endpoint === "langflow" ? "default" : "ghost"}
                 size="sm"
-                onClick={() => setEndpoint("langflow")}
+                onClick={() => handleEndpointChange("langflow")}
                 className="h-7 text-xs"
               >
                 Langflow
src/agent.py (71 changed lines)
@@ -1,5 +1,11 @@
 messages = [{"role": "system", "content": "You are a helpful assistant. Always use the search_tools to answer questions."}]
+
+# Simple session store for conversation state
+conversation_state = {
+    "messages": messages,
+    "previous_response_id": None
+}

 # Generic async response function for streaming
 async def async_response_stream(client, prompt: str, model: str, previous_response_id: str = None, log_prefix: str = "response"):
     print(f"user ==> {prompt}")
@@ -79,7 +85,15 @@ async def async_response(client, prompt: str, model: str, previous_response_id:

     response_text = response.output_text
     print(f"{log_prefix} ==> {response_text}")
-    return response_text
+
+    # Extract and store response_id if available
+    response_id = getattr(response, 'id', None) or getattr(response, 'response_id', None)
+    if response_id:
+        global conversation_state
+        conversation_state["previous_response_id"] = response_id
+        print(f"Stored response_id: {response_id}")
+
+    return response_text, response_id

 # Unified streaming function for both chat and langflow
 async def async_stream(client, prompt: str, model: str, previous_response_id: str = None, log_prefix: str = "response"):
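async_response now returns a (response_text, response_id) pair instead of a bare string, which is a breaking change for every caller; that is why async_langflow and async_chat below are updated to unpack two values. A hedged usage sketch, assuming the module is importable as agent and that the client and model arguments are whatever the app already passes in:

    # Usage sketch only; chaining two turns via response_id is the point.
    from agent import async_response

    async def two_turn_example(client, model="gpt-4.1-mini"):
        text1, rid = await async_response(client, "First question", model, log_prefix="example")
        text2, _ = await async_response(
            client, "Follow-up question", model,
            previous_response_id=rid, log_prefix="example",
        )
        return text1, text2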
@@ -87,14 +101,15 @@ async def async_stream(client, prompt: str, model: str, previous_response_id: st
         yield chunk

 # Async langflow function (non-streaming only)
-async def async_langflow(langflow_client, flow_id: str, prompt: str):
-    return await async_response(langflow_client, prompt, flow_id, log_prefix="langflow")
+async def async_langflow(langflow_client, flow_id: str, prompt: str, previous_response_id: str = None):
+    response_text, response_id = await async_response(langflow_client, prompt, flow_id, previous_response_id=previous_response_id, log_prefix="langflow")
+    return response_text, response_id

 # Async langflow function for streaming (alias for compatibility)
-async def async_langflow_stream(langflow_client, flow_id: str, prompt: str):
+async def async_langflow_stream(langflow_client, flow_id: str, prompt: str, previous_response_id: str = None):
     print(f"[DEBUG] Starting langflow stream for prompt: {prompt}")
     try:
-        async for chunk in async_stream(langflow_client, prompt, flow_id, log_prefix="langflow"):
+        async for chunk in async_stream(langflow_client, prompt, flow_id, previous_response_id=previous_response_id, log_prefix="langflow"):
             print(f"[DEBUG] Yielding chunk from langflow_stream: {chunk[:100]}...")
             yield chunk
         print(f"[DEBUG] Langflow stream completed")
@@ -106,13 +121,47 @@ async def async_langflow_stream(langflow_client, flow_id: str, prompt: str):

 # Async chat function (non-streaming only)
 async def async_chat(async_client, prompt: str, model: str = "gpt-4.1-mini", previous_response_id: str = None):
-    global messages
-    messages += [{"role": "user", "content": prompt}]
-    return await async_response(async_client, prompt, model, previous_response_id=previous_response_id, log_prefix="agent")
+    global conversation_state
+
+    # If no previous_response_id is provided, reset conversation state
+    if previous_response_id is None:
+        conversation_state["messages"] = [{"role": "system", "content": "You are a helpful assistant. Always use the search_tools to answer questions."}]
+        conversation_state["previous_response_id"] = None
+
+    # Add user message to conversation
+    conversation_state["messages"].append({"role": "user", "content": prompt})
+
+    response_text, response_id = await async_response(async_client, prompt, model, previous_response_id=previous_response_id, log_prefix="agent")
+
+    # Add assistant response to conversation
+    conversation_state["messages"].append({"role": "assistant", "content": response_text})
+
+    return response_text, response_id

 # Async chat function for streaming (alias for compatibility)
 async def async_chat_stream(async_client, prompt: str, model: str = "gpt-4.1-mini", previous_response_id: str = None):
-    global messages
-    messages += [{"role": "user", "content": prompt}]
+    global conversation_state
+
+    # If no previous_response_id is provided, reset conversation state
+    if previous_response_id is None:
+        conversation_state["messages"] = [{"role": "system", "content": "You are a helpful assistant. Always use the search_tools to answer questions."}]
+        conversation_state["previous_response_id"] = None
+
+    # Add user message to conversation
+    conversation_state["messages"].append({"role": "user", "content": prompt})
+
+    full_response = ""
     async for chunk in async_stream(async_client, prompt, model, previous_response_id=previous_response_id, log_prefix="agent"):
-        yield chunk
+        # Extract text content to build full response for history
+        try:
+            import json
+            chunk_data = json.loads(chunk.decode('utf-8'))
+            if 'delta' in chunk_data and 'content' in chunk_data['delta']:
+                full_response += chunk_data['delta']['content']
+        except:
+            pass
+        yield chunk
+
+    # Add the complete assistant response to message history
+    if full_response:
+        conversation_state["messages"].append({"role": "assistant", "content": full_response})
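The streaming variant rebuilds the assistant's full reply by decoding each yielded chunk as UTF-8 JSON and collecting delta.content, then appends it to conversation_state once the stream ends. That extraction step is easiest to reason about in isolation; a hedged sketch of it as a standalone helper, not part of the commit, assuming chunks are byte strings shaped as in the diff:

    import json

    def extract_delta_content(chunk: bytes) -> str:
        # Returns the text fragment carried by one streamed chunk, or "" if the
        # chunk is not JSON or carries no delta.content, matching the tolerant
        # behaviour of the try/except in async_chat_stream.
        try:
            data = json.loads(chunk.decode("utf-8"))
        except (UnicodeDecodeError, json.JSONDecodeError):
            return ""
        delta = data.get("delta")
        if isinstance(delta, dict):
            return delta.get("content", "") or ""
        return ""

    # extract_delta_content(b'{"delta": {"content": "Hi"}}') == "Hi"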
src/app.py (20 changed lines)
@@ -305,6 +305,7 @@ async def search_tool(query: str)-> dict[str, Any]:
 async def chat_endpoint(request):
     data = await request.json()
     prompt = data.get("prompt", "")
+    previous_response_id = data.get("previous_response_id")
     stream = data.get("stream", False)

     if not prompt:
@@ -313,7 +314,7 @@ async def chat_endpoint(request):
     if stream:
         from agent import async_chat_stream
         return StreamingResponse(
-            async_chat_stream(patched_async_client, prompt),
+            async_chat_stream(patched_async_client, prompt, previous_response_id=previous_response_id),
             media_type="text/event-stream",
             headers={
                 "Cache-Control": "no-cache",
@@ -323,12 +324,16 @@ async def chat_endpoint(request):
             }
         )
     else:
-        response = await async_chat(patched_async_client, prompt)
-        return JSONResponse({"response": response})
+        response_text, response_id = await async_chat(patched_async_client, prompt, previous_response_id=previous_response_id)
+        response_data = {"response": response_text}
+        if response_id:
+            response_data["response_id"] = response_id
+        return JSONResponse(response_data)

 async def langflow_endpoint(request):
     data = await request.json()
     prompt = data.get("prompt", "")
+    previous_response_id = data.get("previous_response_id")
     stream = data.get("stream", False)

     if not prompt:
|
@ -341,7 +346,7 @@ async def langflow_endpoint(request):
|
|||
if stream:
|
||||
from agent import async_langflow_stream
|
||||
return StreamingResponse(
|
||||
async_langflow_stream(langflow_client, flow_id, prompt),
|
||||
async_langflow_stream(langflow_client, flow_id, prompt, previous_response_id=previous_response_id),
|
||||
media_type="text/event-stream",
|
||||
headers={
|
||||
"Cache-Control": "no-cache",
|
||||
|
|
@ -351,8 +356,11 @@ async def langflow_endpoint(request):
|
|||
}
|
||||
)
|
||||
else:
|
||||
response = await async_langflow(langflow_client, flow_id, prompt)
|
||||
return JSONResponse({"response": response})
|
||||
response_text, response_id = await async_langflow(langflow_client, flow_id, prompt, previous_response_id=previous_response_id)
|
||||
response_data = {"response": response_text}
|
||||
if response_id:
|
||||
response_data["response_id"] = response_id
|
||||
return JSONResponse(response_data)
|
||||
|
||||
except Exception as e:
|
||||
return JSONResponse({"error": f"Langflow request failed: {str(e)}"}, status_code=500)
|
||||
|
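Both endpoints now accept previous_response_id in the request body and, in the non-streaming branch, include response_id in the JSON reply when one was extracted. A hedged end-to-end sketch using Starlette's TestClient; the `from app import app` import and the /chat route path are assumptions, since the diff shows the endpoint functions but not how they are mounted:

    from starlette.testclient import TestClient
    from app import app  # assumption: src/app.py exposes a Starlette `app`

    client = TestClient(app)

    # First turn: no previous_response_id yet.
    first = client.post("/chat", json={"prompt": "hello", "stream": False}).json()
    # Expected shape: {"response": "...", "response_id": "..."} when an id was stored.

    # Second turn: thread the id back so the agent keeps the conversation history.
    second = client.post(
        "/chat",
        json={
            "prompt": "continue that thought",
            "stream": False,
            "previous_response_id": first.get("response_id"),
        },
    ).json()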