conversation history fix

This commit is contained in:
parent 8503e0f661
commit 2bdfd3f49d

3 changed files with 133 additions and 24 deletions
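Taken together, the three files thread a response ID through each turn: the client sends previous_response_id alongside the prompt, and the server hands back a response_id to feed into the next request. A minimal sketch of that non-streaming round trip, assuming the src/app.py server is reachable locally (the requests dependency, host, port, and prompts are illustrative; the field names are the ones used in the diffs below):

import requests  # third-party HTTP client, used here only for illustration

BASE = "http://localhost:8000"  # hypothetical host/port for the src/app.py server

def send(prompt, previous_response_id=None, endpoint="/api/chat"):
    # Mirror the frontend change: only include previous_response_id when we have one.
    body = {"prompt": prompt}
    if previous_response_id:
        body["previous_response_id"] = previous_response_id
    result = requests.post(BASE + endpoint, json=body).json()
    # Non-streaming responses now carry "response_id" when the backend has one.
    return result["response"], result.get("response_id")

reply, rid = send("What tools do you have?")
follow_up, _ = send("Use the first one.", previous_response_id=rid)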
@@ -36,12 +36,23 @@ export default function ChatPage() {
     timestamp: Date
   } | null>(null)
   const [expandedFunctionCalls, setExpandedFunctionCalls] = useState<Set<string>>(new Set())
+  const [previousResponseIds, setPreviousResponseIds] = useState<{
+    chat: string | null
+    langflow: string | null
+  }>({ chat: null, langflow: null })
   const messagesEndRef = useRef<HTMLDivElement>(null)
 
   const scrollToBottom = () => {
     messagesEndRef.current?.scrollIntoView({ behavior: "smooth" })
   }
 
+  const handleEndpointChange = (newEndpoint: EndpointType) => {
+    setEndpoint(newEndpoint)
+    // Clear the conversation when switching endpoints to avoid response ID conflicts
+    setMessages([])
+    setPreviousResponseIds({ chat: null, langflow: null })
+  }
+
   useEffect(() => {
     scrollToBottom()
   }, [messages, streamingMessage])
@@ -50,15 +61,23 @@ export default function ChatPage() {
     const apiEndpoint = endpoint === "chat" ? "/api/chat" : "/api/langflow"
 
     try {
+      const requestBody: any = {
+        prompt: userMessage.content,
+        stream: true
+      }
+
+      // Add previous_response_id if we have one for this endpoint
+      const currentResponseId = previousResponseIds[endpoint]
+      if (currentResponseId) {
+        requestBody.previous_response_id = currentResponseId
+      }
+
       const response = await fetch(apiEndpoint, {
         method: "POST",
         headers: {
           "Content-Type": "application/json",
         },
-        body: JSON.stringify({
-          prompt: userMessage.content,
-          stream: true
-        }),
+        body: JSON.stringify(requestBody),
       })
 
       if (!response.ok) {
@@ -74,6 +93,7 @@ export default function ChatPage() {
       let buffer = ""
       let currentContent = ""
       const currentFunctionCalls: FunctionCall[] = []
+      let newResponseId: string | null = null
 
       // Initialize streaming message
       setStreamingMessage({
@@ -100,6 +120,13 @@ export default function ChatPage() {
               const chunk = JSON.parse(line)
               console.log("Received chunk:", chunk.type || chunk.object, chunk)
 
+              // Extract response ID if present
+              if (chunk.id) {
+                newResponseId = chunk.id
+              } else if (chunk.response_id) {
+                newResponseId = chunk.response_id
+              }
+
               // Handle OpenAI Chat Completions streaming format
               if (chunk.object === "response.chunk" && chunk.delta) {
                 // Handle function calls in delta
@@ -336,6 +363,14 @@ export default function ChatPage() {
           setMessages(prev => [...prev, finalMessage])
           setStreamingMessage(null)
 
+          // Store the response ID for the next request for this endpoint
+          if (newResponseId) {
+            setPreviousResponseIds(prev => ({
+              ...prev,
+              [endpoint]: newResponseId
+            }))
+          }
+
         } catch (error) {
           console.error("SSE Stream error:", error)
           setStreamingMessage(null)
@@ -369,12 +404,21 @@ export default function ChatPage() {
     // Original non-streaming logic
     try {
       const apiEndpoint = endpoint === "chat" ? "/api/chat" : "/api/langflow"
+
+      const requestBody: any = { prompt: userMessage.content }
+
+      // Add previous_response_id if we have one for this endpoint
+      const currentResponseId = previousResponseIds[endpoint]
+      if (currentResponseId) {
+        requestBody.previous_response_id = currentResponseId
+      }
+
       const response = await fetch(apiEndpoint, {
         method: "POST",
         headers: {
           "Content-Type": "application/json",
         },
-        body: JSON.stringify({ prompt: userMessage.content }),
+        body: JSON.stringify(requestBody),
       })
 
       const result = await response.json()
@@ -386,6 +430,14 @@ export default function ChatPage() {
           timestamp: new Date()
         }
         setMessages(prev => [...prev, assistantMessage])
+
+        // Store the response ID if present for this endpoint
+        if (result.response_id) {
+          setPreviousResponseIds(prev => ({
+            ...prev,
+            [endpoint]: result.response_id
+          }))
+        }
       } else {
         console.error("Chat failed:", result.error)
         const errorMessage: Message = {
@@ -526,7 +578,7 @@ export default function ChatPage() {
                 <Button
                   variant={endpoint === "chat" ? "default" : "ghost"}
                   size="sm"
-                  onClick={() => setEndpoint("chat")}
+                  onClick={() => handleEndpointChange("chat")}
                   className="h-7 text-xs"
                 >
                   Chat
@@ -534,7 +586,7 @@ export default function ChatPage() {
                 <Button
                   variant={endpoint === "langflow" ? "default" : "ghost"}
                   size="sm"
-                  onClick={() => setEndpoint("langflow")}
+                  onClick={() => handleEndpointChange("langflow")}
                   className="h-7 text-xs"
                 >
                   Langflow
src/agent.py (69 lines changed)
@@ -1,5 +1,11 @@
 messages = [{"role": "system", "content": "You are a helpful assistant. Always use the search_tools to answer questions."}]
 
+# Simple session store for conversation state
+conversation_state = {
+    "messages": messages,
+    "previous_response_id": None
+}
+
 # Generic async response function for streaming
 async def async_response_stream(client, prompt: str, model: str, previous_response_id: str = None, log_prefix: str = "response"):
     print(f"user ==> {prompt}")
@@ -79,7 +85,15 @@ async def async_response(client, prompt: str, model: str, previous_response_id:
 
     response_text = response.output_text
     print(f"{log_prefix} ==> {response_text}")
-    return response_text
+
+    # Extract and store response_id if available
+    response_id = getattr(response, 'id', None) or getattr(response, 'response_id', None)
+    if response_id:
+        global conversation_state
+        conversation_state["previous_response_id"] = response_id
+        print(f"Stored response_id: {response_id}")
+
+    return response_text, response_id
 
 # Unified streaming function for both chat and langflow
 async def async_stream(client, prompt: str, model: str, previous_response_id: str = None, log_prefix: str = "response"):
@@ -87,14 +101,15 @@ async def async_stream(client, prompt: str, model: str, previous_response_id: st
         yield chunk
 
 # Async langflow function (non-streaming only)
-async def async_langflow(langflow_client, flow_id: str, prompt: str):
-    return await async_response(langflow_client, prompt, flow_id, log_prefix="langflow")
+async def async_langflow(langflow_client, flow_id: str, prompt: str, previous_response_id: str = None):
+    response_text, response_id = await async_response(langflow_client, prompt, flow_id, previous_response_id=previous_response_id, log_prefix="langflow")
+    return response_text, response_id
 
 # Async langflow function for streaming (alias for compatibility)
-async def async_langflow_stream(langflow_client, flow_id: str, prompt: str):
+async def async_langflow_stream(langflow_client, flow_id: str, prompt: str, previous_response_id: str = None):
     print(f"[DEBUG] Starting langflow stream for prompt: {prompt}")
     try:
-        async for chunk in async_stream(langflow_client, prompt, flow_id, log_prefix="langflow"):
+        async for chunk in async_stream(langflow_client, prompt, flow_id, previous_response_id=previous_response_id, log_prefix="langflow"):
             print(f"[DEBUG] Yielding chunk from langflow_stream: {chunk[:100]}...")
             yield chunk
         print(f"[DEBUG] Langflow stream completed")
@@ -106,13 +121,47 @@ async def async_langflow_stream(langflow_client, flow_id: str, prompt: str):
 
 # Async chat function (non-streaming only)
 async def async_chat(async_client, prompt: str, model: str = "gpt-4.1-mini", previous_response_id: str = None):
-    global messages
-    messages += [{"role": "user", "content": prompt}]
-    return await async_response(async_client, prompt, model, previous_response_id=previous_response_id, log_prefix="agent")
+    global conversation_state
+
+    # If no previous_response_id is provided, reset conversation state
+    if previous_response_id is None:
+        conversation_state["messages"] = [{"role": "system", "content": "You are a helpful assistant. Always use the search_tools to answer questions."}]
+        conversation_state["previous_response_id"] = None
+
+    # Add user message to conversation
+    conversation_state["messages"].append({"role": "user", "content": prompt})
+
+    response_text, response_id = await async_response(async_client, prompt, model, previous_response_id=previous_response_id, log_prefix="agent")
+
+    # Add assistant response to conversation
+    conversation_state["messages"].append({"role": "assistant", "content": response_text})
+
+    return response_text, response_id
 
 # Async chat function for streaming (alias for compatibility)
 async def async_chat_stream(async_client, prompt: str, model: str = "gpt-4.1-mini", previous_response_id: str = None):
-    global messages
-    messages += [{"role": "user", "content": prompt}]
+    global conversation_state
+
+    # If no previous_response_id is provided, reset conversation state
+    if previous_response_id is None:
+        conversation_state["messages"] = [{"role": "system", "content": "You are a helpful assistant. Always use the search_tools to answer questions."}]
+        conversation_state["previous_response_id"] = None
+
+    # Add user message to conversation
+    conversation_state["messages"].append({"role": "user", "content": prompt})
+
+    full_response = ""
     async for chunk in async_stream(async_client, prompt, model, previous_response_id=previous_response_id, log_prefix="agent"):
+        # Extract text content to build full response for history
+        try:
+            import json
+            chunk_data = json.loads(chunk.decode('utf-8'))
+            if 'delta' in chunk_data and 'content' in chunk_data['delta']:
+                full_response += chunk_data['delta']['content']
+        except:
+            pass
         yield chunk
+
+    # Add the complete assistant response to message history
+    if full_response:
+        conversation_state["messages"].append({"role": "assistant", "content": full_response})
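The history bookkeeping added to async_chat_stream only counts chunks that decode to JSON with a delta.content field; anything else is skipped by the bare except. A self-contained sketch of that accumulation, with made-up chunk payloads (the accumulate helper is illustrative, not part of the commit):

import json

def accumulate(chunks):
    # Same shape check as the new code in async_chat_stream: only
    # {"delta": {"content": ...}} chunks contribute to the stored history.
    full_response = ""
    for chunk in chunks:
        try:
            data = json.loads(chunk.decode("utf-8"))
            if "delta" in data and "content" in data["delta"]:
                full_response += data["delta"]["content"]
        except (ValueError, UnicodeDecodeError):
            pass
    return full_response

print(accumulate([b'{"delta": {"content": "Hel"}}',
                  b'{"delta": {"content": "lo"}}',
                  b'event: done']))  # prints "Hello"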
src/app.py (20 lines changed)
@@ -305,6 +305,7 @@ async def search_tool(query: str)-> dict[str, Any]:
 async def chat_endpoint(request):
     data = await request.json()
     prompt = data.get("prompt", "")
+    previous_response_id = data.get("previous_response_id")
     stream = data.get("stream", False)
 
     if not prompt:
@@ -313,7 +314,7 @@ async def chat_endpoint(request):
     if stream:
         from agent import async_chat_stream
         return StreamingResponse(
-            async_chat_stream(patched_async_client, prompt),
+            async_chat_stream(patched_async_client, prompt, previous_response_id=previous_response_id),
             media_type="text/event-stream",
             headers={
                 "Cache-Control": "no-cache",
@@ -323,12 +324,16 @@ async def chat_endpoint(request):
             }
         )
     else:
-        response = await async_chat(patched_async_client, prompt)
-        return JSONResponse({"response": response})
+        response_text, response_id = await async_chat(patched_async_client, prompt, previous_response_id=previous_response_id)
+        response_data = {"response": response_text}
+        if response_id:
+            response_data["response_id"] = response_id
+        return JSONResponse(response_data)
 
 async def langflow_endpoint(request):
     data = await request.json()
     prompt = data.get("prompt", "")
+    previous_response_id = data.get("previous_response_id")
     stream = data.get("stream", False)
 
     if not prompt:
@@ -341,7 +346,7 @@ async def langflow_endpoint(request):
         if stream:
             from agent import async_langflow_stream
             return StreamingResponse(
-                async_langflow_stream(langflow_client, flow_id, prompt),
+                async_langflow_stream(langflow_client, flow_id, prompt, previous_response_id=previous_response_id),
                 media_type="text/event-stream",
                 headers={
                     "Cache-Control": "no-cache",
@@ -351,8 +356,11 @@ async def langflow_endpoint(request):
                 }
             )
         else:
-            response = await async_langflow(langflow_client, flow_id, prompt)
-            return JSONResponse({"response": response})
+            response_text, response_id = await async_langflow(langflow_client, flow_id, prompt, previous_response_id=previous_response_id)
+            response_data = {"response": response_text}
+            if response_id:
+                response_data["response_id"] = response_id
+            return JSONResponse(response_data)
 
     except Exception as e:
         return JSONResponse({"error": f"Langflow request failed: {str(e)}"}, status_code=500)
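On the streaming path, the frontend parses each line of the response body as JSON and remembers chunk.id (falling back to chunk.response_id) for the next turn. A rough Python equivalent of that client loop, assuming the same local server as in the earlier sketch (host, port, and prompts are illustrative; chunk shapes beyond id/response_id and delta.content are not guaranteed by these diffs):

import json
import requests  # third-party HTTP client, used here only for illustration

BASE = "http://localhost:8000"  # hypothetical host/port for the src/app.py server

def stream_chat(prompt, previous_response_id=None):
    body = {"prompt": prompt, "stream": True}
    if previous_response_id:
        body["previous_response_id"] = previous_response_id
    last_id = None
    with requests.post(BASE + "/api/chat", json=body, stream=True) as resp:
        for line in resp.iter_lines():
            if not line:
                continue
            try:
                chunk = json.loads(line)
            except ValueError:
                continue
            if not isinstance(chunk, dict):
                continue
            # Same precedence the ChatPage uses: chunk.id first, then chunk.response_id.
            last_id = chunk.get("id") or chunk.get("response_id") or last_id
            delta = chunk.get("delta") or {}
            if isinstance(delta, dict) and delta.get("content"):
                print(delta["content"], end="", flush=True)
    return last_id

rid = stream_chat("What tools do you have?")
rid = stream_chat("Use the first one.", previous_response_id=rid)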