(frontend): refactor message processing in ChatPage component to handle function calls from chunks or response_data

♻️ (agent.py): refactor async_response, async_langflow, async_chat, async_langflow_chat, and async_langflow_chat_stream functions to return full response object for function calls
🔧 (chat_service.py): update ChatService to include function call data in message_data if present
cristhianzl 2025-09-05 16:53:02 -03:00
parent 7ff3bfd70b
commit f83851b259
3 changed files with 121 additions and 13 deletions
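Taken together, the three diffs below persist function-call data alongside each assistant message. Roughly, the stored message now has this shape (a sketch; the field names come from the diffs below, the values are illustrative):

from datetime import datetime

# Sketch of an assistant message in conversation_state["messages"] after this
# commit. "response_data" is set on the non-streaming paths (async_chat,
# async_langflow_chat); "chunks" on the streaming path (async_langflow_chat_stream).
assistant_message = {
    "role": "assistant",
    "content": "It is 21 °C in Berlin.",   # illustrative
    "response_id": "resp_123",             # illustrative
    "timestamp": datetime.now(),
    "response_data": {"tool_calls": []},   # full response object, serialized
    # "chunks": [...],                     # streaming path stores chunks instead
}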

ChatPage component (frontend)

@@ -454,12 +454,99 @@ function ChatPage() {
  content: string;
  timestamp?: string;
  response_id?: string;
}) => ({
  role: msg.role as "user" | "assistant",
  content: msg.content,
  timestamp: new Date(msg.timestamp || new Date()),
  // Add any other necessary properties
})
  chunks?: any[];
  response_data?: any;
}) => {
  const message: Message = {
    role: msg.role as "user" | "assistant",
    content: msg.content,
    timestamp: new Date(msg.timestamp || new Date()),
  };

  // Extract function calls from chunks or response_data
  if (msg.role === "assistant" && (msg.chunks || msg.response_data)) {
    const functionCalls: FunctionCall[] = [];

    console.log("Processing assistant message for function calls:", {
      hasChunks: !!msg.chunks,
      chunksLength: msg.chunks?.length,
      hasResponseData: !!msg.response_data,
    });

    // Process chunks (streaming data)
    if (msg.chunks && Array.isArray(msg.chunks)) {
      for (const chunk of msg.chunks) {
        // Handle Langflow format: chunks[].item.tool_call
        if (chunk.item && chunk.item.type === "tool_call") {
          const toolCall = chunk.item;
          console.log("Found Langflow tool call:", toolCall);
          functionCalls.push({
            id: toolCall.id,
            name: toolCall.tool_name,
            arguments: toolCall.inputs || {},
            argumentsString: JSON.stringify(toolCall.inputs || {}),
            result: toolCall.results,
            status: toolCall.status || "completed",
            type: "tool_call",
          });
        }
        // Handle OpenAI format: chunks[].delta.tool_calls
        else if (chunk.delta?.tool_calls) {
          for (const toolCall of chunk.delta.tool_calls) {
            if (toolCall.function) {
              functionCalls.push({
                id: toolCall.id,
                name: toolCall.function.name,
                arguments: toolCall.function.arguments
                  ? JSON.parse(toolCall.function.arguments)
                  : {},
                argumentsString: toolCall.function.arguments,
                status: "completed",
                type: toolCall.type || "function",
              });
            }
          }
        }
        // Process tool call results from chunks
        if (chunk.type === "response.tool_call.result" || chunk.type === "tool_call_result") {
          const lastCall = functionCalls[functionCalls.length - 1];
          if (lastCall) {
            lastCall.result = chunk.result || chunk;
            lastCall.status = "completed";
          }
        }
      }
    }

    // Process response_data (non-streaming data)
    if (msg.response_data) {
      // Parse if the payload arrived as a JSON string, then look for tool_calls
      const responseData =
        typeof msg.response_data === "string"
          ? JSON.parse(msg.response_data)
          : msg.response_data;
      if (responseData.tool_calls && Array.isArray(responseData.tool_calls)) {
        for (const toolCall of responseData.tool_calls) {
          const args = toolCall.function?.arguments || toolCall.arguments;
          functionCalls.push({
            id: toolCall.id,
            name: toolCall.function?.name || toolCall.name,
            arguments: args,
            argumentsString: typeof args === "string" ? args : JSON.stringify(args),
            result: toolCall.result,
            status: "completed",
            type: toolCall.type || "function",
          });
        }
      }
    }

    if (functionCalls.length > 0) {
      console.log("Setting functionCalls on message:", functionCalls);
      message.functionCalls = functionCalls;
    } else {
      console.log("No function calls found in message");
    }
  }

  return message;
}
);
setMessages(convertedMessages);
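For reference, the two chunk shapes this loop distinguishes look roughly as follows (Python literals; only the key names the code above reads are taken from the diff, the values are illustrative):

# Langflow format: the tool call lives under chunk["item"]
langflow_chunk = {
    "item": {
        "type": "tool_call",
        "id": "call_1",                  # illustrative
        "tool_name": "get_weather",      # illustrative
        "inputs": {"city": "Berlin"},    # illustrative
        "results": {"temp_c": 21},       # illustrative
        "status": "completed",
    },
}

# OpenAI-style format: tool calls arrive under chunk["delta"]["tool_calls"],
# with arguments as a JSON string that the frontend JSON.parse()s.
openai_chunk = {
    "delta": {
        "tool_calls": [
            {
                "id": "call_1",
                "type": "function",
                "function": {
                    "name": "get_weather",
                    "arguments": '{"city": "Berlin"}',
                },
            },
        ],
    },
}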

agent.py

@@ -180,7 +180,7 @@ async def async_response(
        response, "response_id", None
    )

    return response_text, response_id
    return response_text, response_id, response
# Unified streaming function for both chat and langflow
@@ -211,7 +211,7 @@ async def async_langflow(
    extra_headers: dict = None,
    previous_response_id: str = None,
):
    response_text, response_id = await async_response(
    response_text, response_id, response_obj = await async_response(
        langflow_client,
        prompt,
        flow_id,
@@ -281,7 +281,7 @@ async def async_chat(
        "Added user message", message_count=len(conversation_state["messages"])
    )

    response_text, response_id = await async_response(
    response_text, response_id, response_obj = await async_response(
        async_client,
        prompt,
        model,
@@ -292,12 +292,13 @@ async def async_chat(
        "Got response", response_preview=response_text[:50], response_id=response_id
    )

    # Add assistant response to conversation with response_id and timestamp
    # Add assistant response to conversation with response_id, timestamp, and full response object
    assistant_message = {
        "role": "assistant",
        "content": response_text,
        "response_id": response_id,
        "timestamp": datetime.now(),
        "response_data": response_obj.model_dump() if hasattr(response_obj, "model_dump") else str(response_obj),  # Store complete response for function calls
    }
    conversation_state["messages"].append(assistant_message)
    logger.debug(
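The hasattr(response_obj, "model_dump") guard keeps serialization working whether the client returns a Pydantic v2 model or a plain object; a minimal self-contained illustration (the PydanticLike class is hypothetical):

def serialize_response(response_obj):
    # Pydantic v2 models expose model_dump(); anything else is stringified.
    if hasattr(response_obj, "model_dump"):
        return response_obj.model_dump()
    return str(response_obj)

class PydanticLike:
    def model_dump(self):
        return {"output_text": "hi", "tool_calls": []}

print(serialize_response(PydanticLike()))  # {'output_text': 'hi', 'tool_calls': []}
print(serialize_response(object()))        # '<object object at 0x...>'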
@@ -419,7 +420,7 @@ async def async_langflow_chat(
        message_count=len(conversation_state["messages"]),
    )

    response_text, response_id = await async_response(
    response_text, response_id, response_obj = await async_response(
        langflow_client,
        prompt,
        flow_id,
@@ -433,12 +434,13 @@ async def async_langflow_chat(
        response_id=response_id,
    )

    # Add assistant response to conversation with response_id and timestamp
    # Add assistant response to conversation with response_id, timestamp, and full response object
    assistant_message = {
        "role": "assistant",
        "content": response_text,
        "response_id": response_id,
        "timestamp": datetime.now(),
        "response_data": response_obj.model_dump() if hasattr(response_obj, "model_dump") else str(response_obj),  # Store complete response for function calls
    }
    conversation_state["messages"].append(assistant_message)
    logger.debug(
@@ -504,6 +506,8 @@ async def async_langflow_chat_stream(
    full_response = ""
    response_id = None
    collected_chunks = []  # Store all chunks for function call data

    async for chunk in async_stream(
        langflow_client,
        prompt,
@@ -517,6 +521,8 @@ async def async_langflow_chat_stream(
            import json

            chunk_data = json.loads(chunk.decode("utf-8"))
            collected_chunks.append(chunk_data)  # Collect all chunk data

            if "delta" in chunk_data and "content" in chunk_data["delta"]:
                full_response += chunk_data["delta"]["content"]
                # Extract response_id from chunk
@@ -528,13 +534,14 @@ async def async_langflow_chat_stream(
            pass
        yield chunk

    # Add the complete assistant response to message history with response_id and timestamp
    # Add the complete assistant response to message history with response_id, timestamp, and function call data
    if full_response:
        assistant_message = {
            "role": "assistant",
            "content": full_response,
            "response_id": response_id,
            "timestamp": datetime.now(),
            "chunks": collected_chunks,  # Store complete chunk data for function calls
        }
        conversation_state["messages"].append(assistant_message)
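Each streamed chunk is a UTF-8 encoded JSON document; the handler both forwards it and retains the parsed form for later function-call display. A condensed sketch of the per-chunk bookkeeping (the payload is illustrative):

import json

collected_chunks = []
full_response = ""

raw_chunk = b'{"delta": {"content": "Hello"}, "response_id": "resp_123"}'  # illustrative
chunk_data = json.loads(raw_chunk.decode("utf-8"))
collected_chunks.append(chunk_data)
if "delta" in chunk_data and "content" in chunk_data["delta"]:
    full_response += chunk_data["delta"]["content"]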

chat_service.py

@@ -226,6 +226,13 @@ class ChatService:
                }
                if msg.get("response_id"):
                    message_data["response_id"] = msg["response_id"]

                # Include function call data if present
                if msg.get("chunks"):
                    message_data["chunks"] = msg["chunks"]
                if msg.get("response_data"):
                    message_data["response_data"] = msg["response_data"]

                messages.append(message_data)

            if messages:  # Only include conversations with actual messages
@@ -305,6 +312,13 @@ class ChatService:
                }
                if msg.get("response_id"):
                    message_data["response_id"] = msg["response_id"]

                # Include function call data if present
                if msg.get("chunks"):
                    message_data["chunks"] = msg["chunks"]
                if msg.get("response_data"):
                    message_data["response_data"] = msg["response_data"]

                messages.append(message_data)

            if messages:  # Only include conversations with actual messages
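Both hunks follow the same attach-if-present pattern, which keeps conversations recorded before this commit serializing exactly as before; condensed below (the base fields are an assumption from the surrounding code):

def serialize_message(msg: dict) -> dict:
    message_data = {
        "role": msg.get("role"),
        "content": msg.get("content"),
    }
    # Optional fields are only attached when present, so messages without
    # function-call data round-trip unchanged.
    for key in ("response_id", "chunks", "response_data"):
        if msg.get(key):
            message_data[key] = msg[key]
    return message_data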