diff --git a/frontend/app/api/queries/useGetConversationsQuery.ts b/frontend/app/api/queries/useGetConversationsQuery.ts
index d77b7eff..66862605 100644
--- a/frontend/app/api/queries/useGetConversationsQuery.ts
+++ b/frontend/app/api/queries/useGetConversationsQuery.ts
@@ -4,6 +4,7 @@ import {
   useQueryClient,
 } from "@tanstack/react-query";
 import type { EndpointType } from "@/contexts/chat-context";
+import { useChat } from "@/contexts/chat-context";
 
 export interface RawConversation {
   response_id: string;
@@ -50,6 +51,7 @@ export const useGetConversationsQuery = (
   options?: Omit,
 ) => {
   const queryClient = useQueryClient();
+  const { isOnboardingComplete } = useChat();
 
   async function getConversations(context: { signal?: AbortSignal }): Promise {
     try {
@@ -95,6 +97,11 @@
     }
   }
 
+  // Extract enabled from options and combine with onboarding completion check
+  // Query is only enabled if onboarding is complete AND the caller's enabled condition is met
+  const callerEnabled = options?.enabled ?? true;
+  const enabled = isOnboardingComplete && callerEnabled;
+
   const queryResult = useQuery(
     {
       queryKey: ["conversations", endpoint, refreshTrigger],
@@ -106,6 +113,7 @@
       refetchOnMount: false, // Don't refetch on every mount
       refetchOnWindowFocus: false, // Don't refetch when window regains focus
       ...options,
+      enabled, // Override enabled after spreading options to ensure onboarding check is applied
     },
     queryClient,
   );
diff --git a/frontend/app/api/queries/useGetNudgesQuery.ts b/frontend/app/api/queries/useGetNudgesQuery.ts
index 05c97bde..ac4c1fde 100644
--- a/frontend/app/api/queries/useGetNudgesQuery.ts
+++ b/frontend/app/api/queries/useGetNudgesQuery.ts
@@ -3,6 +3,8 @@ import {
   useQuery,
   useQueryClient,
 } from "@tanstack/react-query";
+import { useChat } from "@/contexts/chat-context";
+import { useProviderHealthQuery } from "./useProviderHealthQuery";
 
 type Nudge = string;
 
@@ -27,6 +29,13 @@ export const useGetNudgesQuery = (
 ) => {
   const { chatId, filters, limit, scoreThreshold } = params ?? {};
   const queryClient = useQueryClient();
+  const { isOnboardingComplete } = useChat();
+
+  // Check if LLM provider is healthy
+  // If health data is not available yet, assume healthy (optimistic)
+  // Only disable if health data exists and shows LLM error
+  const { data: health } = useProviderHealthQuery();
+  const isLLMHealthy = health === undefined || (health?.status === "healthy" && !health?.llm_error);
 
   function cancel() {
     queryClient.removeQueries({
@@ -77,6 +86,11 @@
     }
   }
 
+  // Extract enabled from options and combine with onboarding completion and LLM health checks
+  // Query is only enabled if onboarding is complete AND LLM provider is healthy AND the caller's enabled condition is met
+  const callerEnabled = options?.enabled ?? true;
+  const enabled = isOnboardingComplete && isLLMHealthy && callerEnabled;
+
   const queryResult = useQuery(
     {
       queryKey: ["nudges", chatId, filters, limit, scoreThreshold],
@@ -91,6 +105,7 @@
         return Array.isArray(data) && data.length === 0 ? 5000 : false;
       },
       ...options,
+      enabled, // Override enabled after spreading options so the onboarding and LLM-health checks are applied
     },
     queryClient,
   );
diff --git a/frontend/app/api/queries/useProviderHealthQuery.ts b/frontend/app/api/queries/useProviderHealthQuery.ts
index 6586e6dd..ab34e9b1 100644
--- a/frontend/app/api/queries/useProviderHealthQuery.ts
+++ b/frontend/app/api/queries/useProviderHealthQuery.ts
@@ -5,6 +5,7 @@
 } from "@tanstack/react-query";
 import { useChat } from "@/contexts/chat-context";
 import { useGetSettingsQuery } from "./useGetSettingsQuery";
+import { useGetTasksQuery } from "./useGetTasksQuery";
 
 export interface ProviderHealthDetails {
   llm_model: string;
@@ -40,11 +41,20 @@
 ) => {
   const queryClient = useQueryClient();
 
-  // Get chat error state from context (ChatProvider wraps the entire app in layout.tsx)
-  const { hasChatError, setChatError } = useChat();
+  // Get chat error state and onboarding completion from context (ChatProvider wraps the entire app in layout.tsx)
+  const { hasChatError, setChatError, isOnboardingComplete } = useChat();
   const { data: settings = {} } = useGetSettingsQuery();
 
+  // Check if there are any active ingestion tasks
+  const { data: tasks = [] } = useGetTasksQuery();
+  const hasActiveIngestion = tasks.some(
+    (task) =>
+      task.status === "pending" ||
+      task.status === "running" ||
+      task.status === "processing",
+  );
+
   async function checkProviderHealth(): Promise {
     try {
       const url = new URL("/api/provider/health", window.location.origin);
@@ -55,6 +65,7 @@
       }
 
       // Add test_completion query param if specified or if chat error exists
+      // Use the same testCompletion value that's in the queryKey
       const testCompletion = params?.test_completion ?? hasChatError;
       if (testCompletion) {
         url.searchParams.set("test_completion", "true");
@@ -101,7 +112,10 @@
     }
   }
 
-  const queryKey = ["provider", "health", params?.test_completion];
+  // Include hasChatError in queryKey so React Query refetches when it changes
+  // This ensures the health check runs with test_completion=true when chat errors occur
+  const testCompletion = params?.test_completion ?? hasChatError;
+  const queryKey = ["provider", "health", testCompletion, hasChatError];
   const failureCountKey = queryKey.join("-");
 
   const queryResult = useQuery(
@@ -143,7 +157,11 @@
       refetchOnWindowFocus: false, // Disabled to reduce unnecessary calls on tab switches
       refetchOnMount: true,
       staleTime: 30000, // Consider data stale after 30 seconds
-      enabled: !!settings?.edited && options?.enabled !== false, // Only run after onboarding is complete
+      enabled:
+        !!settings?.edited &&
+        isOnboardingComplete &&
+        !hasActiveIngestion && // Disable health checks when ingestion is happening
+        options?.enabled !== false, // Respect the caller's enabled option
       ...options,
     },
     queryClient,
   );
diff --git a/frontend/app/chat/page.tsx b/frontend/app/chat/page.tsx
index 87ae6b60..f15cf788 100644
--- a/frontend/app/chat/page.tsx
+++ b/frontend/app/chat/page.tsx
@@ -10,1240 +10,1271 @@ import { useTask } from "@/contexts/task-context";
 import { useChatStreaming } from "@/hooks/useChatStreaming";
 import { FILE_CONFIRMATION, FILES_REGEX } from "@/lib/constants";
 import { useLoadingStore } from "@/stores/loadingStore";
+import { useGetConversationsQuery } from "../api/queries/useGetConversationsQuery";
 import { useGetNudgesQuery } from "../api/queries/useGetNudgesQuery";
 import { AssistantMessage } from "./_components/assistant-message";
 import { ChatInput, type ChatInputHandle } from "./_components/chat-input";
 import Nudges from "./_components/nudges";
 import { UserMessage } from "./_components/user-message";
 import type {
-    FunctionCall,
-    KnowledgeFilterData,
-    Message,
-    RequestBody,
-    SelectedFilters,
-    ToolCallResult,
+  FunctionCall,
+  KnowledgeFilterData,
+  Message,
+  RequestBody,
+  SelectedFilters,
+  ToolCallResult,
 } from "./_types/types";
 
 function ChatPage() {
-    const isDebugMode = process.env.NEXT_PUBLIC_OPENRAG_DEBUG === "true";
-    const {
-        endpoint,
-        setEndpoint,
-        currentConversationId,
-        conversationData,
-        setCurrentConversationId,
-        addConversationDoc,
-        forkFromResponse,
-        refreshConversations,
-        refreshConversationsSilent,
-        previousResponseIds,
-        setPreviousResponseIds,
-        placeholderConversation,
-        conversationFilter,
-        setConversationFilter,
-    } = useChat();
-    const [messages, setMessages] = useState([
-        {
-            role: "assistant",
-            content: "How can I assist?",
-            timestamp: new Date(),
-        },
-    ]);
-    const [input, setInput] = useState("");
-    const { loading, setLoading } = useLoadingStore();
-    const { setChatError } = useChat();
-    const [asyncMode, setAsyncMode] = useState(true);
-    const [expandedFunctionCalls, setExpandedFunctionCalls] = useState<
-        Set
-    >(new Set());
-    // previousResponseIds now comes from useChat context
-    const [isUploading, setIsUploading] = useState(false);
-    const [isFilterHighlighted, setIsFilterHighlighted] = useState(false);
-    const [isUserInteracting, setIsUserInteracting] = useState(false);
-    const [isForkingInProgress, setIsForkingInProgress] = useState(false);
-    const [uploadedFile, setUploadedFile] = useState(null);
-    const [waitingTooLong, setWaitingTooLong] = useState(false);
-
-    const chatInputRef = useRef(null);
-
-    const { scrollToBottom } = useStickToBottomContext();
-
-    const lastLoadedConversationRef = useRef(null);
-    const { addTask } = useTask();
-
-    // Use conversation-specific filter instead of global filter
-    const selectedFilter = conversationFilter;
-
-    // Parse the conversation filter data
-    const parsedFilterData = useMemo(() => {
-        if (!selectedFilter?.query_data) return null;
-        try {
-            return
JSON.parse(selectedFilter.query_data); - } catch (error) { - console.error("Error parsing filter data:", error); - return null; - } - }, [selectedFilter]); - - // Use the chat streaming hook - const apiEndpoint = endpoint === "chat" ? "/api/chat" : "/api/langflow"; - const { - streamingMessage, - sendMessage: sendStreamingMessage, - abortStream, - isLoading: isStreamLoading, - } = useChatStreaming({ - endpoint: apiEndpoint, - onComplete: (message, responseId) => { - setMessages((prev) => [...prev, message]); - setLoading(false); - setWaitingTooLong(false); - if (responseId) { - cancelNudges(); - setPreviousResponseIds((prev) => ({ - ...prev, - [endpoint]: responseId, - })); - - if (!currentConversationId) { - setCurrentConversationId(responseId); - refreshConversations(true); - } else { - refreshConversationsSilent(); - } - - // Save filter association for this response - if (conversationFilter && typeof window !== "undefined") { - const newKey = `conversation_filter_${responseId}`; - localStorage.setItem(newKey, conversationFilter.id); - console.log("[CHAT] Saved filter association:", newKey, "=", conversationFilter.id); - } - } - }, - onError: (error) => { - console.error("Streaming error:", error); - setLoading(false); - setWaitingTooLong(false); - // Set chat error flag to trigger test_completion=true on health checks - setChatError(true); - const errorMessage: Message = { - role: "assistant", - content: - "Sorry, I couldn't connect to the chat service. Please try again.", - timestamp: new Date(), - }; - setMessages((prev) => [...prev, errorMessage]); - }, - }); - - // Show warning if waiting too long (20 seconds) - useEffect(() => { - let timeoutId: NodeJS.Timeout | null = null; - - if (isStreamLoading && !streamingMessage) { - timeoutId = setTimeout(() => { - setWaitingTooLong(true); - }, 20000); // 20 seconds - } else { - setWaitingTooLong(false); - } - - return () => { - if (timeoutId) clearTimeout(timeoutId); - }; - }, [isStreamLoading, streamingMessage]); - - const handleEndpointChange = (newEndpoint: EndpointType) => { - setEndpoint(newEndpoint); - // Clear the conversation when switching endpoints to avoid response ID conflicts - setMessages([]); - setPreviousResponseIds({ chat: null, langflow: null }); - }; - - const handleFileUpload = async (file: File) => { - console.log("handleFileUpload called with file:", file.name); - - if (isUploading) return; - - setIsUploading(true); - setLoading(true); - - try { - const formData = new FormData(); - formData.append("file", file); - formData.append("endpoint", endpoint); - - // Add previous_response_id if we have one for this endpoint - const currentResponseId = previousResponseIds[endpoint]; - if (currentResponseId) { - formData.append("previous_response_id", currentResponseId); - } - - const response = await fetch("/api/upload_context", { - method: "POST", - body: formData, - }); - - console.log("Upload response status:", response.status); - - if (!response.ok) { - const errorText = await response.text(); - console.error( - "Upload failed with status:", - response.status, - "Response:", - errorText, - ); - throw new Error("Failed to process document"); - } - - const result = await response.json(); - console.log("Upload result:", result); - - if (!response.ok) { - // Set chat error flag if upload fails - setChatError(true); - } - - if (response.status === 201) { - // New flow: Got task ID, start tracking with centralized system - const taskId = result.task_id || result.id; - - if (!taskId) { - console.error("No task ID in 201 
response:", result); - throw new Error("No task ID received from server"); - } - - // Add task to centralized tracking - addTask(taskId); - - return null; - } else if (response.ok) { - // Original flow: Direct response - - const uploadMessage: Message = { - role: "user", - content: `I'm uploading a document called "${result.filename}". Here is its content:`, - timestamp: new Date(), - }; - - const confirmationMessage: Message = { - role: "assistant", - content: `Confirmed`, - timestamp: new Date(), - }; - - setMessages((prev) => [...prev, uploadMessage, confirmationMessage]); - - // Add file to conversation docs - if (result.filename) { - addConversationDoc(result.filename); - } - - // Update the response ID for this endpoint - if (result.response_id) { - setPreviousResponseIds((prev) => ({ - ...prev, - [endpoint]: result.response_id, - })); - - // If this is a new conversation (no currentConversationId), set it now - if (!currentConversationId) { - setCurrentConversationId(result.response_id); - refreshConversations(true); - } else { - // For existing conversations, do a silent refresh to keep backend in sync - refreshConversationsSilent(); - } - - return result.response_id; - } - } else { - throw new Error(`Upload failed: ${response.status}`); - } - } catch (error) { - console.error("Upload failed:", error); - // Set chat error flag to trigger test_completion=true on health checks - setChatError(true); - const errorMessage: Message = { - role: "assistant", - content: `❌ Failed to process document. Please try again.`, - timestamp: new Date(), - }; - setMessages((prev) => [...prev.slice(0, -1), errorMessage]); - } finally { - setIsUploading(false); - setLoading(false); - } - }; - - const handleFilePickerClick = () => { - chatInputRef.current?.clickFileInput(); - }; - - const handleFilterSelect = (filter: KnowledgeFilterData | null) => { - // Update conversation-specific filter - setConversationFilter(filter); - setIsFilterHighlighted(false); - }; - - // Auto-focus the input on component mount - useEffect(() => { - chatInputRef.current?.focusInput(); - }, []); - - // Explicitly handle external new conversation trigger - useEffect(() => { - const handleNewConversation = () => { - // Abort any in-flight streaming so it doesn't bleed into new chat - abortStream(); - // Reset chat UI even if context state was already 'new' - setMessages([ - { - role: "assistant", - content: "How can I assist?", - timestamp: new Date(), - }, - ]); - setInput(""); - setExpandedFunctionCalls(new Set()); - setIsFilterHighlighted(false); - setLoading(false); - lastLoadedConversationRef.current = null; - - // Focus input after a short delay to ensure rendering is complete - setTimeout(() => { - chatInputRef.current?.focusInput(); - }, 100); - }; - - const handleFocusInput = () => { - chatInputRef.current?.focusInput(); - }; - - window.addEventListener("newConversation", handleNewConversation); - window.addEventListener("focusInput", handleFocusInput); - return () => { - window.removeEventListener("newConversation", handleNewConversation); - window.removeEventListener("focusInput", handleFocusInput); - }; - }, [abortStream, setLoading]); - - // Load conversation only when user explicitly selects a conversation - useEffect(() => { - // Only load conversation data when: - // 1. conversationData exists AND - // 2. It's different from the last loaded conversation AND - // 3. 
User is not in the middle of an interaction - if ( - conversationData?.messages && - lastLoadedConversationRef.current !== conversationData.response_id && - !isUserInteracting && - !isForkingInProgress - ) { - console.log( - "Loading conversation with", - conversationData.messages.length, - "messages", - ); - // Convert backend message format to frontend Message interface - const convertedMessages: Message[] = conversationData.messages.map( - (msg: { - role: string; - content: string; - timestamp?: string; - response_id?: string; - chunks?: Array<{ - item?: { - type?: string; - tool_name?: string; - id?: string; - inputs?: unknown; - results?: unknown; - status?: string; - }; - delta?: { - tool_calls?: Array<{ - id?: string; - function?: { name?: string; arguments?: string }; - type?: string; - }>; - }; - type?: string; - result?: unknown; - output?: unknown; - response?: unknown; - }>; - response_data?: unknown; - }) => { - const message: Message = { - role: msg.role as "user" | "assistant", - content: msg.content, - timestamp: new Date(msg.timestamp || new Date()), - }; - - // Extract function calls from chunks or response_data - if (msg.role === "assistant" && (msg.chunks || msg.response_data)) { - const functionCalls: FunctionCall[] = []; - console.log("Processing assistant message for function calls:", { - hasChunks: !!msg.chunks, - chunksLength: msg.chunks?.length, - hasResponseData: !!msg.response_data, - }); - - // Process chunks (streaming data) - if (msg.chunks && Array.isArray(msg.chunks)) { - for (const chunk of msg.chunks) { - // Handle Langflow format: chunks[].item.tool_call - if (chunk.item && chunk.item.type === "tool_call") { - const toolCall = chunk.item; - console.log("Found Langflow tool call:", toolCall); - functionCalls.push({ - id: toolCall.id || "", - name: toolCall.tool_name || "unknown", - arguments: - (toolCall.inputs as Record) || {}, - argumentsString: JSON.stringify(toolCall.inputs || {}), - result: toolCall.results as - | Record - | ToolCallResult[], - status: - (toolCall.status as "pending" | "completed" | "error") || - "completed", - type: "tool_call", - }); - } - // Handle OpenAI format: chunks[].delta.tool_calls - else if (chunk.delta?.tool_calls) { - for (const toolCall of chunk.delta.tool_calls) { - if (toolCall.function) { - functionCalls.push({ - id: toolCall.id || "", - name: toolCall.function.name || "unknown", - arguments: toolCall.function.arguments - ? JSON.parse(toolCall.function.arguments) - : {}, - argumentsString: toolCall.function.arguments || "", - status: "completed", - type: toolCall.type || "function", - }); - } - } - } - // Process tool call results from chunks - if ( - chunk.type === "response.tool_call.result" || - chunk.type === "tool_call_result" - ) { - const lastCall = functionCalls[functionCalls.length - 1]; - if (lastCall) { - lastCall.result = - (chunk.result as - | Record - | ToolCallResult[]) || - (chunk as Record); - lastCall.status = "completed"; - } - } - } - } - - // Process response_data (non-streaming data) - if (msg.response_data && typeof msg.response_data === "object") { - // Look for tool_calls in various places in the response data - const responseData = - typeof msg.response_data === "string" - ? 
JSON.parse(msg.response_data) - : msg.response_data; - - if ( - responseData.tool_calls && - Array.isArray(responseData.tool_calls) - ) { - for (const toolCall of responseData.tool_calls) { - functionCalls.push({ - id: toolCall.id, - name: toolCall.function?.name || toolCall.name, - arguments: - toolCall.function?.arguments || toolCall.arguments, - argumentsString: - typeof ( - toolCall.function?.arguments || toolCall.arguments - ) === "string" - ? toolCall.function?.arguments || toolCall.arguments - : JSON.stringify( - toolCall.function?.arguments || toolCall.arguments, - ), - result: toolCall.result, - status: "completed", - type: toolCall.type || "function", - }); - } - } - } - - if (functionCalls.length > 0) { - console.log("Setting functionCalls on message:", functionCalls); - message.functionCalls = functionCalls; - } else { - console.log("No function calls found in message"); - } - } - - return message; - }, - ); - - setMessages(convertedMessages); - lastLoadedConversationRef.current = conversationData.response_id; - - // Set the previous response ID for this conversation - setPreviousResponseIds((prev) => ({ - ...prev, - [conversationData.endpoint]: conversationData.response_id, - })); - - // Focus input when loading a conversation - setTimeout(() => { - chatInputRef.current?.focusInput(); - }, 100); - } - }, [ - conversationData, - isUserInteracting, - isForkingInProgress, - setPreviousResponseIds, - ]); - - // Handle new conversation creation - only reset messages when placeholderConversation is set - useEffect(() => { - if (placeholderConversation && currentConversationId === null) { - console.log("Starting new conversation"); - setMessages([ - { - role: "assistant", - content: "How can I assist?", - timestamp: new Date(), - }, - ]); - lastLoadedConversationRef.current = null; - - // Focus input when starting a new conversation - setTimeout(() => { - chatInputRef.current?.focusInput(); - }, 100); - } - }, [placeholderConversation, currentConversationId]); - - // Listen for file upload events from navigation - useEffect(() => { - const handleFileUploadStart = (event: CustomEvent) => { - const { filename } = event.detail; - console.log("Chat page received file upload start event:", filename); - - setLoading(true); - setIsUploading(true); - setUploadedFile(null); // Clear previous file - }; - - const handleFileUploaded = (event: CustomEvent) => { - const { result } = event.detail; - console.log("Chat page received file upload event:", result); - - setUploadedFile(null); // Clear file after upload - - // Update the response ID for this endpoint - if (result.response_id) { - setPreviousResponseIds((prev) => ({ - ...prev, - [endpoint]: result.response_id, - })); - } - }; - - const handleFileUploadComplete = () => { - console.log("Chat page received file upload complete event"); - setLoading(false); - setIsUploading(false); - }; - - const handleFileUploadError = (event: CustomEvent) => { - const { filename, error } = event.detail; - console.log( - "Chat page received file upload error event:", - filename, - error, - ); - - // Replace the last message with error message - const errorMessage: Message = { - role: "assistant", - content: `❌ Upload failed for **${filename}**: ${error}`, - timestamp: new Date(), - }; - setMessages((prev) => [...prev.slice(0, -1), errorMessage]); - setUploadedFile(null); // Clear file on error - }; - - window.addEventListener( - "fileUploadStart", - handleFileUploadStart as EventListener, - ); - window.addEventListener( - "fileUploaded", - handleFileUploaded 
as EventListener, - ); - window.addEventListener( - "fileUploadComplete", - handleFileUploadComplete as EventListener, - ); - window.addEventListener( - "fileUploadError", - handleFileUploadError as EventListener, - ); - - return () => { - window.removeEventListener( - "fileUploadStart", - handleFileUploadStart as EventListener, - ); - window.removeEventListener( - "fileUploaded", - handleFileUploaded as EventListener, - ); - window.removeEventListener( - "fileUploadComplete", - handleFileUploadComplete as EventListener, - ); - window.removeEventListener( - "fileUploadError", - handleFileUploadError as EventListener, - ); - }; - }, [endpoint, setPreviousResponseIds, setLoading]); - - // Check if onboarding is complete by looking at local storage - const [isOnboardingComplete, setIsOnboardingComplete] = useState(() => { - if (typeof window === "undefined") return false; - return localStorage.getItem("onboarding-step") === null; - }); - - // Listen for storage changes to detect when onboarding completes - useEffect(() => { - const checkOnboarding = () => { - if (typeof window !== "undefined") { - setIsOnboardingComplete( - localStorage.getItem("onboarding-step") === null, - ); - } - }; - - // Check periodically since storage events don't fire in the same tab - const interval = setInterval(checkOnboarding, 500); - - return () => clearInterval(interval); - }, []); - - // Prepare filters for nudges (same as chat) - const processedFiltersForNudges = parsedFilterData?.filters - ? (() => { - const filters = parsedFilterData.filters; - const processed: SelectedFilters = { - data_sources: [], - document_types: [], - owners: [], - }; - processed.data_sources = filters.data_sources.includes("*") - ? [] - : filters.data_sources; - processed.document_types = filters.document_types.includes("*") - ? [] - : filters.document_types; - processed.owners = filters.owners.includes("*") ? [] : filters.owners; - - const hasFilters = - processed.data_sources.length > 0 || - processed.document_types.length > 0 || - processed.owners.length > 0; - return hasFilters ? processed : undefined; - })() - : undefined; - - const { data: nudges = [], cancel: cancelNudges } = useGetNudgesQuery( - { - chatId: previousResponseIds[endpoint], - filters: processedFiltersForNudges, - limit: parsedFilterData?.limit ?? 3, - scoreThreshold: parsedFilterData?.scoreThreshold ?? 0, - }, - { - enabled: isOnboardingComplete, // Only fetch nudges after onboarding is complete - }, - ); - - const handleSSEStream = async ( - userMessage: Message, - previousResponseId?: string, - ) => { - // Prepare filters - const processedFilters = parsedFilterData?.filters - ? (() => { - const filters = parsedFilterData.filters; - const processed: SelectedFilters = { - data_sources: [], - document_types: [], - owners: [], - }; - processed.data_sources = filters.data_sources.includes("*") - ? [] - : filters.data_sources; - processed.document_types = filters.document_types.includes("*") - ? [] - : filters.document_types; - processed.owners = filters.owners.includes("*") ? [] : filters.owners; - - const hasFilters = - processed.data_sources.length > 0 || - processed.document_types.length > 0 || - processed.owners.length > 0; - return hasFilters ? 
processed : undefined; - })() - : undefined; - - // Use passed previousResponseId if available, otherwise fall back to state - const responseIdToUse = previousResponseId || previousResponseIds[endpoint]; - - console.log("[CHAT] Sending streaming message:", { - conversationFilter: conversationFilter?.id, - currentConversationId, - responseIdToUse, - }); - - // Use the hook to send the message - await sendStreamingMessage({ - prompt: userMessage.content, - previousResponseId: responseIdToUse || undefined, - filters: processedFilters, - filter_id: conversationFilter?.id, // ✅ Add filter_id for this conversation - limit: parsedFilterData?.limit ?? 10, - scoreThreshold: parsedFilterData?.scoreThreshold ?? 0, - }); - scrollToBottom({ - animation: "smooth", - duration: 1000, - }); - }; - - const handleSendMessage = async ( - inputMessage: string, - previousResponseId?: string, - ) => { - if (!inputMessage.trim() || loading) return; - - const userMessage: Message = { - role: "user", - content: inputMessage.trim(), - timestamp: new Date(), - }; - - if (messages.length === 1) { - setMessages([userMessage]); - } else { - setMessages((prev) => [...prev, userMessage]); - } - setInput(""); - setLoading(true); - setIsFilterHighlighted(false); - - scrollToBottom({ - animation: "smooth", - duration: 1000, - }); - - if (asyncMode) { - await handleSSEStream(userMessage, previousResponseId); - } else { - // Original non-streaming logic - try { - const apiEndpoint = endpoint === "chat" ? "/api/chat" : "/api/langflow"; - - const requestBody: RequestBody = { - prompt: userMessage.content, - ...(parsedFilterData?.filters && - (() => { - const filters = parsedFilterData.filters; - const processed: SelectedFilters = { - data_sources: [], - document_types: [], - owners: [], - }; - // Only copy non-wildcard arrays - processed.data_sources = filters.data_sources.includes("*") - ? [] - : filters.data_sources; - processed.document_types = filters.document_types.includes("*") - ? [] - : filters.document_types; - processed.owners = filters.owners.includes("*") - ? [] - : filters.owners; - - // Only include filters if any array has values - const hasFilters = - processed.data_sources.length > 0 || - processed.document_types.length > 0 || - processed.owners.length > 0; - return hasFilters ? { filters: processed } : {}; - })()), - limit: parsedFilterData?.limit ?? 10, - scoreThreshold: parsedFilterData?.scoreThreshold ?? 
0, - }; - - // Add previous_response_id if we have one for this endpoint - const currentResponseId = previousResponseIds[endpoint]; - if (currentResponseId) { - requestBody.previous_response_id = currentResponseId; - } - - // Add filter_id if a filter is selected for this conversation - if (conversationFilter) { - requestBody.filter_id = conversationFilter.id; - } - - // Debug logging - console.log("[DEBUG] Sending message with:", { - previous_response_id: requestBody.previous_response_id, - filter_id: requestBody.filter_id, - currentConversationId, - previousResponseIds, - }); - - const response = await fetch(apiEndpoint, { - method: "POST", - headers: { - "Content-Type": "application/json", - }, - body: JSON.stringify(requestBody), - }); - - const result = await response.json(); - - if (response.ok) { - const assistantMessage: Message = { - role: "assistant", - content: result.response, - timestamp: new Date(), - }; - setMessages((prev) => [...prev, assistantMessage]); - if (result.response_id) { - cancelNudges(); - } - - // Store the response ID if present for this endpoint - if (result.response_id) { - console.log("[DEBUG] Received response_id:", result.response_id, "currentConversationId:", currentConversationId); - - setPreviousResponseIds((prev) => ({ - ...prev, - [endpoint]: result.response_id, - })); - - // If this is a new conversation (no currentConversationId), set it now - if (!currentConversationId) { - console.log("[DEBUG] Setting currentConversationId to:", result.response_id); - setCurrentConversationId(result.response_id); - refreshConversations(true); - } else { - console.log("[DEBUG] Existing conversation, doing silent refresh"); - // For existing conversations, do a silent refresh to keep backend in sync - refreshConversationsSilent(); - } - - // Carry forward the filter association to the new response_id - if (conversationFilter && typeof window !== "undefined") { - const newKey = `conversation_filter_${result.response_id}`; - localStorage.setItem(newKey, conversationFilter.id); - console.log("[DEBUG] Saved filter association:", newKey, "=", conversationFilter.id); - } - } - } else { - console.error("Chat failed:", result.error); - // Set chat error flag to trigger test_completion=true on health checks - setChatError(true); - const errorMessage: Message = { - role: "assistant", - content: "Sorry, I encountered an error. Please try again.", - timestamp: new Date(), - }; - setMessages((prev) => [...prev, errorMessage]); - } - } catch (error) { - console.error("Chat error:", error); - // Set chat error flag to trigger test_completion=true on health checks - setChatError(true); - const errorMessage: Message = { - role: "assistant", - content: - "Sorry, I couldn't connect to the chat service. 
Please try again.", - timestamp: new Date(), - }; - setMessages((prev) => [...prev, errorMessage]); - } - } - - setLoading(false); - }; - - const handleSubmit = async (e: React.FormEvent) => { - e.preventDefault(); - - // Check if there's an uploaded file and upload it first - let uploadedResponseId: string | null = null; - if (uploadedFile) { - // Upload the file first - const responseId = await handleFileUpload(uploadedFile); - // Clear the file after upload - setUploadedFile(null); - - // If the upload resulted in a new conversation, store the response ID - if (responseId) { - uploadedResponseId = responseId; - setPreviousResponseIds((prev) => ({ - ...prev, - [endpoint]: responseId, - })); - } - } - - // Only send message if there's input text - if (input.trim() || uploadedFile) { - // Pass the responseId from upload (if any) to handleSendMessage - handleSendMessage( - !input.trim() ? FILE_CONFIRMATION : input, - uploadedResponseId || undefined, - ); - } - }; - - const toggleFunctionCall = (functionCallId: string) => { - setExpandedFunctionCalls((prev) => { - const newSet = new Set(prev); - if (newSet.has(functionCallId)) { - newSet.delete(functionCallId); - } else { - newSet.add(functionCallId); - } - return newSet; - }); - }; - - const handleForkConversation = ( - messageIndex: number, - event?: React.MouseEvent, - ) => { - // Prevent any default behavior and stop event propagation - if (event) { - event.preventDefault(); - event.stopPropagation(); - } - - // Set interaction state to prevent auto-scroll interference - setIsUserInteracting(true); - setIsForkingInProgress(true); - - console.log("Fork conversation called for message index:", messageIndex); - - // Get messages up to and including the selected assistant message - const messagesToKeep = messages.slice(0, messageIndex + 1); - - // The selected message should be an assistant message (since fork button is only on assistant messages) - const forkedMessage = messages[messageIndex]; - if (forkedMessage.role !== "assistant") { - console.error("Fork button should only be on assistant messages"); - setIsUserInteracting(false); - setIsForkingInProgress(false); - return; - } - - // For forking, we want to continue from the response_id of the assistant message we're forking from - // Since we don't store individual response_ids per message yet, we'll use the current conversation's response_id - // This means we're continuing the conversation thread from that point - const responseIdToForkFrom = - currentConversationId || previousResponseIds[endpoint]; - - // Create a new conversation by properly forking - setMessages(messagesToKeep); - - // Use the chat context's fork method which handles creating a new conversation properly - if (forkFromResponse) { - forkFromResponse(responseIdToForkFrom || ""); - } else { - // Fallback to manual approach - setCurrentConversationId(null); // This creates a new conversation thread - - // Set the response_id we want to continue from as the previous response ID - // This tells the backend to continue the conversation from this point - setPreviousResponseIds((prev) => ({ - ...prev, - [endpoint]: responseIdToForkFrom, - })); - } - - console.log("Forked conversation with", messagesToKeep.length, "messages"); - - // Reset interaction state after a longer delay to ensure all effects complete - setTimeout(() => { - setIsUserInteracting(false); - setIsForkingInProgress(false); - console.log("Fork interaction complete, re-enabling auto effects"); - }, 500); - - // The original conversation remains unchanged in 
the sidebar - // This new forked conversation will get its own response_id when the user sends the next message - }; - - const handleSuggestionClick = (suggestion: string) => { - handleSendMessage(suggestion); - }; - - return ( - <> - {/* Debug header - only show in debug mode */} - {isDebugMode && ( -
-            {/* Debug toolbar: async-mode toggle and chat/langflow endpoint toggle buttons (markup lost in extraction) */}
-        )}
-
-        {/* Conversation area (markup lost in extraction). It contained: the
-            empty-state panel shown while isUploading ("Processing your
-            document..." / "This may take a few moments"); messages.map
-            rendering UserMessage / AssistantMessage (file-upload messages
-            matched via FILES_REGEX and collapsed to FILE_CONFIRMATION,
-            forking via handleForkConversation, isInitialGreeting for the
-            "How can I assist?" greeting); the streaming message display;
-            the waiting-too-long notice ("The server is taking longer than
-            expected... This may be due to high server load. The request
-            will timeout after 60 seconds."); and Nudges wired to
-            handleSuggestionClick */}
-        {/* Input Area - Fixed at bottom */}
-        {/* The ChatInput opening tag and its leading props were lost in extraction; the element name below is inferred from the ChatInput import, and the surviving handlers are kept verbatim */}
-        <ChatInput
-            onKeyDown={(e) => {
-                // Handle backspace for filter clearing
-                if (
-                    e.key === "Backspace" &&
-                    selectedFilter &&
-                    input.trim() === ""
-                ) {
-                    e.preventDefault();
-                    if (isFilterHighlighted) {
-                        // Second backspace - remove the filter
-                        setConversationFilter(null);
-                        setIsFilterHighlighted(false);
-                    } else {
-                        // First backspace - highlight the filter
-                        setIsFilterHighlighted(true);
-                    }
-                    return;
-                }
-
-                // Handle Enter key for form submission
-                if (e.key === "Enter" && !e.shiftKey) {
-                    e.preventDefault();
-                    if (input.trim() && !loading) {
-                        // Trigger form submission by finding the form and calling submit
-                        const form = e.currentTarget.closest("form");
-                        if (form) {
-                            form.requestSubmit();
-                        }
-                    }
-                }
-            }}
-            onFilterSelect={handleFilterSelect}
-            onFilePickerClick={handleFilePickerClick}
-            onFileSelected={setUploadedFile}
-            setSelectedFilter={setConversationFilter}
-            setIsFilterHighlighted={setIsFilterHighlighted}
-        />
-        {/* closing wrapper elements lost in extraction */}
- - ); + const isDebugMode = process.env.NEXT_PUBLIC_OPENRAG_DEBUG === "true"; + const { + endpoint, + setEndpoint, + currentConversationId, + conversationData, + setCurrentConversationId, + addConversationDoc, + forkFromResponse, + refreshConversations, + refreshConversationsSilent, + refreshTrigger, + previousResponseIds, + setPreviousResponseIds, + placeholderConversation, + conversationFilter, + setConversationFilter, + } = useChat(); + const [messages, setMessages] = useState([ + { + role: "assistant", + content: "How can I assist?", + timestamp: new Date(), + }, + ]); + const [input, setInput] = useState(""); + const { loading, setLoading } = useLoadingStore(); + const { setChatError } = useChat(); + const [asyncMode, setAsyncMode] = useState(true); + const [expandedFunctionCalls, setExpandedFunctionCalls] = useState< + Set + >(new Set()); + // previousResponseIds now comes from useChat context + const [isUploading, setIsUploading] = useState(false); + const [isFilterHighlighted, setIsFilterHighlighted] = useState(false); + const [isUserInteracting, setIsUserInteracting] = useState(false); + const [isForkingInProgress, setIsForkingInProgress] = useState(false); + const [uploadedFile, setUploadedFile] = useState(null); + const [waitingTooLong, setWaitingTooLong] = useState(false); + + const chatInputRef = useRef(null); + + const { scrollToBottom } = useStickToBottomContext(); + + const lastLoadedConversationRef = useRef(null); + const { addTask } = useTask(); + + // Check if chat history is loading + const { isLoading: isConversationsLoading } = useGetConversationsQuery( + endpoint, + refreshTrigger, + ); + + // Use conversation-specific filter instead of global filter + const selectedFilter = conversationFilter; + + // Parse the conversation filter data + const parsedFilterData = useMemo(() => { + if (!selectedFilter?.query_data) return null; + try { + return JSON.parse(selectedFilter.query_data); + } catch (error) { + console.error("Error parsing filter data:", error); + return null; + } + }, [selectedFilter]); + + // Use the chat streaming hook + const apiEndpoint = endpoint === "chat" ? "/api/chat" : "/api/langflow"; + const { + streamingMessage, + sendMessage: sendStreamingMessage, + abortStream, + isLoading: isStreamLoading, + } = useChatStreaming({ + endpoint: apiEndpoint, + onComplete: (message, responseId) => { + setMessages((prev) => [...prev, message]); + setLoading(false); + setWaitingTooLong(false); + if (responseId) { + cancelNudges(); + setPreviousResponseIds((prev) => ({ + ...prev, + [endpoint]: responseId, + })); + + if (!currentConversationId) { + setCurrentConversationId(responseId); + refreshConversations(true); + } else { + refreshConversationsSilent(); + } + + // Save filter association for this response + if (conversationFilter && typeof window !== "undefined") { + const newKey = `conversation_filter_${responseId}`; + localStorage.setItem(newKey, conversationFilter.id); + console.log( + "[CHAT] Saved filter association:", + newKey, + "=", + conversationFilter.id, + ); + } + } + }, + onError: (error) => { + console.error("Streaming error:", error); + setLoading(false); + setWaitingTooLong(false); + // Set chat error flag to trigger test_completion=true on health checks + setChatError(true); + const errorMessage: Message = { + role: "assistant", + content: + "Sorry, I couldn't connect to the chat service. 
Please try again.", + timestamp: new Date(), + }; + setMessages((prev) => [...prev, errorMessage]); + }, + }); + + // Show warning if waiting too long (20 seconds) + useEffect(() => { + let timeoutId: NodeJS.Timeout | null = null; + + if (isStreamLoading && !streamingMessage) { + timeoutId = setTimeout(() => { + setWaitingTooLong(true); + }, 20000); // 20 seconds + } else { + setWaitingTooLong(false); + } + + return () => { + if (timeoutId) clearTimeout(timeoutId); + }; + }, [isStreamLoading, streamingMessage]); + + const handleEndpointChange = (newEndpoint: EndpointType) => { + setEndpoint(newEndpoint); + // Clear the conversation when switching endpoints to avoid response ID conflicts + setMessages([]); + setPreviousResponseIds({ chat: null, langflow: null }); + }; + + const handleFileUpload = async (file: File) => { + console.log("handleFileUpload called with file:", file.name); + + if (isUploading) return; + + setIsUploading(true); + setLoading(true); + + try { + const formData = new FormData(); + formData.append("file", file); + formData.append("endpoint", endpoint); + + // Add previous_response_id if we have one for this endpoint + const currentResponseId = previousResponseIds[endpoint]; + if (currentResponseId) { + formData.append("previous_response_id", currentResponseId); + } + + const response = await fetch("/api/upload_context", { + method: "POST", + body: formData, + }); + + console.log("Upload response status:", response.status); + + if (!response.ok) { + const errorText = await response.text(); + console.error( + "Upload failed with status:", + response.status, + "Response:", + errorText, + ); + throw new Error("Failed to process document"); + } + + const result = await response.json(); + console.log("Upload result:", result); + + if (!response.ok) { + // Set chat error flag if upload fails + setChatError(true); + } + + if (response.status === 201) { + // New flow: Got task ID, start tracking with centralized system + const taskId = result.task_id || result.id; + + if (!taskId) { + console.error("No task ID in 201 response:", result); + throw new Error("No task ID received from server"); + } + + // Add task to centralized tracking + addTask(taskId); + + return null; + } else if (response.ok) { + // Original flow: Direct response + + const uploadMessage: Message = { + role: "user", + content: `I'm uploading a document called "${result.filename}". 
Here is its content:`, + timestamp: new Date(), + }; + + const confirmationMessage: Message = { + role: "assistant", + content: `Confirmed`, + timestamp: new Date(), + }; + + setMessages((prev) => [...prev, uploadMessage, confirmationMessage]); + + // Add file to conversation docs + if (result.filename) { + addConversationDoc(result.filename); + } + + // Update the response ID for this endpoint + if (result.response_id) { + setPreviousResponseIds((prev) => ({ + ...prev, + [endpoint]: result.response_id, + })); + + // If this is a new conversation (no currentConversationId), set it now + if (!currentConversationId) { + setCurrentConversationId(result.response_id); + refreshConversations(true); + } else { + // For existing conversations, do a silent refresh to keep backend in sync + refreshConversationsSilent(); + } + + return result.response_id; + } + } else { + throw new Error(`Upload failed: ${response.status}`); + } + } catch (error) { + console.error("Upload failed:", error); + // Set chat error flag to trigger test_completion=true on health checks + setChatError(true); + const errorMessage: Message = { + role: "assistant", + content: `❌ Failed to process document. Please try again.`, + timestamp: new Date(), + }; + setMessages((prev) => [...prev.slice(0, -1), errorMessage]); + } finally { + setIsUploading(false); + setLoading(false); + } + }; + + const handleFilePickerClick = () => { + chatInputRef.current?.clickFileInput(); + }; + + const handleFilterSelect = (filter: KnowledgeFilterData | null) => { + // Update conversation-specific filter + setConversationFilter(filter); + setIsFilterHighlighted(false); + }; + + // Auto-focus the input on component mount + useEffect(() => { + chatInputRef.current?.focusInput(); + }, []); + + // Explicitly handle external new conversation trigger + useEffect(() => { + const handleNewConversation = () => { + // Abort any in-flight streaming so it doesn't bleed into new chat + abortStream(); + // Reset chat UI even if context state was already 'new' + setMessages([ + { + role: "assistant", + content: "How can I assist?", + timestamp: new Date(), + }, + ]); + setInput(""); + setExpandedFunctionCalls(new Set()); + setIsFilterHighlighted(false); + setLoading(false); + lastLoadedConversationRef.current = null; + + // Focus input after a short delay to ensure rendering is complete + setTimeout(() => { + chatInputRef.current?.focusInput(); + }, 100); + }; + + const handleFocusInput = () => { + chatInputRef.current?.focusInput(); + }; + + window.addEventListener("newConversation", handleNewConversation); + window.addEventListener("focusInput", handleFocusInput); + return () => { + window.removeEventListener("newConversation", handleNewConversation); + window.removeEventListener("focusInput", handleFocusInput); + }; + }, [abortStream, setLoading]); + + // Load conversation only when user explicitly selects a conversation + useEffect(() => { + // Only load conversation data when: + // 1. conversationData exists AND + // 2. It's different from the last loaded conversation AND + // 3. 
User is not in the middle of an interaction + if ( + conversationData?.messages && + lastLoadedConversationRef.current !== conversationData.response_id && + !isUserInteracting && + !isForkingInProgress + ) { + console.log( + "Loading conversation with", + conversationData.messages.length, + "messages", + ); + // Convert backend message format to frontend Message interface + const convertedMessages: Message[] = conversationData.messages.map( + (msg: { + role: string; + content: string; + timestamp?: string; + response_id?: string; + chunks?: Array<{ + item?: { + type?: string; + tool_name?: string; + id?: string; + inputs?: unknown; + results?: unknown; + status?: string; + }; + delta?: { + tool_calls?: Array<{ + id?: string; + function?: { name?: string; arguments?: string }; + type?: string; + }>; + }; + type?: string; + result?: unknown; + output?: unknown; + response?: unknown; + }>; + response_data?: unknown; + }) => { + const message: Message = { + role: msg.role as "user" | "assistant", + content: msg.content, + timestamp: new Date(msg.timestamp || new Date()), + }; + + // Extract function calls from chunks or response_data + if (msg.role === "assistant" && (msg.chunks || msg.response_data)) { + const functionCalls: FunctionCall[] = []; + console.log("Processing assistant message for function calls:", { + hasChunks: !!msg.chunks, + chunksLength: msg.chunks?.length, + hasResponseData: !!msg.response_data, + }); + + // Process chunks (streaming data) + if (msg.chunks && Array.isArray(msg.chunks)) { + for (const chunk of msg.chunks) { + // Handle Langflow format: chunks[].item.tool_call + if (chunk.item && chunk.item.type === "tool_call") { + const toolCall = chunk.item; + console.log("Found Langflow tool call:", toolCall); + functionCalls.push({ + id: toolCall.id || "", + name: toolCall.tool_name || "unknown", + arguments: + (toolCall.inputs as Record) || {}, + argumentsString: JSON.stringify(toolCall.inputs || {}), + result: toolCall.results as + | Record + | ToolCallResult[], + status: + (toolCall.status as "pending" | "completed" | "error") || + "completed", + type: "tool_call", + }); + } + // Handle OpenAI format: chunks[].delta.tool_calls + else if (chunk.delta?.tool_calls) { + for (const toolCall of chunk.delta.tool_calls) { + if (toolCall.function) { + functionCalls.push({ + id: toolCall.id || "", + name: toolCall.function.name || "unknown", + arguments: toolCall.function.arguments + ? JSON.parse(toolCall.function.arguments) + : {}, + argumentsString: toolCall.function.arguments || "", + status: "completed", + type: toolCall.type || "function", + }); + } + } + } + // Process tool call results from chunks + if ( + chunk.type === "response.tool_call.result" || + chunk.type === "tool_call_result" + ) { + const lastCall = functionCalls[functionCalls.length - 1]; + if (lastCall) { + lastCall.result = + (chunk.result as + | Record + | ToolCallResult[]) || + (chunk as Record); + lastCall.status = "completed"; + } + } + } + } + + // Process response_data (non-streaming data) + if (msg.response_data && typeof msg.response_data === "object") { + // Look for tool_calls in various places in the response data + const responseData = + typeof msg.response_data === "string" + ? 
JSON.parse(msg.response_data) + : msg.response_data; + + if ( + responseData.tool_calls && + Array.isArray(responseData.tool_calls) + ) { + for (const toolCall of responseData.tool_calls) { + functionCalls.push({ + id: toolCall.id, + name: toolCall.function?.name || toolCall.name, + arguments: + toolCall.function?.arguments || toolCall.arguments, + argumentsString: + typeof ( + toolCall.function?.arguments || toolCall.arguments + ) === "string" + ? toolCall.function?.arguments || toolCall.arguments + : JSON.stringify( + toolCall.function?.arguments || toolCall.arguments, + ), + result: toolCall.result, + status: "completed", + type: toolCall.type || "function", + }); + } + } + } + + if (functionCalls.length > 0) { + console.log("Setting functionCalls on message:", functionCalls); + message.functionCalls = functionCalls; + } else { + console.log("No function calls found in message"); + } + } + + return message; + }, + ); + + setMessages(convertedMessages); + lastLoadedConversationRef.current = conversationData.response_id; + + // Set the previous response ID for this conversation + setPreviousResponseIds((prev) => ({ + ...prev, + [conversationData.endpoint]: conversationData.response_id, + })); + + // Focus input when loading a conversation + setTimeout(() => { + chatInputRef.current?.focusInput(); + }, 100); + } else if (!conversationData) { + // No conversation selected (new conversation) + lastLoadedConversationRef.current = null; + } + }, [ + conversationData, + isUserInteracting, + isForkingInProgress, + setPreviousResponseIds, + ]); + + // Handle new conversation creation - only reset messages when placeholderConversation is set + useEffect(() => { + if (placeholderConversation && currentConversationId === null) { + console.log("Starting new conversation"); + setMessages([ + { + role: "assistant", + content: "How can I assist?", + timestamp: new Date(), + }, + ]); + lastLoadedConversationRef.current = null; + + // Focus input when starting a new conversation + setTimeout(() => { + chatInputRef.current?.focusInput(); + }, 100); + } + }, [placeholderConversation, currentConversationId]); + + // Listen for file upload events from navigation + useEffect(() => { + const handleFileUploadStart = (event: CustomEvent) => { + const { filename } = event.detail; + console.log("Chat page received file upload start event:", filename); + + setLoading(true); + setIsUploading(true); + setUploadedFile(null); // Clear previous file + }; + + const handleFileUploaded = (event: CustomEvent) => { + const { result } = event.detail; + console.log("Chat page received file upload event:", result); + + setUploadedFile(null); // Clear file after upload + + // Update the response ID for this endpoint + if (result.response_id) { + setPreviousResponseIds((prev) => ({ + ...prev, + [endpoint]: result.response_id, + })); + } + }; + + const handleFileUploadComplete = () => { + console.log("Chat page received file upload complete event"); + setLoading(false); + setIsUploading(false); + }; + + const handleFileUploadError = (event: CustomEvent) => { + const { filename, error } = event.detail; + console.log( + "Chat page received file upload error event:", + filename, + error, + ); + + // Replace the last message with error message + const errorMessage: Message = { + role: "assistant", + content: `❌ Upload failed for **${filename}**: ${error}`, + timestamp: new Date(), + }; + setMessages((prev) => [...prev.slice(0, -1), errorMessage]); + setUploadedFile(null); // Clear file on error + }; + + window.addEventListener( + 
"fileUploadStart", + handleFileUploadStart as EventListener, + ); + window.addEventListener( + "fileUploaded", + handleFileUploaded as EventListener, + ); + window.addEventListener( + "fileUploadComplete", + handleFileUploadComplete as EventListener, + ); + window.addEventListener( + "fileUploadError", + handleFileUploadError as EventListener, + ); + + return () => { + window.removeEventListener( + "fileUploadStart", + handleFileUploadStart as EventListener, + ); + window.removeEventListener( + "fileUploaded", + handleFileUploaded as EventListener, + ); + window.removeEventListener( + "fileUploadComplete", + handleFileUploadComplete as EventListener, + ); + window.removeEventListener( + "fileUploadError", + handleFileUploadError as EventListener, + ); + }; + }, [endpoint, setPreviousResponseIds, setLoading]); + + // Check if onboarding is complete by looking at local storage + const [isOnboardingComplete, setIsOnboardingComplete] = useState(() => { + if (typeof window === "undefined") return false; + return localStorage.getItem("onboarding-step") === null; + }); + + // Listen for storage changes to detect when onboarding completes + useEffect(() => { + const checkOnboarding = () => { + if (typeof window !== "undefined") { + setIsOnboardingComplete( + localStorage.getItem("onboarding-step") === null, + ); + } + }; + + // Check periodically since storage events don't fire in the same tab + const interval = setInterval(checkOnboarding, 500); + + return () => clearInterval(interval); + }, []); + + // Prepare filters for nudges (same as chat) + const processedFiltersForNudges = parsedFilterData?.filters + ? (() => { + const filters = parsedFilterData.filters; + const processed: SelectedFilters = { + data_sources: [], + document_types: [], + owners: [], + }; + processed.data_sources = filters.data_sources.includes("*") + ? [] + : filters.data_sources; + processed.document_types = filters.document_types.includes("*") + ? [] + : filters.document_types; + processed.owners = filters.owners.includes("*") ? [] : filters.owners; + + const hasFilters = + processed.data_sources.length > 0 || + processed.document_types.length > 0 || + processed.owners.length > 0; + return hasFilters ? processed : undefined; + })() + : undefined; + + const { data: nudges = [], cancel: cancelNudges } = useGetNudgesQuery( + { + chatId: previousResponseIds[endpoint], + filters: processedFiltersForNudges, + limit: parsedFilterData?.limit ?? 3, + scoreThreshold: parsedFilterData?.scoreThreshold ?? 0, + }, + { + enabled: isOnboardingComplete && !isConversationsLoading, // Only fetch nudges after onboarding is complete AND chat history is not loading + }, + ); + + const handleSSEStream = async ( + userMessage: Message, + previousResponseId?: string, + ) => { + // Prepare filters + const processedFilters = parsedFilterData?.filters + ? (() => { + const filters = parsedFilterData.filters; + const processed: SelectedFilters = { + data_sources: [], + document_types: [], + owners: [], + }; + processed.data_sources = filters.data_sources.includes("*") + ? [] + : filters.data_sources; + processed.document_types = filters.document_types.includes("*") + ? [] + : filters.document_types; + processed.owners = filters.owners.includes("*") ? [] : filters.owners; + + const hasFilters = + processed.data_sources.length > 0 || + processed.document_types.length > 0 || + processed.owners.length > 0; + return hasFilters ? 
processed : undefined; + })() + : undefined; + + // Use passed previousResponseId if available, otherwise fall back to state + const responseIdToUse = previousResponseId || previousResponseIds[endpoint]; + + console.log("[CHAT] Sending streaming message:", { + conversationFilter: conversationFilter?.id, + currentConversationId, + responseIdToUse, + }); + + // Use the hook to send the message + await sendStreamingMessage({ + prompt: userMessage.content, + previousResponseId: responseIdToUse || undefined, + filters: processedFilters, + filter_id: conversationFilter?.id, // ✅ Add filter_id for this conversation + limit: parsedFilterData?.limit ?? 10, + scoreThreshold: parsedFilterData?.scoreThreshold ?? 0, + }); + scrollToBottom({ + animation: "smooth", + duration: 1000, + }); + }; + + const handleSendMessage = async ( + inputMessage: string, + previousResponseId?: string, + ) => { + if (!inputMessage.trim() || loading) return; + + const userMessage: Message = { + role: "user", + content: inputMessage.trim(), + timestamp: new Date(), + }; + + if (messages.length === 1) { + setMessages([userMessage]); + } else { + setMessages((prev) => [...prev, userMessage]); + } + setInput(""); + setLoading(true); + setIsFilterHighlighted(false); + + scrollToBottom({ + animation: "smooth", + duration: 1000, + }); + + if (asyncMode) { + await handleSSEStream(userMessage, previousResponseId); + } else { + // Original non-streaming logic + try { + const apiEndpoint = endpoint === "chat" ? "/api/chat" : "/api/langflow"; + + const requestBody: RequestBody = { + prompt: userMessage.content, + ...(parsedFilterData?.filters && + (() => { + const filters = parsedFilterData.filters; + const processed: SelectedFilters = { + data_sources: [], + document_types: [], + owners: [], + }; + // Only copy non-wildcard arrays + processed.data_sources = filters.data_sources.includes("*") + ? [] + : filters.data_sources; + processed.document_types = filters.document_types.includes("*") + ? [] + : filters.document_types; + processed.owners = filters.owners.includes("*") + ? [] + : filters.owners; + + // Only include filters if any array has values + const hasFilters = + processed.data_sources.length > 0 || + processed.document_types.length > 0 || + processed.owners.length > 0; + return hasFilters ? { filters: processed } : {}; + })()), + limit: parsedFilterData?.limit ?? 10, + scoreThreshold: parsedFilterData?.scoreThreshold ?? 
0, + }; + + // Add previous_response_id if we have one for this endpoint + const currentResponseId = previousResponseIds[endpoint]; + if (currentResponseId) { + requestBody.previous_response_id = currentResponseId; + } + + // Add filter_id if a filter is selected for this conversation + if (conversationFilter) { + requestBody.filter_id = conversationFilter.id; + } + + // Debug logging + console.log("[DEBUG] Sending message with:", { + previous_response_id: requestBody.previous_response_id, + filter_id: requestBody.filter_id, + currentConversationId, + previousResponseIds, + }); + + const response = await fetch(apiEndpoint, { + method: "POST", + headers: { + "Content-Type": "application/json", + }, + body: JSON.stringify(requestBody), + }); + + const result = await response.json(); + + if (response.ok) { + const assistantMessage: Message = { + role: "assistant", + content: result.response, + timestamp: new Date(), + }; + setMessages((prev) => [...prev, assistantMessage]); + if (result.response_id) { + cancelNudges(); + } + + // Store the response ID if present for this endpoint + if (result.response_id) { + console.log( + "[DEBUG] Received response_id:", + result.response_id, + "currentConversationId:", + currentConversationId, + ); + + setPreviousResponseIds((prev) => ({ + ...prev, + [endpoint]: result.response_id, + })); + + // If this is a new conversation (no currentConversationId), set it now + if (!currentConversationId) { + console.log( + "[DEBUG] Setting currentConversationId to:", + result.response_id, + ); + setCurrentConversationId(result.response_id); + refreshConversations(true); + } else { + console.log( + "[DEBUG] Existing conversation, doing silent refresh", + ); + // For existing conversations, do a silent refresh to keep backend in sync + refreshConversationsSilent(); + } + + // Carry forward the filter association to the new response_id + if (conversationFilter && typeof window !== "undefined") { + const newKey = `conversation_filter_${result.response_id}`; + localStorage.setItem(newKey, conversationFilter.id); + console.log( + "[DEBUG] Saved filter association:", + newKey, + "=", + conversationFilter.id, + ); + } + } + } else { + console.error("Chat failed:", result.error); + // Set chat error flag to trigger test_completion=true on health checks + setChatError(true); + const errorMessage: Message = { + role: "assistant", + content: "Sorry, I encountered an error. Please try again.", + timestamp: new Date(), + }; + setMessages((prev) => [...prev, errorMessage]); + } + } catch (error) { + console.error("Chat error:", error); + // Set chat error flag to trigger test_completion=true on health checks + setChatError(true); + const errorMessage: Message = { + role: "assistant", + content: + "Sorry, I couldn't connect to the chat service. 
Please try again.", + timestamp: new Date(), + }; + setMessages((prev) => [...prev, errorMessage]); + } + } + + setLoading(false); + }; + + const handleSubmit = async (e: React.FormEvent) => { + e.preventDefault(); + + // Check if there's an uploaded file and upload it first + let uploadedResponseId: string | null = null; + if (uploadedFile) { + // Upload the file first + const responseId = await handleFileUpload(uploadedFile); + // Clear the file after upload + setUploadedFile(null); + + // If the upload resulted in a new conversation, store the response ID + if (responseId) { + uploadedResponseId = responseId; + setPreviousResponseIds((prev) => ({ + ...prev, + [endpoint]: responseId, + })); + } + } + + // Only send message if there's input text + if (input.trim() || uploadedFile) { + // Pass the responseId from upload (if any) to handleSendMessage + handleSendMessage( + !input.trim() ? FILE_CONFIRMATION : input, + uploadedResponseId || undefined, + ); + } + }; + + const toggleFunctionCall = (functionCallId: string) => { + setExpandedFunctionCalls((prev) => { + const newSet = new Set(prev); + if (newSet.has(functionCallId)) { + newSet.delete(functionCallId); + } else { + newSet.add(functionCallId); + } + return newSet; + }); + }; + + const handleForkConversation = ( + messageIndex: number, + event?: React.MouseEvent, + ) => { + // Prevent any default behavior and stop event propagation + if (event) { + event.preventDefault(); + event.stopPropagation(); + } + + // Set interaction state to prevent auto-scroll interference + setIsUserInteracting(true); + setIsForkingInProgress(true); + + console.log("Fork conversation called for message index:", messageIndex); + + // Get messages up to and including the selected assistant message + const messagesToKeep = messages.slice(0, messageIndex + 1); + + // The selected message should be an assistant message (since fork button is only on assistant messages) + const forkedMessage = messages[messageIndex]; + if (forkedMessage.role !== "assistant") { + console.error("Fork button should only be on assistant messages"); + setIsUserInteracting(false); + setIsForkingInProgress(false); + return; + } + + // For forking, we want to continue from the response_id of the assistant message we're forking from + // Since we don't store individual response_ids per message yet, we'll use the current conversation's response_id + // This means we're continuing the conversation thread from that point + const responseIdToForkFrom = + currentConversationId || previousResponseIds[endpoint]; + + // Create a new conversation by properly forking + setMessages(messagesToKeep); + + // Use the chat context's fork method which handles creating a new conversation properly + if (forkFromResponse) { + forkFromResponse(responseIdToForkFrom || ""); + } else { + // Fallback to manual approach + setCurrentConversationId(null); // This creates a new conversation thread + + // Set the response_id we want to continue from as the previous response ID + // This tells the backend to continue the conversation from this point + setPreviousResponseIds((prev) => ({ + ...prev, + [endpoint]: responseIdToForkFrom, + })); + } + + console.log("Forked conversation with", messagesToKeep.length, "messages"); + + // Reset interaction state after a longer delay to ensure all effects complete + setTimeout(() => { + setIsUserInteracting(false); + setIsForkingInProgress(false); + console.log("Fork interaction complete, re-enabling auto effects"); + }, 500); + + // The original conversation remains unchanged in 
the sidebar + // This new forked conversation will get its own response_id when the user sends the next message + }; + + const handleSuggestionClick = (suggestion: string) => { + handleSendMessage(suggestion); + }; + + return ( + <> + {/* Debug header - only show in debug mode */} + {isDebugMode && ( +
+
+
+ {/* Async Mode Toggle */} +
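+ {/* Toggles between the SSE streaming path (handleSSEStream) and the plain request/response path */}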
+ + +
+ {/* Endpoint Toggle */} +
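+ {/* Switches requests between the /api/chat and /api/langflow backends */}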
+ + +
+
+
+ )} + + +
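+ {/* Conversation area: upload/empty state, or the message list with streaming output */}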
+ {messages.length === 0 && !streamingMessage ? ( +
+
+ {isUploading ? ( + <> + +
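+ {/* Shown while an uploaded document is still being ingested */}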

Processing your document...

+

This may take a few moments

+ + ) : null} +
+
+ ) : ( + <> + {messages.map((message, index) => + message.role === "user" + ? (messages[index]?.content.match(FILES_REGEX)?.[0] ?? + null) === null && ( +
+ = 2 && + (messages[index - 2]?.content.match( + FILES_REGEX, + )?.[0] ?? + undefined) && + message.content === FILE_CONFIRMATION + ? undefined + : message.content + } + files={ + index >= 2 + ? (messages[index - 2]?.content.match( + FILES_REGEX, + )?.[0] ?? undefined) + : undefined + } + /> +
+ ) + : message.role === "assistant" && + (index < 1 || + (messages[index - 1]?.content.match(FILES_REGEX)?.[0] ?? + null) === null) && ( +
+ handleForkConversation(index, e)} + animate={false} + isInactive={index < messages.length - 1} + isInitialGreeting={ + index === 0 && + messages.length === 1 && + message.content === "How can I assist?" + } + /> +
+ ), + )} + + {/* Streaming Message Display */} + {streamingMessage && ( + + )} + + {/* Waiting too long indicator */} + {waitingTooLong && !streamingMessage && loading && ( +
+
+ + The server is taking longer than expected... +
+

+ This may be due to high server load. The request will + time out after 60 seconds. +

+
+ )} + + )} + {!streamingMessage && ( +
+ +
+ )} +
+
+
+ {/* Input Area - Fixed at bottom */} + { + // Handle backspace for filter clearing + if ( + e.key === "Backspace" && + selectedFilter && + input.trim() === "" + ) { + e.preventDefault(); + if (isFilterHighlighted) { + // Second backspace - remove the filter + setConversationFilter(null); + setIsFilterHighlighted(false); + } else { + // First backspace - highlight the filter + setIsFilterHighlighted(true); + } + return; + } + + // Handle Enter key for form submission + if (e.key === "Enter" && !e.shiftKey) { + e.preventDefault(); + if (input.trim() && !loading) { + // Trigger form submission by finding the form and calling submit + const form = e.currentTarget.closest("form"); + if (form) { + form.requestSubmit(); + } + } + } + }} + onFilterSelect={handleFilterSelect} + onFilePickerClick={handleFilePickerClick} + onFileSelected={setUploadedFile} + setSelectedFilter={setConversationFilter} + setIsFilterHighlighted={setIsFilterHighlighted} + /> +
+ + ); } export default function ProtectedChatPage() { - return ( - -
- - - -
-
- ); + return ( + +
+ + + +
+
+ ); } diff --git a/frontend/app/onboarding/_components/ibm-onboarding.tsx b/frontend/app/onboarding/_components/ibm-onboarding.tsx index 77fef020..023b3467 100644 --- a/frontend/app/onboarding/_components/ibm-onboarding.tsx +++ b/frontend/app/onboarding/_components/ibm-onboarding.tsx @@ -1,13 +1,13 @@ import type { Dispatch, SetStateAction } from "react"; import { useEffect, useState } from "react"; +import IBMLogo from "@/components/icons/ibm-logo"; import { LabelInput } from "@/components/label-input"; import { LabelWrapper } from "@/components/label-wrapper"; -import IBMLogo from "@/components/icons/ibm-logo"; import { Switch } from "@/components/ui/switch"; import { - Tooltip, - TooltipContent, - TooltipTrigger, + Tooltip, + TooltipContent, + TooltipTrigger, } from "@/components/ui/tooltip"; import { useDebouncedValue } from "@/lib/debounce"; import type { OnboardingVariables } from "../../api/mutations/useOnboardingMutation"; @@ -18,273 +18,273 @@ import { AdvancedOnboarding } from "./advanced"; import { ModelSelector } from "./model-selector"; export function IBMOnboarding({ - isEmbedding = false, - setSettings, - sampleDataset, - setSampleDataset, - setIsLoadingModels, - alreadyConfigured = false, - existingEndpoint, - existingProjectId, - hasEnvApiKey = false, + isEmbedding = false, + setSettings, + sampleDataset, + setSampleDataset, + setIsLoadingModels, + alreadyConfigured = false, + existingEndpoint, + existingProjectId, + hasEnvApiKey = false, }: { - isEmbedding?: boolean; - setSettings: Dispatch>; - sampleDataset: boolean; - setSampleDataset: (dataset: boolean) => void; - setIsLoadingModels?: (isLoading: boolean) => void; - alreadyConfigured?: boolean; - existingEndpoint?: string; - existingProjectId?: string; - hasEnvApiKey?: boolean; + isEmbedding?: boolean; + setSettings: Dispatch>; + sampleDataset: boolean; + setSampleDataset: (dataset: boolean) => void; + setIsLoadingModels?: (isLoading: boolean) => void; + alreadyConfigured?: boolean; + existingEndpoint?: string; + existingProjectId?: string; + hasEnvApiKey?: boolean; }) { - const [endpoint, setEndpoint] = useState( - alreadyConfigured ? "" : (existingEndpoint || "https://us-south.ml.cloud.ibm.com"), - ); - const [apiKey, setApiKey] = useState(""); - const [getFromEnv, setGetFromEnv] = useState( - hasEnvApiKey && !alreadyConfigured, - ); - const [projectId, setProjectId] = useState( - alreadyConfigured ? "" : (existingProjectId || ""), - ); + const [endpoint, setEndpoint] = useState( + alreadyConfigured + ? "" + : existingEndpoint || "https://us-south.ml.cloud.ibm.com", + ); + const [apiKey, setApiKey] = useState(""); + const [getFromEnv, setGetFromEnv] = useState( + hasEnvApiKey && !alreadyConfigured, + ); + const [projectId, setProjectId] = useState( + alreadyConfigured ? 
"" : existingProjectId || "", + ); - const options = [ - { - value: "https://us-south.ml.cloud.ibm.com", - label: "https://us-south.ml.cloud.ibm.com", - default: true, - }, - { - value: "https://eu-de.ml.cloud.ibm.com", - label: "https://eu-de.ml.cloud.ibm.com", - default: false, - }, - { - value: "https://eu-gb.ml.cloud.ibm.com", - label: "https://eu-gb.ml.cloud.ibm.com", - default: false, - }, - { - value: "https://au-syd.ml.cloud.ibm.com", - label: "https://au-syd.ml.cloud.ibm.com", - default: false, - }, - { - value: "https://jp-tok.ml.cloud.ibm.com", - label: "https://jp-tok.ml.cloud.ibm.com", - default: false, - }, - { - value: "https://ca-tor.ml.cloud.ibm.com", - label: "https://ca-tor.ml.cloud.ibm.com", - default: false, - }, - ]; - const debouncedEndpoint = useDebouncedValue(endpoint, 500); - const debouncedApiKey = useDebouncedValue(apiKey, 500); - const debouncedProjectId = useDebouncedValue(projectId, 500); + const options = [ + { + value: "https://us-south.ml.cloud.ibm.com", + label: "https://us-south.ml.cloud.ibm.com", + default: true, + }, + { + value: "https://eu-de.ml.cloud.ibm.com", + label: "https://eu-de.ml.cloud.ibm.com", + default: false, + }, + { + value: "https://eu-gb.ml.cloud.ibm.com", + label: "https://eu-gb.ml.cloud.ibm.com", + default: false, + }, + { + value: "https://au-syd.ml.cloud.ibm.com", + label: "https://au-syd.ml.cloud.ibm.com", + default: false, + }, + { + value: "https://jp-tok.ml.cloud.ibm.com", + label: "https://jp-tok.ml.cloud.ibm.com", + default: false, + }, + { + value: "https://ca-tor.ml.cloud.ibm.com", + label: "https://ca-tor.ml.cloud.ibm.com", + default: false, + }, + ]; + const debouncedEndpoint = useDebouncedValue(endpoint, 500); + const debouncedApiKey = useDebouncedValue(apiKey, 500); + const debouncedProjectId = useDebouncedValue(projectId, 500); - // Fetch models from API when all credentials are provided - const { - data: modelsData, - isLoading: isLoadingModels, - error: modelsError, - } = useGetIBMModelsQuery( - { - endpoint: debouncedEndpoint ? debouncedEndpoint : undefined, - apiKey: getFromEnv ? "" : (debouncedApiKey ? debouncedApiKey : undefined), - projectId: debouncedProjectId ? debouncedProjectId : undefined, - }, - { - enabled: - !!debouncedEndpoint || - !!debouncedApiKey || - !!debouncedProjectId || - getFromEnv || - alreadyConfigured, - }, - ); + // Fetch models from API when all credentials are provided + const { + data: modelsData, + isLoading: isLoadingModels, + error: modelsError, + } = useGetIBMModelsQuery( + { + endpoint: debouncedEndpoint ? debouncedEndpoint : undefined, + apiKey: getFromEnv ? "" : debouncedApiKey ? debouncedApiKey : undefined, + projectId: debouncedProjectId ? 
debouncedProjectId : undefined, + }, + { + enabled: + (!!debouncedEndpoint && !!debouncedApiKey && !!debouncedProjectId) || + getFromEnv || + alreadyConfigured, + }, + ); - // Use custom hook for model selection logic - const { - languageModel, - embeddingModel, - setLanguageModel, - setEmbeddingModel, - languageModels, - embeddingModels, - } = useModelSelection(modelsData, isEmbedding); + // Use custom hook for model selection logic + const { + languageModel, + embeddingModel, + setLanguageModel, + setEmbeddingModel, + languageModels, + embeddingModels, + } = useModelSelection(modelsData, isEmbedding); - const handleGetFromEnvChange = (fromEnv: boolean) => { - setGetFromEnv(fromEnv); - if (fromEnv) { - setApiKey(""); - } - setEmbeddingModel?.(""); - setLanguageModel?.(""); - }; + const handleGetFromEnvChange = (fromEnv: boolean) => { + setGetFromEnv(fromEnv); + if (fromEnv) { + setApiKey(""); + } + setEmbeddingModel?.(""); + setLanguageModel?.(""); + }; - const handleSampleDatasetChange = (dataset: boolean) => { - setSampleDataset(dataset); - }; + const handleSampleDatasetChange = (dataset: boolean) => { + setSampleDataset(dataset); + }; - useEffect(() => { - setIsLoadingModels?.(isLoadingModels); - }, [isLoadingModels, setIsLoadingModels]); + useEffect(() => { + setIsLoadingModels?.(isLoadingModels); + }, [isLoadingModels, setIsLoadingModels]); - // Update settings when values change - useUpdateSettings( - "watsonx", - { - endpoint, - apiKey, - projectId, - languageModel, - embeddingModel, - }, - setSettings, - isEmbedding, - ); + // Update settings when values change + useUpdateSettings( + "watsonx", + { + endpoint, + apiKey, + projectId, + languageModel, + embeddingModel, + }, + setSettings, + isEmbedding, + ); - return ( - <> -
- -
- {} : setEndpoint} - searchPlaceholder="Search endpoint..." - noOptionsPlaceholder={ - alreadyConfigured - ? "https://•••••••••••••••••••••••••••••••••••••••••" - : "No endpoints available" - } - placeholder="Select endpoint..." - /> - {alreadyConfigured && ( -

- Reusing endpoint from model provider selection. -

- )} -
-
+ return ( + <> +
+ +
+ {} : setEndpoint} + searchPlaceholder="Search endpoint..." + noOptionsPlaceholder={ + alreadyConfigured + ? "https://•••••••••••••••••••••••••••••••••••••••••" + : "No endpoints available" + } + placeholder="Select endpoint..." + /> + {alreadyConfigured && ( +

+ Reusing endpoint from model provider selection. +

+ )} +
+
-
- setProjectId(e.target.value)} - disabled={alreadyConfigured} - /> - {alreadyConfigured && ( -

- Reusing project ID from model provider selection. -

- )} -
- - - -
- -
-
- {!hasEnvApiKey && !alreadyConfigured && ( - - watsonx API key not detected in the environment. - - )} -
-
- {!getFromEnv && !alreadyConfigured && ( -
- setApiKey(e.target.value)} - /> - {isLoadingModels && ( -

- Validating API key... -

- )} - {modelsError && ( -

- Invalid watsonx API key. Verify or replace the key. -

- )} -
- )} - {alreadyConfigured && ( -
- setApiKey(e.target.value)} - disabled={true} - /> -

- Reusing API key from model provider selection. -

-
- )} - {getFromEnv && isLoadingModels && ( -

- Validating configuration... -

- )} - {getFromEnv && modelsError && ( -

- Connection failed. Check your configuration. -

- )} -
- } - languageModels={languageModels} - embeddingModels={embeddingModels} - languageModel={languageModel} - embeddingModel={embeddingModel} - sampleDataset={sampleDataset} - setLanguageModel={setLanguageModel} - setEmbeddingModel={setEmbeddingModel} - setSampleDataset={handleSampleDatasetChange} - /> - - ); +
+ setProjectId(e.target.value)} + disabled={alreadyConfigured} + /> + {alreadyConfigured && ( +

+ Reusing project ID from model provider selection. +

+ )} +
+ + + +
+ +
+
+ {!hasEnvApiKey && !alreadyConfigured && ( + + watsonx API key not detected in the environment. + + )} +
+
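+ {/* Manual API key entry, shown when the key is not read from the environment */}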
+ {!getFromEnv && !alreadyConfigured && ( +
+ setApiKey(e.target.value)} + /> + {isLoadingModels && ( +

+ Validating API key... +

+ )} + {modelsError && ( +

+ Invalid watsonx API key. Verify or replace the key. +

+ )} +
+ )} + {alreadyConfigured && ( +
+ setApiKey(e.target.value)} + disabled={true} + /> +

+ Reusing API key from model provider selection. +

+
+ )} + {getFromEnv && isLoadingModels && ( +

+ Validating configuration... +

+ )} + {getFromEnv && modelsError && ( +

+ Connection failed. Check your configuration. +

+ )} +
+ } + languageModels={languageModels} + embeddingModels={embeddingModels} + languageModel={languageModel} + embeddingModel={embeddingModel} + sampleDataset={sampleDataset} + setLanguageModel={setLanguageModel} + setEmbeddingModel={setEmbeddingModel} + setSampleDataset={handleSampleDatasetChange} + /> + + ); } diff --git a/frontend/app/onboarding/_components/onboarding-card.tsx b/frontend/app/onboarding/_components/onboarding-card.tsx index f82723f1..7baa36a5 100644 --- a/frontend/app/onboarding/_components/onboarding-card.tsx +++ b/frontend/app/onboarding/_components/onboarding-card.tsx @@ -507,7 +507,7 @@ const OnboardingCard = ({ hasEnvApiKey={ currentSettings?.providers?.openai?.has_api_key === true } - alreadyConfigured={providerAlreadyConfigured} + alreadyConfigured={providerAlreadyConfigured && modelProvider === "openai"} /> @@ -517,7 +517,7 @@ const OnboardingCard = ({ setSampleDataset={setSampleDataset} setIsLoadingModels={setIsLoadingModels} isEmbedding={isEmbedding} - alreadyConfigured={providerAlreadyConfigured} + alreadyConfigured={providerAlreadyConfigured && modelProvider === "watsonx"} existingEndpoint={currentSettings?.providers?.watsonx?.endpoint} existingProjectId={currentSettings?.providers?.watsonx?.project_id} hasEnvApiKey={currentSettings?.providers?.watsonx?.has_api_key === true} @@ -530,7 +530,7 @@ const OnboardingCard = ({ setSampleDataset={setSampleDataset} setIsLoadingModels={setIsLoadingModels} isEmbedding={isEmbedding} - alreadyConfigured={providerAlreadyConfigured} + alreadyConfigured={providerAlreadyConfigured && modelProvider === "ollama"} existingEndpoint={currentSettings?.providers?.ollama?.endpoint} /> diff --git a/frontend/app/onboarding/_components/onboarding-upload.tsx b/frontend/app/onboarding/_components/onboarding-upload.tsx index ce7a0f91..a356aa70 100644 --- a/frontend/app/onboarding/_components/onboarding-upload.tsx +++ b/frontend/app/onboarding/_components/onboarding-upload.tsx @@ -1,3 +1,4 @@ +import { X } from "lucide-react"; import { AnimatePresence, motion } from "motion/react"; import { type ChangeEvent, useEffect, useRef, useState } from "react"; import { toast } from "sonner"; @@ -7,242 +8,400 @@ import { useGetTasksQuery } from "@/app/api/queries/useGetTasksQuery"; import { AnimatedProviderSteps } from "@/app/onboarding/_components/animated-provider-steps"; import { Button } from "@/components/ui/button"; import { - ONBOARDING_UPLOAD_STEPS_KEY, - ONBOARDING_USER_DOC_FILTER_ID_KEY, + ONBOARDING_UPLOAD_STEPS_KEY, + ONBOARDING_USER_DOC_FILTER_ID_KEY, } from "@/lib/constants"; import { uploadFile } from "@/lib/upload-utils"; interface OnboardingUploadProps { - onComplete: () => void; + onComplete: () => void; } const OnboardingUpload = ({ onComplete }: OnboardingUploadProps) => { - const fileInputRef = useRef(null); - const [isUploading, setIsUploading] = useState(false); - const [currentStep, setCurrentStep] = useState(null); - const [uploadedFilename, setUploadedFilename] = useState(null); - const [shouldCreateFilter, setShouldCreateFilter] = useState(false); - const [isCreatingFilter, setIsCreatingFilter] = useState(false); + const fileInputRef = useRef(null); + const [isUploading, setIsUploading] = useState(false); + const [currentStep, setCurrentStep] = useState(null); + const [uploadedFilename, setUploadedFilename] = useState(null); + const [uploadedTaskId, setUploadedTaskId] = useState(null); + const [shouldCreateFilter, setShouldCreateFilter] = useState(false); + const [isCreatingFilter, setIsCreatingFilter] = useState(false); + 
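// Error message from the most recent failed upload or ingestion, shown in the upload UI +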
const [error, setError] = useState(null); - const createFilterMutation = useCreateFilter(); + // Track which tasks we've already handled to prevent infinite loops + const handledFailedTasksRef = useRef>(new Set()); - const STEP_LIST = [ - "Uploading your document", - "Generating embeddings", - "Ingesting document", - "Processing your document", - ]; + const createFilterMutation = useCreateFilter(); - // Query tasks to track completion - const { data: tasks } = useGetTasksQuery({ - enabled: currentStep !== null, // Only poll when upload has started - refetchInterval: currentStep !== null ? 1000 : false, // Poll every 1 second during upload - }); + const STEP_LIST = [ + "Uploading your document", + "Generating embeddings", + "Ingesting document", + "Processing your document", + ]; - const { refetch: refetchNudges } = useGetNudgesQuery(null); + // Query tasks to track completion + const { data: tasks } = useGetTasksQuery({ + enabled: currentStep !== null, // Only poll when upload has started + refetchInterval: currentStep !== null ? 1000 : false, // Poll every 1 second during upload + }); - // Monitor tasks and call onComplete when file processing is done - useEffect(() => { - if (currentStep === null || !tasks) { - return; - } + // Monitor tasks and call onComplete when file processing is done + useEffect(() => { + if (currentStep === null || !tasks || !uploadedTaskId) { + return; + } - // Check if there are any active tasks (pending, running, or processing) - const activeTasks = tasks.find( - (task) => - task.status === "pending" || - task.status === "running" || - task.status === "processing", - ); + // Find the task by task ID from the upload response + const matchingTask = tasks.find((task) => task.task_id === uploadedTaskId); - // If no active tasks and we have more than 1 task (initial + new upload), complete it - if ( - (!activeTasks || (activeTasks.processed_files ?? 0) > 0) && - tasks.length > 1 - ) { - // Set to final step to show "Done" - setCurrentStep(STEP_LIST.length); + // If no matching task found, wait for it to appear + if (!matchingTask) { + return; + } - // Create knowledge filter for uploaded document if requested - // Guard against race condition: only create if not already creating - if (shouldCreateFilter && uploadedFilename && !isCreatingFilter) { - // Reset flags immediately (synchronously) to prevent duplicate creation - setShouldCreateFilter(false); - const filename = uploadedFilename; - setUploadedFilename(null); - setIsCreatingFilter(true); + // Skip if this task was already handled as a failed task (from a previous failed upload) + // This prevents processing old failed tasks when a new upload starts + if (handledFailedTasksRef.current.has(matchingTask.task_id)) { + // Check if it's a failed task that we've already handled + const hasFailedFile = + matchingTask.files && + Object.values(matchingTask.files).some( + (file) => file.status === "failed" || file.status === "error", + ); + if (hasFailedFile) { + // This is an old failed task that we've already handled, ignore it + console.log( + "Skipping already-handled failed task:", + matchingTask.task_id, + ); + return; + } + // If it's not a failed task, remove it from handled list (it might have succeeded on retry) + handledFailedTasksRef.current.delete(matchingTask.task_id); + } - // Get display name from filename (remove extension for cleaner name) - const displayName = filename.includes(".") - ? 
filename.substring(0, filename.lastIndexOf(".")) - : filename; + // Check if any file failed in the matching task + const hasFailedFile = (() => { + // Must have files object + if (!matchingTask.files || typeof matchingTask.files !== "object") { + return false; + } - const queryData = JSON.stringify({ - query: "", - filters: { - data_sources: [filename], - document_types: ["*"], - owners: ["*"], - connector_types: ["*"], - }, - limit: 10, - scoreThreshold: 0, - color: "green", - icon: "file", - }); + const fileEntries = Object.values(matchingTask.files); - createFilterMutation - .mutateAsync({ - name: displayName, - description: `Filter for ${filename}`, - queryData: queryData, - }) - .then((result) => { - if (result.filter?.id && typeof window !== "undefined") { - localStorage.setItem( - ONBOARDING_USER_DOC_FILTER_ID_KEY, - result.filter.id, - ); - console.log( - "Created knowledge filter for uploaded document", - result.filter.id, - ); - } - }) - .catch((error) => { - console.error("Failed to create knowledge filter:", error); - }) - .finally(() => { - setIsCreatingFilter(false); - }); - } + // Must have at least one file + if (fileEntries.length === 0) { + return false; + } - // Refetch nudges to get new ones - refetchNudges(); + // Check if any file has failed status + return fileEntries.some( + (file) => file.status === "failed" || file.status === "error", + ); + })(); - // Wait a bit before completing - setTimeout(() => { - onComplete(); - }, 1000); - } - }, [tasks, currentStep, onComplete, refetchNudges, shouldCreateFilter, uploadedFilename]); + // If any file failed, show error and jump back one step (like onboarding-card.tsx) + // Only handle if we haven't already handled this task + if ( + hasFailedFile && + !isCreatingFilter && + !handledFailedTasksRef.current.has(matchingTask.task_id) + ) { + console.error("File failed in task, jumping back one step", matchingTask); - const resetFileInput = () => { - if (fileInputRef.current) { - fileInputRef.current.value = ""; - } - }; + // Mark this task as handled to prevent infinite loops + handledFailedTasksRef.current.add(matchingTask.task_id); - const handleUploadClick = () => { - fileInputRef.current?.click(); - }; + // Extract error messages from failed files + const errorMessages: string[] = []; + if (matchingTask.files) { + Object.values(matchingTask.files).forEach((file) => { + if ( + (file.status === "failed" || file.status === "error") && + file.error + ) { + errorMessages.push(file.error); + } + }); + } - const performUpload = async (file: File) => { - setIsUploading(true); - try { - setCurrentStep(0); - const result = await uploadFile(file, true, true); // Pass createFilter=true - console.log("Document upload task started successfully"); + // Also check task-level error + if (matchingTask.error) { + errorMessages.push(matchingTask.error); + } - // Store filename and createFilter flag in state to create filter after ingestion succeeds - if (result.createFilter && result.filename) { - setUploadedFilename(result.filename); - setShouldCreateFilter(true); - } + // Use the first error message, or a generic message if no errors found + const errorMessage = + errorMessages.length > 0 + ? errorMessages[0] + : "Document failed to ingest. Please try again with a different file."; - // Move to processing step - task monitoring will handle completion - setTimeout(() => { - setCurrentStep(1); - }, 1500); - } catch (error) { - const errorMessage = error instanceof Error ? 
error.message : "Upload failed"; - console.error("Upload failed", errorMessage); + // Set error message and jump back one step + setError(errorMessage); + setCurrentStep(STEP_LIST.length); - // Dispatch event that chat context can listen to - // This avoids circular dependency issues - if (typeof window !== "undefined") { - window.dispatchEvent( - new CustomEvent("ingestionFailed", { - detail: { source: "onboarding" }, - }), - ); - } + // Clear filter creation flags since ingestion failed + setShouldCreateFilter(false); + setUploadedFilename(null); - // Show error toast notification - toast.error("Document upload failed", { - description: errorMessage, - duration: 5000, - }); + // Jump back one step after 1 second (go back to upload step) + setTimeout(() => { + setCurrentStep(null); + }, 1000); + return; + } - // Reset on error - setCurrentStep(null); - } finally { - setIsUploading(false); - } - }; + // Check if the matching task is still active (pending, running, or processing) + const isTaskActive = + matchingTask.status === "pending" || + matchingTask.status === "running" || + matchingTask.status === "processing"; - const handleFileChange = async (event: ChangeEvent) => { - const selectedFile = event.target.files?.[0]; - if (!selectedFile) { - resetFileInput(); - return; - } + // If task is completed successfully (no failures) and has processed files, complete the onboarding step + if ( + (!isTaskActive || (matchingTask.processed_files ?? 0) > 0) && + !hasFailedFile + ) { + // Set to final step to show "Done" + setCurrentStep(STEP_LIST.length); - try { - await performUpload(selectedFile); - } catch (error) { - console.error( - "Unable to prepare file for upload", - (error as Error).message, - ); - } finally { - resetFileInput(); - } - }; + // Create knowledge filter for uploaded document if requested + // Guard against race condition: only create if not already creating + if (shouldCreateFilter && uploadedFilename && !isCreatingFilter) { + // Reset flags immediately (synchronously) to prevent duplicate creation + setShouldCreateFilter(false); + const filename = uploadedFilename; + setUploadedFilename(null); + setIsCreatingFilter(true); - return ( - - {currentStep === null ? ( - - - - - ) : ( - - - - )} - - ); + // Get display name from filename (remove extension for cleaner name) + const displayName = filename.includes(".") + ? 
filename.substring(0, filename.lastIndexOf(".")) + : filename; + + const queryData = JSON.stringify({ + query: "", + filters: { + data_sources: [filename], + document_types: ["*"], + owners: ["*"], + connector_types: ["*"], + }, + limit: 10, + scoreThreshold: 0, + color: "green", + icon: "file", + }); + + // Wait for filter creation to complete before proceeding + createFilterMutation + .mutateAsync({ + name: displayName, + description: `Filter for ${filename}`, + queryData: queryData, + }) + .then((result) => { + if (result.filter?.id && typeof window !== "undefined") { + localStorage.setItem( + ONBOARDING_USER_DOC_FILTER_ID_KEY, + result.filter.id, + ); + console.log( + "Created knowledge filter for uploaded document", + result.filter.id, + ); + } + }) + .catch((error) => { + console.error("Failed to create knowledge filter:", error); + }) + .finally(() => { + setIsCreatingFilter(false); + + // Wait a bit before completing (after filter is created) + setTimeout(() => { + onComplete(); + }, 1000); + }); + } else { + // No filter to create, just complete + + // Wait a bit before completing + setTimeout(() => { + onComplete(); + }, 1000); + } + } + }, [ + tasks, + currentStep, + onComplete, + shouldCreateFilter, + uploadedFilename, + uploadedTaskId, + createFilterMutation, + isCreatingFilter, + ]); + + const resetFileInput = () => { + if (fileInputRef.current) { + fileInputRef.current.value = ""; + } + }; + + const handleUploadClick = () => { + // Clear any previous error when user clicks to upload again + setError(null); + fileInputRef.current?.click(); + }; + + const performUpload = async (file: File) => { + setIsUploading(true); + // Clear any previous error when starting a new upload + setError(null); + // Clear handled tasks ref to allow retry + handledFailedTasksRef.current.clear(); + // Reset task ID to prevent matching old failed tasks + setUploadedTaskId(null); + // Clear filter creation flags + setShouldCreateFilter(false); + setUploadedFilename(null); + + try { + setCurrentStep(0); + const result = await uploadFile(file, true, true); // Pass createFilter=true + console.log("Document upload task started successfully"); + + // Store task ID to track the specific upload task + if (result.taskId) { + setUploadedTaskId(result.taskId); + } + + // Store filename and createFilter flag in state to create filter after ingestion succeeds + if (result.createFilter && result.filename) { + setUploadedFilename(result.filename); + setShouldCreateFilter(true); + } + + // Move to processing step - task monitoring will handle completion + setTimeout(() => { + setCurrentStep(1); + }, 1500); + } catch (error) { + const errorMessage = + error instanceof Error ? 
error.message : "Upload failed"; + console.error("Upload failed", errorMessage); + + // Dispatch event that chat context can listen to + // This avoids circular dependency issues + if (typeof window !== "undefined") { + window.dispatchEvent( + new CustomEvent("ingestionFailed", { + detail: { source: "onboarding" }, + }), + ); + } + + // Show error toast notification + toast.error("Document upload failed", { + description: errorMessage, + duration: 5000, + }); + + // Reset on error + setCurrentStep(null); + setUploadedTaskId(null); + setError(errorMessage); + setShouldCreateFilter(false); + setUploadedFilename(null); + } finally { + setIsUploading(false); + } + }; + + const handleFileChange = async (event: ChangeEvent) => { + const selectedFile = event.target.files?.[0]; + if (!selectedFile) { + resetFileInput(); + return; + } + + try { + await performUpload(selectedFile); + } catch (error) { + console.error( + "Unable to prepare file for upload", + (error as Error).message, + ); + } finally { + resetFileInput(); + } + }; + + return ( + + {currentStep === null ? ( + +
+ + {error && ( + +
+ + + {error} + +
+
+ )} +
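+ {/* Upload button and file input, wired to handleUploadClick and handleFileChange */}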
+
+ +
+ +
+
+ ) : ( + + + + )} +
+ ); }; export default OnboardingUpload; diff --git a/frontend/components/chat-renderer.tsx b/frontend/components/chat-renderer.tsx index 6804b065..cceb99c7 100644 --- a/frontend/components/chat-renderer.tsx +++ b/frontend/components/chat-renderer.tsx @@ -47,8 +47,7 @@ export function ChatRenderer({ refreshConversations, startNewConversation, setConversationFilter, - setCurrentConversationId, - setPreviousResponseIds, + setOnboardingComplete, } = useChat(); // Initialize onboarding state based on local storage and settings @@ -170,6 +169,9 @@ export function ChatRenderer({ localStorage.removeItem(ONBOARDING_UPLOAD_STEPS_KEY); } + // Mark onboarding as complete in context + setOnboardingComplete(true); + // Clear ALL conversation state so next message starts fresh await startNewConversation(); @@ -202,6 +204,8 @@ export function ChatRenderer({ localStorage.removeItem(ONBOARDING_CARD_STEPS_KEY); localStorage.removeItem(ONBOARDING_UPLOAD_STEPS_KEY); } + // Mark onboarding as complete in context + setOnboardingComplete(true); // Store the OpenRAG docs filter as default for new conversations storeDefaultFilterForNewConversations(false); setShowLayout(true); diff --git a/frontend/components/provider-health-banner.tsx b/frontend/components/provider-health-banner.tsx index 1a91a601..3bf5cfb1 100644 --- a/frontend/components/provider-health-banner.tsx +++ b/frontend/components/provider-health-banner.tsx @@ -5,125 +5,131 @@ import { useRouter } from "next/navigation"; import { useProviderHealthQuery } from "@/app/api/queries/useProviderHealthQuery"; import type { ModelProvider } from "@/app/settings/_helpers/model-helpers"; import { Banner, BannerIcon, BannerTitle } from "@/components/ui/banner"; -import { cn } from "@/lib/utils"; import { useChat } from "@/contexts/chat-context"; +import { cn } from "@/lib/utils"; import { Button } from "./ui/button"; interface ProviderHealthBannerProps { - className?: string; + className?: string; } // Custom hook to check provider health status export function useProviderHealth() { - const { hasChatError } = useChat(); - const { - data: health, - isLoading, - isFetching, - error, - isError, - } = useProviderHealthQuery({ - test_completion: hasChatError, // Use test_completion=true when chat errors occur - }); + const { hasChatError } = useChat(); + const { + data: health, + isLoading, + isFetching, + error, + isError, + } = useProviderHealthQuery({ + test_completion: hasChatError, // Use test_completion=true when chat errors occur + }); - const isHealthy = health?.status === "healthy" && !isError; - // Only consider unhealthy if backend is up but provider validation failed - // Don't show banner if backend is unavailable - const isUnhealthy = - health?.status === "unhealthy" || health?.status === "error"; - const isBackendUnavailable = - health?.status === "backend-unavailable" || isError; + const isHealthy = health?.status === "healthy" && !isError; + // Only consider unhealthy if backend is up but provider validation failed + // Don't show banner if backend is unavailable + const isUnhealthy = + health?.status === "unhealthy" || health?.status === "error"; + const isBackendUnavailable = + health?.status === "backend-unavailable" || isError; - return { - health, - isLoading, - isFetching, - error, - isError, - isHealthy, - isUnhealthy, - isBackendUnavailable, - }; + return { + health, + isLoading, + isFetching, + error, + isError, + isHealthy, + isUnhealthy, + isBackendUnavailable, + }; } const providerTitleMap: Record = { - openai: "OpenAI", - anthropic: 
"Anthropic", - ollama: "Ollama", - watsonx: "IBM watsonx.ai", + openai: "OpenAI", + anthropic: "Anthropic", + ollama: "Ollama", + watsonx: "IBM watsonx.ai", }; export function ProviderHealthBanner({ className }: ProviderHealthBannerProps) { - const { isLoading, isHealthy, isUnhealthy, health } = useProviderHealth(); - const router = useRouter(); + const { isLoading, isHealthy, isUnhealthy, health } = useProviderHealth(); + const router = useRouter(); - // Only show banner when provider is unhealthy (not when backend is unavailable) - if (isLoading || isHealthy) { - return null; - } + // Only show banner when provider is unhealthy (not when backend is unavailable) + if (isLoading || isHealthy) { + return null; + } - if (isUnhealthy) { - const llmProvider = health?.llm_provider || health?.provider; - const embeddingProvider = health?.embedding_provider; - const llmError = health?.llm_error; - const embeddingError = health?.embedding_error; + if (isUnhealthy) { + const llmProvider = health?.llm_provider || health?.provider; + const embeddingProvider = health?.embedding_provider; + const llmError = health?.llm_error; + const embeddingError = health?.embedding_error; - // Determine which provider has the error - let errorProvider: string | undefined; - let errorMessage: string; + // Determine which provider has the error + let errorProvider: string | undefined; + let errorMessage: string; - if (llmError && embeddingError) { - // Both have errors - show combined message - errorMessage = health?.message || "Provider validation failed"; - errorProvider = undefined; // Don't link to a specific provider - } else if (llmError) { - // Only LLM has error - errorProvider = llmProvider; - errorMessage = llmError; - } else if (embeddingError) { - // Only embedding has error - errorProvider = embeddingProvider; - errorMessage = embeddingError; - } else { - // Fallback to original message - errorMessage = health?.message || "Provider validation failed"; - errorProvider = llmProvider; - } + if (llmError && embeddingError) { + // Both have errors - check if they're the same + if (llmError === embeddingError) { + // Same error for both - show once + errorMessage = llmError; + } else { + // Different errors - show both + errorMessage = `${llmError}; ${embeddingError}`; + } + errorProvider = undefined; // Don't link to a specific provider + } else if (llmError) { + // Only LLM has error + errorProvider = llmProvider; + errorMessage = llmError; + } else if (embeddingError) { + // Only embedding has error + errorProvider = embeddingProvider; + errorMessage = embeddingError; + } else { + // Fallback to original message + errorMessage = health?.message || "Provider validation failed"; + errorProvider = llmProvider; + } - const providerTitle = errorProvider - ? providerTitleMap[errorProvider as ModelProvider] || errorProvider - : "Provider"; + const providerTitle = errorProvider + ? providerTitleMap[errorProvider as ModelProvider] || errorProvider + : "Provider"; - const settingsUrl = errorProvider - ? `/settings?setup=${errorProvider}` - : "/settings"; + const settingsUrl = errorProvider + ? `/settings?setup=${errorProvider}` + : "/settings"; - return ( - - - - {llmError && embeddingError ? ( - <>Provider errors - {errorMessage} - ) : ( - <> - {providerTitle} error - {errorMessage} - - )} - - - - ); - } + return ( + + + + {llmError && embeddingError ? 
( + <>Provider errors - {errorMessage} + ) : ( + <> + {providerTitle} error - {errorMessage} + + )} + + + + ); + } - return null; + return null; } diff --git a/frontend/contexts/chat-context.tsx b/frontend/contexts/chat-context.tsx index 59b5edeb..611c3324 100644 --- a/frontend/contexts/chat-context.tsx +++ b/frontend/contexts/chat-context.tsx @@ -10,6 +10,7 @@ import { useRef, useState, } from "react"; +import { ONBOARDING_STEP_KEY } from "@/lib/constants"; export type EndpointType = "chat" | "langflow"; @@ -81,6 +82,8 @@ interface ChatContextType { setConversationFilter: (filter: KnowledgeFilter | null, responseId?: string | null) => void; hasChatError: boolean; setChatError: (hasError: boolean) => void; + isOnboardingComplete: boolean; + setOnboardingComplete: (complete: boolean) => void; } const ChatContext = createContext(undefined); @@ -111,6 +114,37 @@ export function ChatProvider({ children }: ChatProviderProps) { const [conversationFilter, setConversationFilterState] = useState(null); const [hasChatError, setChatError] = useState(false); + + // Check if onboarding is complete (onboarding step key should be null) + const [isOnboardingComplete, setIsOnboardingComplete] = useState(() => { + if (typeof window === "undefined") return false; + return localStorage.getItem(ONBOARDING_STEP_KEY) === null; + }); + + // Sync onboarding completion state with localStorage + useEffect(() => { + const checkOnboarding = () => { + if (typeof window !== "undefined") { + setIsOnboardingComplete( + localStorage.getItem(ONBOARDING_STEP_KEY) === null, + ); + } + }; + + // Check on mount + checkOnboarding(); + + // Listen for storage events (for cross-tab sync) + window.addEventListener("storage", checkOnboarding); + + return () => { + window.removeEventListener("storage", checkOnboarding); + }; + }, []); + + const setOnboardingComplete = useCallback((complete: boolean) => { + setIsOnboardingComplete(complete); + }, []); // Listen for ingestion failures and set chat error flag useEffect(() => { @@ -375,6 +409,8 @@ export function ChatProvider({ children }: ChatProviderProps) { setConversationFilter, hasChatError, setChatError, + isOnboardingComplete, + setOnboardingComplete, }), [ endpoint, @@ -396,6 +432,8 @@ export function ChatProvider({ children }: ChatProviderProps) { conversationFilter, setConversationFilter, hasChatError, + isOnboardingComplete, + setOnboardingComplete, ], ); diff --git a/frontend/lib/upload-utils.ts b/frontend/lib/upload-utils.ts index 9892bde7..ad09bb3b 100644 --- a/frontend/lib/upload-utils.ts +++ b/frontend/lib/upload-utils.ts @@ -12,6 +12,7 @@ export interface UploadFileResult { raw: unknown; createFilter?: boolean; filename?: string; + taskId?: string; } export async function duplicateCheck( @@ -158,6 +159,7 @@ export async function uploadFile( (uploadIngestJson as { upload?: { id?: string } }).upload?.id || (uploadIngestJson as { id?: string }).id || (uploadIngestJson as { task_id?: string }).task_id; + const taskId = (uploadIngestJson as { task_id?: string }).task_id; const filePath = (uploadIngestJson as { upload?: { path?: string } }).upload?.path || (uploadIngestJson as { path?: string }).path || @@ -197,6 +199,7 @@ export async function uploadFile( raw: uploadIngestJson, createFilter: shouldCreateFilter, filename, + taskId, }; return result; diff --git a/src/api/provider_validation.py b/src/api/provider_validation.py index c4307003..813826a1 100644 --- a/src/api/provider_validation.py +++ b/src/api/provider_validation.py @@ -1,5 +1,6 @@ """Provider validation utilities 
for testing API keys and models during onboarding."""
+import json
 import httpx
 from utils.container_utils import transform_localhost_url
 from utils.logging_config import get_logger
@@ -7,6 +8,106 @@ from utils.logging_config import get_logger
 logger = get_logger(__name__)
 
 
+def _parse_json_error_message(error_text: str) -> str:
+    """Parse JSON error message and extract just the message field."""
+    try:
+        # Try to parse as JSON
+        error_data = json.loads(error_text)
+
+        if isinstance(error_data, dict):
+            # WatsonX format: {"errors": [{"code": "...", "message": "..."}], ...}
+            if "errors" in error_data and isinstance(error_data["errors"], list):
+                errors = error_data["errors"]
+                if len(errors) > 0 and isinstance(errors[0], dict):
+                    message = errors[0].get("message", "")
+                    if message:
+                        return message
+                    code = errors[0].get("code", "")
+                    if code:
+                        return f"Error: {code}"
+
+            # OpenAI format: {"error": {"message": "...", "type": "...", "code": "..."}}
+            if "error" in error_data:
+                error_obj = error_data["error"]
+                if isinstance(error_obj, dict):
+                    message = error_obj.get("message", "")
+                    if message:
+                        return message
+
+            # Direct message field
+            if "message" in error_data:
+                return error_data["message"]
+
+            # Generic format: {"detail": "..."}
+            if "detail" in error_data:
+                return error_data["detail"]
+    except (json.JSONDecodeError, ValueError, TypeError):
+        pass
+
+    # Return original text if not JSON or can't parse
+    return error_text
+
+
+def _extract_error_details(response: httpx.Response) -> str:
+    """Extract detailed error message from API response."""
+    try:
+        # Try to parse JSON error response
+        error_data = response.json()
+
+        # Common error response formats
+        if isinstance(error_data, dict):
+            # WatsonX format: {"errors": [{"code": "...", "message": "..."}], ...}
+            if "errors" in error_data and isinstance(error_data["errors"], list):
+                errors = error_data["errors"]
+                if len(errors) > 0 and isinstance(errors[0], dict):
+                    # Extract just the message from the first error
+                    message = errors[0].get("message", "")
+                    if message:
+                        return message
+                    # Fallback to code if no message
+                    code = errors[0].get("code", "")
+                    if code:
+                        return f"Error: {code}"
+
+            # OpenAI/Anthropic format: {"error": {"message": "...", "type": "...", "code": "..."}}
+            if "error" in error_data:
+                error_obj = error_data["error"]
+                if isinstance(error_obj, dict):
+                    message = error_obj.get("message", "")
+                    error_type = error_obj.get("type", "")
+                    code = error_obj.get("code", "")
+                    if message:
+                        details = message
+                        if error_type:
+                            details += f" (type: {error_type})"
+                        if code:
+                            details += f" (code: {code})"
+                        return details
+
+            # Direct message field: {"message": "..."}
+            if "message" in error_data:
+                return error_data["message"]
+
+            # Generic format: {"detail": "..."}
+            if "detail" in error_data:
+                return error_data["detail"]
+
+        # If JSON parsing worked but no structured error found, try parsing text
+        response_text = response.text[:500]
+        parsed = _parse_json_error_message(response_text)
+        if parsed != response_text:
+            return parsed
+        return response_text
+
+    except (json.JSONDecodeError, ValueError):
+        # If JSON parsing fails, try parsing the text as JSON string
+        response_text = response.text[:500] if response.text else f"HTTP {response.status_code}"
+        parsed = _parse_json_error_message(response_text)
+        if parsed != response_text:
+            return parsed
+        return response_text
+
+
 async def validate_provider_setup(
     provider: str,
     api_key: str = None,
@@ -30,7 +131,7 @@ async def validate_provider_setup(
            If False, performs lightweight validation (no credits consumed). Default: False.
 
     Raises:
-        Exception: If validation fails with message "Setup failed, please try again or select a different provider."
+        Exception: If validation fails, raises the original exception with the actual error message.
     """
     provider_lower = provider.lower()
@@ -70,7 +171,8 @@ async def validate_provider_setup(
     except Exception as e:
         logger.error(f"Validation failed for provider {provider_lower}: {str(e)}")
-        raise Exception("Setup failed, please try again or select a different provider.")
+        # Preserve the original error message instead of replacing it with a generic one
+        raise
 
 
 async def test_lightweight_health(
@@ -155,8 +257,9 @@ async def _test_openai_lightweight_health(api_key: str) -> None:
         )
 
         if response.status_code != 200:
-            logger.error(f"OpenAI lightweight health check failed: {response.status_code}")
-            raise Exception(f"OpenAI API key validation failed: {response.status_code}")
+            error_details = _extract_error_details(response)
+            logger.error(f"OpenAI lightweight health check failed: {response.status_code} - {error_details}")
+            raise Exception(f"OpenAI API key validation failed: {error_details}")
 
         logger.info("OpenAI lightweight health check passed")
@@ -225,8 +328,9 @@ async def _test_openai_completion_with_tools(api_key: str, llm_model: str) -> None:
         )
 
         if response.status_code != 200:
-            logger.error(f"OpenAI completion test failed: {response.status_code} - {response.text}")
-            raise Exception(f"OpenAI API error: {response.status_code}")
+            error_details = _extract_error_details(response)
+            logger.error(f"OpenAI completion test failed: {response.status_code} - {error_details}")
+            raise Exception(f"OpenAI API error: {error_details}")
 
         logger.info("OpenAI completion with tool calling test passed")
@@ -260,8 +364,9 @@ async def _test_openai_embedding(api_key: str, embedding_model: str) -> None:
         )
 
         if response.status_code != 200:
-            logger.error(f"OpenAI embedding test failed: {response.status_code} - {response.text}")
-            raise Exception(f"OpenAI API error: {response.status_code}")
+            error_details = _extract_error_details(response)
+            logger.error(f"OpenAI embedding test failed: {response.status_code} - {error_details}")
+            raise Exception(f"OpenAI API error: {error_details}")
 
         data = response.json()
         if not data.get("data") or len(data["data"]) == 0:
@@ -300,8 +405,9 @@ async def _test_watsonx_lightweight_health(
         )
 
         if token_response.status_code != 200:
-            logger.error(f"IBM IAM token request failed: {token_response.status_code}")
-            raise Exception("Failed to authenticate with IBM Watson - invalid API key")
+            error_details = _extract_error_details(token_response)
+            logger.error(f"IBM IAM token request failed: {token_response.status_code} - {error_details}")
+            raise Exception(f"Failed to authenticate with IBM Watson: {error_details}")
 
         bearer_token = token_response.json().get("access_token")
         if not bearer_token:
@@ -335,8 +441,9 @@ async def _test_watsonx_completion_with_tools(
         )
 
         if token_response.status_code != 200:
-            logger.error(f"IBM IAM token request failed: {token_response.status_code}")
-            raise Exception("Failed to authenticate with IBM Watson")
+            error_details = _extract_error_details(token_response)
+            logger.error(f"IBM IAM token request failed: {token_response.status_code} - {error_details}")
+            raise Exception(f"Failed to authenticate with IBM Watson: {error_details}")
 
         bearer_token = token_response.json().get("access_token")
         if not bearer_token:
@@ -388,8 +495,11 @@ async def _test_watsonx_completion_with_tools(
         )
 
         if response.status_code != 200:
-            logger.error(f"IBM Watson completion test failed: {response.status_code} - {response.text}")
-            raise Exception(f"IBM Watson API error: {response.status_code}")
+            error_details = _extract_error_details(response)
+            logger.error(f"IBM Watson completion test failed: {response.status_code} - {error_details}")
+            # If error_details is still JSON, parse it to extract just the message
+            parsed_details = _parse_json_error_message(error_details)
+            raise Exception(f"IBM Watson API error: {parsed_details}")
 
         logger.info("IBM Watson completion with tool calling test passed")
@@ -398,6 +508,13 @@ async def _test_watsonx_completion_with_tools(
         raise Exception("Request timed out")
     except Exception as e:
         logger.error(f"IBM Watson completion test failed: {str(e)}")
+        # If the error message contains JSON, parse it to extract just the message
+        error_str = str(e)
+        if "IBM Watson API error: " in error_str:
+            json_part = error_str.split("IBM Watson API error: ", 1)[1]
+            parsed_message = _parse_json_error_message(json_part)
+            if parsed_message != json_part:
+                raise Exception(f"IBM Watson API error: {parsed_message}")
         raise
@@ -419,8 +536,9 @@ async def _test_watsonx_embedding(
         )
 
         if token_response.status_code != 200:
-            logger.error(f"IBM IAM token request failed: {token_response.status_code}")
-            raise Exception("Failed to authenticate with IBM Watson")
+            error_details = _extract_error_details(token_response)
+            logger.error(f"IBM IAM token request failed: {token_response.status_code} - {error_details}")
+            raise Exception(f"Failed to authenticate with IBM Watson: {error_details}")
 
         bearer_token = token_response.json().get("access_token")
         if not bearer_token:
@@ -450,8 +568,11 @@ async def _test_watsonx_embedding(
         )
 
         if response.status_code != 200:
-            logger.error(f"IBM Watson embedding test failed: {response.status_code} - {response.text}")
-            raise Exception(f"IBM Watson API error: {response.status_code}")
+            error_details = _extract_error_details(response)
+            logger.error(f"IBM Watson embedding test failed: {response.status_code} - {error_details}")
+            # If error_details is still JSON, parse it to extract just the message
+            parsed_details = _parse_json_error_message(error_details)
+            raise Exception(f"IBM Watson API error: {parsed_details}")
 
         data = response.json()
         if not data.get("results") or len(data["results"]) == 0:
@@ -464,6 +585,13 @@ async def _test_watsonx_embedding(
         raise Exception("Request timed out")
     except Exception as e:
         logger.error(f"IBM Watson embedding test failed: {str(e)}")
+        # If the error message contains JSON, parse it to extract just the message
+        error_str = str(e)
+        if "IBM Watson API error: " in error_str:
+            json_part = error_str.split("IBM Watson API error: ", 1)[1]
+            parsed_message = _parse_json_error_message(json_part)
+            if parsed_message != json_part:
+                raise Exception(f"IBM Watson API error: {parsed_message}")
         raise
@@ -483,8 +611,9 @@ async def _test_ollama_lightweight_health(endpoint: str) -> None:
         )
 
         if response.status_code != 200:
-            logger.error(f"Ollama lightweight health check failed: {response.status_code}")
-            raise Exception(f"Ollama endpoint not responding: {response.status_code}")
+            error_details = _extract_error_details(response)
+            logger.error(f"Ollama lightweight health check failed: {response.status_code} - {error_details}")
+            raise Exception(f"Ollama endpoint not responding: {error_details}")
 
         logger.info("Ollama lightweight health check passed")
@@ -537,8 +666,9 @@ async def _test_ollama_completion_with_tools(llm_model: str, endpoint: str) -> None:
         )
 
         if response.status_code != 200:
-            logger.error(f"Ollama completion test failed: {response.status_code} - {response.text}")
-            raise Exception(f"Ollama API error: {response.status_code}")
+            error_details = _extract_error_details(response)
+            logger.error(f"Ollama completion test failed: {response.status_code} - {error_details}")
+            raise Exception(f"Ollama API error: {error_details}")
 
         logger.info("Ollama completion with tool calling test passed")
@@ -569,8 +699,9 @@ async def _test_ollama_embedding(embedding_model: str, endpoint: str) -> None:
         )
 
         if response.status_code != 200:
-            logger.error(f"Ollama embedding test failed: {response.status_code} - {response.text}")
-            raise Exception(f"Ollama API error: {response.status_code}")
+            error_details = _extract_error_details(response)
+            logger.error(f"Ollama embedding test failed: {response.status_code} - {error_details}")
+            raise Exception(f"Ollama API error: {error_details}")
 
         data = response.json()
         if not data.get("embedding"):
@@ -616,8 +747,9 @@ async def _test_anthropic_lightweight_health(api_key: str) -> None:
         )
 
         if response.status_code != 200:
-            logger.error(f"Anthropic lightweight health check failed: {response.status_code}")
-            raise Exception(f"Anthropic API key validation failed: {response.status_code}")
+            error_details = _extract_error_details(response)
+            logger.error(f"Anthropic lightweight health check failed: {response.status_code} - {error_details}")
+            raise Exception(f"Anthropic API key validation failed: {error_details}")
 
         logger.info("Anthropic lightweight health check passed")
@@ -672,8 +804,9 @@ async def _test_anthropic_completion_with_tools(api_key: str, llm_model: str) -> None:
         )
 
         if response.status_code != 200:
-            logger.error(f"Anthropic completion test failed: {response.status_code} - {response.text}")
-            raise Exception(f"Anthropic API error: {response.status_code}")
+            error_details = _extract_error_details(response)
+            logger.error(f"Anthropic completion test failed: {response.status_code} - {error_details}")
+            raise Exception(f"Anthropic API error: {error_details}")
 
         logger.info("Anthropic completion with tool calling test passed")