"use client"; import { useQueryClient } from "@tanstack/react-query"; import { AnimatePresence, motion } from "framer-motion"; import { X } from "lucide-react"; import { useEffect, useRef, useState } from "react"; import { toast } from "sonner"; import { type OnboardingVariables, useOnboardingMutation, } from "@/app/api/mutations/useOnboardingMutation"; import { useOnboardingRollbackMutation } from "@/app/api/mutations/useOnboardingRollbackMutation"; import { useGetSettingsQuery } from "@/app/api/queries/useGetSettingsQuery"; import { useGetTasksQuery } from "@/app/api/queries/useGetTasksQuery"; import type { ProviderHealthResponse } from "@/app/api/queries/useProviderHealthQuery"; import { useDoclingHealth } from "@/components/docling-health-banner"; import AnthropicLogo from "@/components/icons/anthropic-logo"; import IBMLogo from "@/components/icons/ibm-logo"; import OllamaLogo from "@/components/icons/ollama-logo"; import OpenAILogo from "@/components/icons/openai-logo"; import { Button } from "@/components/ui/button"; import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs"; import { Tooltip, TooltipContent, TooltipTrigger, } from "@/components/ui/tooltip"; import { ONBOARDING_CARD_STEPS_KEY } from "@/lib/constants"; import { cn } from "@/lib/utils"; import { AnimatedProviderSteps } from "./animated-provider-steps"; import { AnthropicOnboarding } from "./anthropic-onboarding"; import { IBMOnboarding } from "./ibm-onboarding"; import { OllamaOnboarding } from "./ollama-onboarding"; import { OpenAIOnboarding } from "./openai-onboarding"; import { TabTrigger } from "./tab-trigger"; interface OnboardingCardProps { onComplete: () => void; isCompleted?: boolean; isEmbedding?: boolean; setIsLoadingModels?: (isLoading: boolean) => void; setLoadingStatus?: (status: string[]) => void; } const STEP_LIST = [ "Setting up your model provider", "Defining schema", "Configuring Langflow", ]; const EMBEDDING_STEP_LIST = [ "Setting up your model provider", 
"Defining schema", "Configuring Langflow", "Ingesting sample data", ]; const OnboardingCard = ({ onComplete, isEmbedding = false, isCompleted = false, }: OnboardingCardProps) => { const { isHealthy: isDoclingHealthy } = useDoclingHealth(); const [modelProvider, setModelProvider] = useState( isEmbedding ? "openai" : "anthropic", ); const [sampleDataset, setSampleDataset] = useState(true); const [isLoadingModels, setIsLoadingModels] = useState(false); const queryClient = useQueryClient(); // Fetch current settings to check if providers are already configured const { data: currentSettings } = useGetSettingsQuery(); // Auto-select the first provider that has an API key set in env vars useEffect(() => { if (!currentSettings?.providers) return; // Define provider order based on whether it's embedding or not const providerOrder = isEmbedding ? ["openai", "watsonx", "ollama"] : ["anthropic", "openai", "watsonx", "ollama"]; // Find the first provider with an API key for (const provider of providerOrder) { if ( provider === "anthropic" && currentSettings.providers.anthropic?.has_api_key ) { setModelProvider("anthropic"); return; } else if (provider === "openai" && currentSettings.providers.openai?.has_api_key) { setModelProvider("openai"); return; } else if ( provider === "watsonx" && currentSettings.providers.watsonx?.has_api_key ) { setModelProvider("watsonx"); return; } else if ( provider === "ollama" && currentSettings.providers.ollama?.endpoint ) { setModelProvider("ollama"); return; } } }, [currentSettings, isEmbedding]); const handleSetModelProvider = (provider: string) => { setIsLoadingModels(false); setModelProvider(provider); setSettings({ [isEmbedding ? 
"embedding_provider" : "llm_provider"]: provider, embedding_model: "", llm_model: "", }); setError(null); }; // Check if the selected provider is already configured const isProviderAlreadyConfigured = (provider: string): boolean => { if (!isEmbedding || !currentSettings?.providers) return false; // Check if provider has been explicitly configured (not just from env vars) if (provider === "openai") { return currentSettings.providers.openai?.configured === true; } else if (provider === "anthropic") { return currentSettings.providers.anthropic?.configured === true; } else if (provider === "watsonx") { return currentSettings.providers.watsonx?.configured === true; } else if (provider === "ollama") { return currentSettings.providers.ollama?.configured === true; } return false; }; const showProviderConfiguredMessage = isProviderAlreadyConfigured(modelProvider); const providerAlreadyConfigured = isEmbedding && showProviderConfiguredMessage; const totalSteps = isEmbedding ? EMBEDDING_STEP_LIST.length : STEP_LIST.length; const [settings, setSettings] = useState({ [isEmbedding ? "embedding_provider" : "llm_provider"]: modelProvider, embedding_model: "", llm_model: "", // Provider-specific fields will be set by provider components openai_api_key: "", anthropic_api_key: "", watsonx_api_key: "", watsonx_endpoint: "", watsonx_project_id: "", ollama_endpoint: "", }); const [currentStep, setCurrentStep] = useState( isCompleted ? totalSteps : null, ); const [processingStartTime, setProcessingStartTime] = useState( null, ); const [error, setError] = useState(null); // Track which tasks we've already handled to prevent infinite loops const handledFailedTasksRef = useRef>(new Set()); // Query tasks to track completion const { data: tasks } = useGetTasksQuery({ enabled: currentStep !== null, // Only poll when onboarding has started refetchInterval: currentStep !== null ? 
1000 : false, // Poll every 1 second during onboarding }); // Rollback mutation const rollbackMutation = useOnboardingRollbackMutation({ onSuccess: () => { console.log("Onboarding rolled back successfully"); // Reset to provider selection step // Error message is already set before calling mutate setCurrentStep(null); }, onError: (error) => { console.error("Failed to rollback onboarding", error); // Preserve existing error message if set, otherwise show rollback error setError((prevError) => prevError || `Failed to rollback: ${error.message}`); // Still reset to provider selection even if rollback fails setCurrentStep(null); }, }); // Monitor tasks and call onComplete when all tasks are done useEffect(() => { if (currentStep === null || !tasks || !isEmbedding) { return; } // Check if there are any active tasks (pending, running, or processing) const activeTasks = tasks.find( (task) => task.status === "pending" || task.status === "running" || task.status === "processing", ); // Check if any file failed in completed tasks const completedTasks = tasks.filter( (task) => task.status === "completed" ); // Check if any completed task has at least one failed file const taskWithFailedFile = completedTasks.find((task) => { // Must have files object if (!task.files || typeof task.files !== "object") { return false; } const fileEntries = Object.values(task.files); // Must have at least one file if (fileEntries.length === 0) { return false; } // Check if any file has failed status const hasFailedFile = fileEntries.some( (file) => file.status === "failed" || file.status === "error" ); return hasFailedFile; }); // If any file failed, show error and jump back one step (like onboardingMutation.onError) // Only handle if we haven't already handled this task if ( taskWithFailedFile && !rollbackMutation.isPending && !isCompleted && !handledFailedTasksRef.current.has(taskWithFailedFile.task_id) ) { console.error("File failed in task, jumping back one step", taskWithFailedFile); // Mark 
this task as handled to prevent infinite loops handledFailedTasksRef.current.add(taskWithFailedFile.task_id); // Extract error messages from failed files const errorMessages: string[] = []; if (taskWithFailedFile.files) { Object.values(taskWithFailedFile.files).forEach((file) => { if ((file.status === "failed" || file.status === "error") && file.error) { errorMessages.push(file.error); } }); } // Also check task-level error if (taskWithFailedFile.error) { errorMessages.push(taskWithFailedFile.error); } // Use the first error message, or a generic message if no errors found const errorMessage = errorMessages.length > 0 ? errorMessages[0] : "Sample data file failed to ingest. Please try again with a different configuration."; // Set error message and jump back one step (exactly like onboardingMutation.onError) setError(errorMessage); setCurrentStep(totalSteps); // Jump back one step after 1 second (go back to the step before ingestion) // For embedding: totalSteps is 4, ingestion is step 3, so go back to step 2 // For LLM: totalSteps is 3, ingestion is step 2, so go back to step 1 setTimeout(() => { // Go back to the step before the last step (which is ingestion) const previousStep = totalSteps > 1 ? totalSteps - 2 : 0; setCurrentStep(previousStep); }, 1000); return; } // If no active tasks and we've started onboarding, complete it if ( (!activeTasks || (activeTasks.processed_files ?? 
0) > 0) && tasks.length > 0 && !isCompleted && !taskWithFailedFile ) { // Set to final step to show "Done" setCurrentStep(totalSteps); // Wait a bit before completing setTimeout(() => { onComplete(); }, 1000); } }, [tasks, currentStep, onComplete, isCompleted, isEmbedding, totalSteps, rollbackMutation]); // Mutations const onboardingMutation = useOnboardingMutation({ onSuccess: (data) => { console.log("Onboarding completed successfully", data); // Save OpenRAG docs filter ID if sample data was ingested if (data.openrag_docs_filter_id && typeof window !== "undefined") { localStorage.setItem( "onboarding_openrag_docs_filter_id", data.openrag_docs_filter_id ); console.log("Saved OpenRAG docs filter ID:", data.openrag_docs_filter_id); } // Update provider health cache to healthy since backend just validated const provider = (isEmbedding ? settings.embedding_provider : settings.llm_provider) || modelProvider; const healthData: ProviderHealthResponse = { status: "healthy", message: "Provider is configured and working correctly", provider: provider, }; queryClient.setQueryData(["provider", "health"], healthData); setError(null); if (!isEmbedding) { setCurrentStep(totalSteps); setTimeout(() => { onComplete(); }, 1000); } else { setCurrentStep(0); } }, onError: (error) => { setError(error.message); setCurrentStep(totalSteps); // Reset to provider selection after 1 second setTimeout(() => { setCurrentStep(null); }, 1000); }, }); const handleComplete = () => { const currentProvider = isEmbedding ? 
settings.embedding_provider : settings.llm_provider; if ( !currentProvider || (isEmbedding && !settings.embedding_model && !showProviderConfiguredMessage) || (!isEmbedding && !settings.llm_model) ) { toast.error("Please complete all required fields"); return; } // Clear any previous error setError(null); // Prepare onboarding data with provider-specific fields const onboardingData: OnboardingVariables = { sample_data: sampleDataset, }; // Set the provider field if (isEmbedding) { onboardingData.embedding_provider = currentProvider; // If provider is already configured, use the existing embedding model from settings // Otherwise, use the embedding model from the form if ( showProviderConfiguredMessage && currentSettings?.knowledge?.embedding_model ) { onboardingData.embedding_model = currentSettings.knowledge.embedding_model; } else { onboardingData.embedding_model = settings.embedding_model; } } else { onboardingData.llm_provider = currentProvider; onboardingData.llm_model = settings.llm_model; } // Add provider-specific credentials based on the selected provider if (currentProvider === "openai" && settings.openai_api_key) { onboardingData.openai_api_key = settings.openai_api_key; } else if (currentProvider === "anthropic" && settings.anthropic_api_key) { onboardingData.anthropic_api_key = settings.anthropic_api_key; } else if (currentProvider === "watsonx") { if (settings.watsonx_api_key) { onboardingData.watsonx_api_key = settings.watsonx_api_key; } if (settings.watsonx_endpoint) { onboardingData.watsonx_endpoint = settings.watsonx_endpoint; } if (settings.watsonx_project_id) { onboardingData.watsonx_project_id = settings.watsonx_project_id; } } else if (currentProvider === "ollama" && settings.ollama_endpoint) { onboardingData.ollama_endpoint = settings.ollama_endpoint; } // Record the start time when user clicks Complete setProcessingStartTime(Date.now()); onboardingMutation.mutate(onboardingData); setCurrentStep(0); }; const isComplete = (isEmbedding && 
(!!settings.embedding_model || showProviderConfiguredMessage)) || (!isEmbedding && !!settings.llm_model && isDoclingHealthy); return ( {currentStep === null ? (
// NOTE(review): the JSX markup of the render tree was stripped in the collapse
// (all element tags between `<` and `>` are missing). Only the embedded
// expressions survive: the error banner, the provider tab labels (Anthropic /
// OpenAI / IBM watsonx.ai / Ollama, with Anthropic hidden in the embedding
// flow), the disabled-submit tooltip text, and the post-start progress branch.
// This section cannot be reconstructed from what is here — recover the markup
// from version control before making any change.
{error && ( 
{error} 
)} 
{!isEmbedding && ( 
Anthropic 
)} 
OpenAI 
IBM watsonx.ai 
Ollama 
{!isEmbedding && ( )} 
{!isComplete && ( {isLoadingModels ? "Loading models..." : !!settings.llm_model && !!settings.embedding_model && !isDoclingHealthy ? "docling-serve must be running to continue" : "Please fill in all required fields"} )} 
) : ( )} 
); }; export default OnboardingCard;