+
+
+ );
}
diff --git a/frontend/src/app/onboarding/components/advanced.tsx b/frontend/src/app/onboarding/components/advanced.tsx
index 565b5af9..94145b35 100644
--- a/frontend/src/app/onboarding/components/advanced.tsx
+++ b/frontend/src/app/onboarding/components/advanced.tsx
@@ -75,20 +75,6 @@ export function AdvancedOnboarding({
/>
)}
- {(hasLanguageModels || hasEmbeddingModels) && !updatedOnboarding &&
- )}
diff --git a/frontend/src/app/onboarding/components/animated-provider-steps.tsx b/frontend/src/app/onboarding/components/animated-provider-steps.tsx
new file mode 100644
index 00000000..2d943789
--- /dev/null
+++ b/frontend/src/app/onboarding/components/animated-provider-steps.tsx
@@ -0,0 +1,87 @@
+"use client";
+
+import { AnimatePresence, motion } from "framer-motion";
+import { CheckIcon } from "lucide-react";
+import { useEffect } from "react";
+import { AnimatedProcessingIcon } from "@/components/ui/animated-processing-icon";
+import { cn } from "@/lib/utils";
+
+export function AnimatedProviderSteps({
+ currentStep,
+ setCurrentStep,
+ steps,
+}: {
+ currentStep: number;
+ setCurrentStep: (step: number) => void;
+ steps: string[];
+}) {
+
+ useEffect(() => {
+ if (currentStep < steps.length - 1) {
+ const interval = setInterval(() => {
+ setCurrentStep(currentStep + 1);
+ }, 1500);
+ return () => clearInterval(interval);
+ }
+ }, [currentStep, setCurrentStep, steps]);
+
+ const isDone = currentStep >= steps.length;
+
+ return (
+
+ );
+}
diff --git a/frontend/src/app/onboarding/components/ibm-onboarding.tsx b/frontend/src/app/onboarding/components/ibm-onboarding.tsx
index b696e220..cd638025 100644
--- a/frontend/src/app/onboarding/components/ibm-onboarding.tsx
+++ b/frontend/src/app/onboarding/components/ibm-onboarding.tsx
@@ -1,4 +1,4 @@
-import { useState } from "react";
+import { useEffect, useState } from "react";
import { LabelInput } from "@/components/label-input";
import { LabelWrapper } from "@/components/label-wrapper";
import IBMLogo from "@/components/logo/ibm-logo";
@@ -14,10 +14,14 @@ export function IBMOnboarding({
setSettings,
sampleDataset,
setSampleDataset,
+ setIsLoadingModels,
+ setLoadingStatus,
}: {
setSettings: (settings: OnboardingVariables) => void;
sampleDataset: boolean;
setSampleDataset: (dataset: boolean) => void;
+ setIsLoadingModels?: (isLoading: boolean) => void;
+ setLoadingStatus?: (status: string[]) => void;
}) {
const [endpoint, setEndpoint] = useState("https://us-south.ml.cloud.ibm.com");
const [apiKey, setApiKey] = useState("");
@@ -99,6 +103,19 @@ export function IBMOnboarding({
},
setSettings,
);
+
+ // Notify parent about loading state
+ useEffect(() => {
+ setIsLoadingModels?.(isLoadingModels);
+
+ // Set detailed loading status
+ if (isLoadingModels) {
+ const status = ["Connecting to IBM watsonx.ai", "Fetching language models", "Fetching embedding models"];
+ setLoadingStatus?.(status);
+ } else {
+ setLoadingStatus?.([]);
+ }
+ }, [isLoadingModels, setIsLoadingModels, setLoadingStatus]);
return (
<>
diff --git a/frontend/src/app/onboarding/components/ollama-onboarding.tsx b/frontend/src/app/onboarding/components/ollama-onboarding.tsx
index b40e6714..82d86d83 100644
--- a/frontend/src/app/onboarding/components/ollama-onboarding.tsx
+++ b/frontend/src/app/onboarding/components/ollama-onboarding.tsx
@@ -7,154 +7,160 @@ import type { OnboardingVariables } from "../../api/mutations/useOnboardingMutat
import { useGetOllamaModelsQuery } from "../../api/queries/useGetModelsQuery";
import { useModelSelection } from "../hooks/useModelSelection";
import { useUpdateSettings } from "../hooks/useUpdateSettings";
-import { AdvancedOnboarding } from "./advanced";
import { ModelSelector } from "./model-selector";
export function OllamaOnboarding({
- setSettings,
- sampleDataset,
- setSampleDataset,
+ setSettings,
+ sampleDataset,
+ setSampleDataset,
+ setIsLoadingModels,
+ setLoadingStatus,
}: {
- setSettings: (settings: OnboardingVariables) => void;
- sampleDataset: boolean;
- setSampleDataset: (dataset: boolean) => void;
+ setSettings: (settings: OnboardingVariables) => void;
+ sampleDataset: boolean;
+ setSampleDataset: (dataset: boolean) => void;
+ setIsLoadingModels?: (isLoading: boolean) => void;
+ setLoadingStatus?: (status: string[]) => void;
}) {
- const [endpoint, setEndpoint] = useState(`http://localhost:11434`);
- const [showConnecting, setShowConnecting] = useState(false);
- const debouncedEndpoint = useDebouncedValue(endpoint, 500);
+ const [endpoint, setEndpoint] = useState(`http://localhost:11434`);
+ const [showConnecting, setShowConnecting] = useState(false);
+ const debouncedEndpoint = useDebouncedValue(endpoint, 500);
- // Fetch models from API when endpoint is provided (debounced)
- const {
- data: modelsData,
- isLoading: isLoadingModels,
- error: modelsError,
- } = useGetOllamaModelsQuery(
- debouncedEndpoint ? { endpoint: debouncedEndpoint } : undefined,
- );
+ // Fetch models from API when endpoint is provided (debounced)
+ const {
+ data: modelsData,
+ isLoading: isLoadingModels,
+ error: modelsError,
+ } = useGetOllamaModelsQuery(
+ debouncedEndpoint ? { endpoint: debouncedEndpoint } : undefined,
+ );
- // Use custom hook for model selection logic
- const {
- languageModel,
- embeddingModel,
- setLanguageModel,
- setEmbeddingModel,
- languageModels,
- embeddingModels,
- } = useModelSelection(modelsData);
+ // Use custom hook for model selection logic
+ const {
+ languageModel,
+ embeddingModel,
+ setLanguageModel,
+ setEmbeddingModel,
+ languageModels,
+ embeddingModels,
+ } = useModelSelection(modelsData);
- // Handle delayed display of connecting state
- useEffect(() => {
- let timeoutId: NodeJS.Timeout;
+ // Handle delayed display of connecting state
+ useEffect(() => {
+ let timeoutId: NodeJS.Timeout;
- if (debouncedEndpoint && isLoadingModels) {
- timeoutId = setTimeout(() => {
- setShowConnecting(true);
- }, 500);
- } else {
- setShowConnecting(false);
- }
+ if (debouncedEndpoint && isLoadingModels) {
+ timeoutId = setTimeout(() => {
+ setShowConnecting(true);
+ }, 500);
+ } else {
+ setShowConnecting(false);
+ }
- return () => {
- if (timeoutId) {
- clearTimeout(timeoutId);
- }
- };
- }, [debouncedEndpoint, isLoadingModels]);
+ return () => {
+ if (timeoutId) {
+ clearTimeout(timeoutId);
+ }
+ };
+ }, [debouncedEndpoint, isLoadingModels]);
- const handleSampleDatasetChange = (dataset: boolean) => {
- setSampleDataset(dataset);
- };
+ // Update settings when values change
+ useUpdateSettings(
+ "ollama",
+ {
+ endpoint,
+ languageModel,
+ embeddingModel,
+ },
+ setSettings,
+ );
- // Update settings when values change
- useUpdateSettings(
- "ollama",
- {
- endpoint,
- languageModel,
- embeddingModel,
- },
- setSettings,
- );
+ // Notify parent about loading state
+ useEffect(() => {
+ setIsLoadingModels?.(isLoadingModels);
- // Check validation state based on models query
- const hasConnectionError = debouncedEndpoint && modelsError;
- const hasNoModels =
- modelsData &&
- !modelsData.language_models?.length &&
- !modelsData.embedding_models?.length;
+ // Set detailed loading status
+ if (isLoadingModels) {
+ const status = ["Connecting to Ollama", "Fetching language models", "Fetching embedding models"];
+ setLoadingStatus?.(status);
+ } else {
+ setLoadingStatus?.([]);
+ }
+ }, [isLoadingModels, setIsLoadingModels, setLoadingStatus]);
- return (
- <>
-
-
-
setEndpoint(e.target.value)}
- />
- {showConnecting && (
-
- Connecting to Ollama server...
-
- )}
- {hasConnectionError && (
-
- Can’t reach Ollama at {debouncedEndpoint}. Update the base URL or
- start the server.
-
- )}
- {hasNoModels && (
-
- No models found. Install embedding and agent models on your Ollama
- server.
-
- )}
-
-
- }
- noOptionsPlaceholder={
- isLoadingModels
- ? "Loading models..."
- : "No embedding models detected. Install an embedding model to continue."
- }
- value={embeddingModel}
- onValueChange={setEmbeddingModel}
- />
-
-
- }
- noOptionsPlaceholder={
- isLoadingModels
- ? "Loading models..."
- : "No language models detected. Install a language model to continue."
- }
- value={languageModel}
- onValueChange={setLanguageModel}
- />
-
-
-
- >
- );
+ // Check validation state based on models query
+ const hasConnectionError = debouncedEndpoint && modelsError;
+ const hasNoModels =
+ modelsData &&
+ !modelsData.language_models?.length &&
+ !modelsData.embedding_models?.length;
+
+ return (
+
+
+
setEndpoint(e.target.value)}
+ />
+ {showConnecting && (
+
+ Connecting to Ollama server...
+
+ )}
+ {hasConnectionError && (
+
+ Can’t reach Ollama at {debouncedEndpoint}. Update the base URL or
+ start the server.
+
+ )}
+ {hasNoModels && (
+
+ No models found. Install embedding and agent models on your Ollama
+ server.
+
+ )}
+
+
+ }
+ noOptionsPlaceholder={
+ isLoadingModels
+ ? "Loading models..."
+ : "No embedding models detected. Install an embedding model to continue."
+ }
+ value={embeddingModel}
+ onValueChange={setEmbeddingModel}
+ />
+
+
+ }
+ noOptionsPlaceholder={
+ isLoadingModels
+ ? "Loading models..."
+ : "No language models detected. Install a language model to continue."
+ }
+ value={languageModel}
+ onValueChange={setLanguageModel}
+ />
+
+
+ );
}
diff --git a/frontend/src/app/onboarding/components/onboarding-card.tsx b/frontend/src/app/onboarding/components/onboarding-card.tsx
index 58f629f7..64413fe3 100644
--- a/frontend/src/app/onboarding/components/onboarding-card.tsx
+++ b/frontend/src/app/onboarding/components/onboarding-card.tsx
@@ -1,182 +1,322 @@
"use client";
-import { useState } from "react";
+import { AnimatePresence, motion } from "framer-motion";
+import { useEffect, useState } from "react";
import { toast } from "sonner";
import {
- type OnboardingVariables,
- useOnboardingMutation,
+ type OnboardingVariables,
+ useOnboardingMutation,
} from "@/app/api/mutations/useOnboardingMutation";
+import { useGetTasksQuery } from "@/app/api/queries/useGetTasksQuery";
import { useDoclingHealth } from "@/components/docling-health-banner";
import IBMLogo from "@/components/logo/ibm-logo";
import OllamaLogo from "@/components/logo/ollama-logo";
import OpenAILogo from "@/components/logo/openai-logo";
+import { AnimatedProcessingIcon } from "@/components/ui/animated-processing-icon";
import { Button } from "@/components/ui/button";
import {
- Card,
- CardContent,
- CardFooter,
- CardHeader,
+ Card,
+ CardContent,
+ CardFooter,
+ CardHeader,
} from "@/components/ui/card";
import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs";
import {
- Tooltip,
- TooltipContent,
- TooltipTrigger,
+ Tooltip,
+ TooltipContent,
+ TooltipTrigger,
} from "@/components/ui/tooltip";
+import { cn } from "@/lib/utils";
+import { AnimatedProviderSteps } from "./animated-provider-steps";
import { IBMOnboarding } from "./ibm-onboarding";
import { OllamaOnboarding } from "./ollama-onboarding";
import { OpenAIOnboarding } from "./openai-onboarding";
interface OnboardingCardProps {
- onComplete: () => void;
+ onComplete: () => void;
+ setIsLoadingModels?: (isLoading: boolean) => void;
+ setLoadingStatus?: (status: string[]) => void;
}
-const OnboardingCard = ({ onComplete }: OnboardingCardProps) => {
- const updatedOnboarding = process.env.UPDATED_ONBOARDING === "true";
- const { isHealthy: isDoclingHealthy } = useDoclingHealth();
+const STEP_LIST = [
+ "Setting up your model provider",
+ "Defining schema",
+ "Configuring Langflow",
+ "Ingesting sample data",
+];
- const [modelProvider, setModelProvider] = useState
("openai");
+const TOTAL_PROVIDER_STEPS = STEP_LIST.length;
- const [sampleDataset, setSampleDataset] = useState(true);
+const OnboardingCard = ({
+ onComplete,
+ setIsLoadingModels: setIsLoadingModelsParent,
+ setLoadingStatus: setLoadingStatusParent,
+}: OnboardingCardProps) => {
+ const { isHealthy: isDoclingHealthy } = useDoclingHealth();
- const handleSetModelProvider = (provider: string) => {
- setModelProvider(provider);
- setSettings({
- model_provider: provider,
- embedding_model: "",
- llm_model: "",
- });
- };
+ const [modelProvider, setModelProvider] = useState("openai");
- const [settings, setSettings] = useState({
- model_provider: modelProvider,
- embedding_model: "",
- llm_model: "",
- });
+ const [sampleDataset, setSampleDataset] = useState(true);
- // Mutations
- const onboardingMutation = useOnboardingMutation({
- onSuccess: (data) => {
- console.log("Onboarding completed successfully", data);
- onComplete();
- },
- onError: (error) => {
- toast.error("Failed to complete onboarding", {
- description: error.message,
- });
- },
- });
+ const [isLoadingModels, setIsLoadingModels] = useState(false);
- const handleComplete = () => {
- if (
- !settings.model_provider ||
- !settings.llm_model ||
- !settings.embedding_model
- ) {
- toast.error("Please complete all required fields");
- return;
- }
+ const [loadingStatus, setLoadingStatus] = useState([]);
- // Prepare onboarding data
- const onboardingData: OnboardingVariables = {
- model_provider: settings.model_provider,
- llm_model: settings.llm_model,
- embedding_model: settings.embedding_model,
- sample_data: sampleDataset,
- };
+ const [currentStatusIndex, setCurrentStatusIndex] = useState(0);
- // Add API key if available
- if (settings.api_key) {
- onboardingData.api_key = settings.api_key;
- }
+ // Pass loading state to parent
+ useEffect(() => {
+ setIsLoadingModelsParent?.(isLoadingModels);
+ }, [isLoadingModels, setIsLoadingModelsParent]);
- // Add endpoint if available
- if (settings.endpoint) {
- onboardingData.endpoint = settings.endpoint;
- }
+ useEffect(() => {
+ setLoadingStatusParent?.(loadingStatus);
+ }, [loadingStatus, setLoadingStatusParent]);
- // Add project_id if available
- if (settings.project_id) {
- onboardingData.project_id = settings.project_id;
- }
+ // Cycle through loading status messages once
+ useEffect(() => {
+ if (!isLoadingModels || loadingStatus.length === 0) {
+ setCurrentStatusIndex(0);
+ return;
+ }
- onboardingMutation.mutate(onboardingData);
- };
+ const interval = setInterval(() => {
+ setCurrentStatusIndex((prev) => {
+ const nextIndex = prev + 1;
+ // Stop at the last message
+ if (nextIndex >= loadingStatus.length - 1) {
+ clearInterval(interval);
+ return loadingStatus.length - 1;
+ }
+ return nextIndex;
+ });
+ }, 1500); // Change status every 1.5 seconds
- const isComplete = !!settings.llm_model && !!settings.embedding_model && isDoclingHealthy;
+ return () => clearInterval(interval);
+ }, [isLoadingModels, loadingStatus]);
- return (
-
-
-
-
-
-
- OpenAI
-
-
-
- IBM watsonx.ai
-
-
-
- Ollama
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- {!isComplete && (
-
- {!!settings.llm_model && !!settings.embedding_model && !isDoclingHealthy
- ? "docling-serve must be running to continue"
- : "Please fill in all required fields"}
-
- )}
-
-
-
- )
-}
+ const handleSetModelProvider = (provider: string) => {
+ setModelProvider(provider);
+ setSettings({
+ model_provider: provider,
+ embedding_model: "",
+ llm_model: "",
+ });
+ };
+
+ const [settings, setSettings] = useState({
+ model_provider: modelProvider,
+ embedding_model: "",
+ llm_model: "",
+ });
+
+ const [currentStep, setCurrentStep] = useState(null);
+
+ // Query tasks to track completion
+ const { data: tasks } = useGetTasksQuery({
+ enabled: currentStep !== null, // Only poll when onboarding has started
+ refetchInterval: currentStep !== null ? 1000 : false, // Poll every 1 second during onboarding
+ });
+
+ // Monitor tasks and call onComplete when all tasks are done
+ useEffect(() => {
+ if (currentStep === null || !tasks) {
+ return;
+ }
+
+ // Check if there are any active tasks (pending, running, or processing)
+ const activeTasks = tasks.find(
+ (task) =>
+ task.status === "pending" ||
+ task.status === "running" ||
+ task.status === "processing",
+ );
+
+ // If no active tasks and we've started onboarding, complete it
+ if (
+ (!activeTasks || (activeTasks.processed_files ?? 0) > 0) &&
+ tasks.length > 0
+ ) {
+ // Set to final step to show "Done"
+ setCurrentStep(TOTAL_PROVIDER_STEPS);
+ // Wait a bit before completing
+ setTimeout(() => {
+ onComplete();
+ }, 1000);
+ }
+ }, [tasks, currentStep, onComplete]);
+
+ // Mutations
+ const onboardingMutation = useOnboardingMutation({
+ onSuccess: (data) => {
+ console.log("Onboarding completed successfully", data);
+ setCurrentStep(0);
+ },
+ onError: (error) => {
+ toast.error("Failed to complete onboarding", {
+ description: error.message,
+ });
+ },
+ });
+
+ const handleComplete = () => {
+ if (
+ !settings.model_provider ||
+ !settings.llm_model ||
+ !settings.embedding_model
+ ) {
+ toast.error("Please complete all required fields");
+ return;
+ }
+
+ // Prepare onboarding data
+ const onboardingData: OnboardingVariables = {
+ model_provider: settings.model_provider,
+ llm_model: settings.llm_model,
+ embedding_model: settings.embedding_model,
+ sample_data: sampleDataset,
+ };
+
+ // Add API key if available
+ if (settings.api_key) {
+ onboardingData.api_key = settings.api_key;
+ }
+
+ // Add endpoint if available
+ if (settings.endpoint) {
+ onboardingData.endpoint = settings.endpoint;
+ }
+
+ // Add project_id if available
+ if (settings.project_id) {
+ onboardingData.project_id = settings.project_id;
+ }
+
+ onboardingMutation.mutate(onboardingData);
+ setCurrentStep(0);
+ };
+
+ const isComplete =
+ !!settings.llm_model && !!settings.embedding_model && isDoclingHealthy;
+
+ return (
+
+ {currentStep === null ? (
+
+
+
+
+
+
+
+
+ OpenAI
+
+
+
+
+
+ IBM watsonx.ai
+
+
+
+ Ollama
+
+
+
+
+
+
+
+
+
+
+
+
+
+ {!isLoadingModels && (
+
+
+
+
+
+
+ {!isComplete && (
+
+ {!!settings.llm_model &&
+ !!settings.embedding_model &&
+ !isDoclingHealthy
+ ? "docling-serve must be running to continue"
+ : "Please fill in all required fields"}
+
+ )}
+
+ )}
+
+
+ ) : (
+
+
+
+ )}
+
+ );
+};
export default OnboardingCard;
diff --git a/frontend/src/app/onboarding/components/openai-onboarding.tsx b/frontend/src/app/onboarding/components/openai-onboarding.tsx
index b057efc0..01646ad9 100644
--- a/frontend/src/app/onboarding/components/openai-onboarding.tsx
+++ b/frontend/src/app/onboarding/components/openai-onboarding.tsx
@@ -1,4 +1,4 @@
-import { useState } from "react";
+import { useEffect, useState } from "react";
import { LabelInput } from "@/components/label-input";
import { LabelWrapper } from "@/components/label-wrapper";
import OpenAILogo from "@/components/logo/openai-logo";
@@ -14,10 +14,14 @@ export function OpenAIOnboarding({
setSettings,
sampleDataset,
setSampleDataset,
+ setIsLoadingModels,
+ setLoadingStatus,
}: {
setSettings: (settings: OnboardingVariables) => void;
sampleDataset: boolean;
setSampleDataset: (dataset: boolean) => void;
+ setIsLoadingModels?: (isLoading: boolean) => void;
+ setLoadingStatus?: (status: string[]) => void;
}) {
const [apiKey, setApiKey] = useState("");
const [getFromEnv, setGetFromEnv] = useState(true);
@@ -68,6 +72,19 @@ export function OpenAIOnboarding({
},
setSettings,
);
+
+ // Notify parent about loading state
+ useEffect(() => {
+ setIsLoadingModels?.(isLoadingModels);
+
+ // Set detailed loading status
+ if (isLoadingModels) {
+ const status = ["Connecting to OpenAI", "Fetching language models", "Fetching embedding models"];
+ setLoadingStatus?.(status);
+ } else {
+ setLoadingStatus?.([]);
+ }
+ }, [isLoadingModels, setIsLoadingModels, setLoadingStatus]);
return (
<>
diff --git a/frontend/src/components/animated-conditional.tsx b/frontend/src/components/animated-conditional.tsx
new file mode 100644
index 00000000..e2499890
--- /dev/null
+++ b/frontend/src/components/animated-conditional.tsx
@@ -0,0 +1,53 @@
+import { motion } from "framer-motion";
+import { ANIMATION_DURATION } from "@/lib/constants";
+
+export const AnimatedConditional = ({
+ children,
+ isOpen,
+ className,
+ slide = false,
+ delay,
+ vertical = false,
+}: {
+ children: React.ReactNode;
+ isOpen: boolean;
+ className?: string;
+ delay?: number;
+ vertical?: boolean;
+ slide?: boolean;
+}) => {
+ const animationProperty = slide
+ ? vertical
+ ? "translateY"
+ : "translateX"
+ : vertical
+ ? "height"
+ : "width";
+ const animationValue = isOpen
+ ? slide
+ ? "0px"
+ : "auto"
+ : slide
+ ? "-100%"
+ : "0px";
+
+ return (
+
+ {children}
+
+ );
+};
diff --git a/frontend/src/components/chat-renderer.tsx b/frontend/src/components/chat-renderer.tsx
new file mode 100644
index 00000000..925ddb76
--- /dev/null
+++ b/frontend/src/components/chat-renderer.tsx
@@ -0,0 +1,203 @@
+"use client";
+
+import { motion } from "framer-motion";
+import { usePathname } from "next/navigation";
+import { useEffect, useState } from "react";
+import {
+ type ChatConversation,
+ useGetConversationsQuery,
+} from "@/app/api/queries/useGetConversationsQuery";
+import type { Settings } from "@/app/api/queries/useGetSettingsQuery";
+import { OnboardingContent } from "@/app/new-onboarding/components/onboarding-content";
+import { ProgressBar } from "@/app/new-onboarding/components/progress-bar";
+import { AnimatedConditional } from "@/components/animated-conditional";
+import { Header } from "@/components/header";
+import { Navigation } from "@/components/navigation";
+import { useAuth } from "@/contexts/auth-context";
+import { useChat } from "@/contexts/chat-context";
+import {
+ ANIMATION_DURATION,
+ HEADER_HEIGHT,
+ ONBOARDING_STEP_KEY,
+ SIDEBAR_WIDTH,
+ TOTAL_ONBOARDING_STEPS,
+} from "@/lib/constants";
+import { cn } from "@/lib/utils";
+
+export function ChatRenderer({
+ settings,
+ children,
+}: {
+ settings: Settings | undefined;
+ children: React.ReactNode;
+}) {
+ const pathname = usePathname();
+ const { isAuthenticated, isNoAuthMode } = useAuth();
+ const {
+ endpoint,
+ refreshTrigger,
+ refreshConversations,
+ startNewConversation,
+ } = useChat();
+
+ // Initialize onboarding state based on local storage and settings
+ const [currentStep, setCurrentStep] = useState
(() => {
+ if (typeof window === "undefined") return 0;
+ const savedStep = localStorage.getItem(ONBOARDING_STEP_KEY);
+ return savedStep !== null ? parseInt(savedStep, 10) : 0;
+ });
+
+ const [showLayout, setShowLayout] = useState(() => {
+ if (typeof window === "undefined") return false;
+ const savedStep = localStorage.getItem(ONBOARDING_STEP_KEY);
+ // Show layout if settings.edited is true and if no onboarding step is saved
+ const isEdited = settings?.edited ?? true;
+ return isEdited ? savedStep === null : false;
+ });
+
+ // Only fetch conversations on chat page
+ const isOnChatPage = pathname === "/" || pathname === "/chat";
+ const { data: conversations = [], isLoading: isConversationsLoading } =
+ useGetConversationsQuery(endpoint, refreshTrigger, {
+ enabled: isOnChatPage && (isAuthenticated || isNoAuthMode),
+ }) as { data: ChatConversation[]; isLoading: boolean };
+
+ const handleNewConversation = () => {
+ refreshConversations();
+ startNewConversation();
+ };
+
+ // Save current step to local storage whenever it changes
+ useEffect(() => {
+ if (typeof window !== "undefined" && !showLayout) {
+ localStorage.setItem(ONBOARDING_STEP_KEY, currentStep.toString());
+ }
+ }, [currentStep, showLayout]);
+
+ const handleStepComplete = () => {
+ if (currentStep < TOTAL_ONBOARDING_STEPS - 1) {
+ setCurrentStep(currentStep + 1);
+ } else {
+ // Onboarding is complete - remove from local storage and show layout
+ if (typeof window !== "undefined") {
+ localStorage.removeItem(ONBOARDING_STEP_KEY);
+ }
+ setShowLayout(true);
+ }
+ };
+
+ // List of paths with smaller max-width
+ const smallWidthPaths = ["/settings/connector/new"];
+ const isSmallWidthPath = smallWidthPaths.includes(pathname);
+
+ const x = showLayout ? "0px" : `calc(-${SIDEBAR_WIDTH / 2}px + 50vw)`;
+ const y = showLayout ? "0px" : `calc(-${HEADER_HEIGHT / 2}px + 50vh)`;
+ const translateY = showLayout ? "0px" : `-50vh`;
+ const translateX = showLayout ? "0px" : `-50vw`;
+
+ // For all other pages, render with Langflow-styled navigation and task menu
+ return (
+ <>
+
+
+
+
+ {/* Sidebar Navigation */}
+
+
+
+
+ {/* Main Content */}
+
+
+
+
+
+ {children}
+
+ {!showLayout && (
+
+ )}
+
+
+
+
+
+
+
+ >
+ );
+}
diff --git a/frontend/src/components/header.tsx b/frontend/src/components/header.tsx
new file mode 100644
index 00000000..e542b53e
--- /dev/null
+++ b/frontend/src/components/header.tsx
@@ -0,0 +1,60 @@
+"use client";
+
+import { Bell } from "lucide-react";
+import Logo from "@/components/logo/logo";
+import { UserNav } from "@/components/user-nav";
+import { useTask } from "@/contexts/task-context";
+import { cn } from "@/lib/utils";
+
+export function Header() {
+ const { tasks, toggleMenu } = useTask();
+
+ // Calculate active tasks for the bell icon
+ const activeTasks = tasks.filter(
+ (task) =>
+ task.status === "pending" ||
+ task.status === "running" ||
+ task.status === "processing",
+ );
+
+ return (
+
+
+ {/* Logo/Title */}
+
+
+ OpenRAG
+
+
+
+
+ {/* Knowledge Filter Dropdown */}
+ {/*
*/}
+
+ {/* GitHub Star Button */}
+ {/*
*/}
+
+ {/* Discord Link */}
+ {/*
*/}
+
+ {/* Task Notification Bell */}
+
+
+ {/* Separator */}
+
+
+
+
+
+
+ );
+}
diff --git a/frontend/src/components/layout-wrapper.tsx b/frontend/src/components/layout-wrapper.tsx
index 130ad3f0..36713e74 100644
--- a/frontend/src/components/layout-wrapper.tsx
+++ b/frontend/src/components/layout-wrapper.tsx
@@ -1,39 +1,25 @@
"use client";
-import { Bell, Loader2 } from "lucide-react";
+import { Loader2 } from "lucide-react";
import { usePathname } from "next/navigation";
-import {
- useGetConversationsQuery,
- type ChatConversation,
-} from "@/app/api/queries/useGetConversationsQuery";
import { useGetSettingsQuery } from "@/app/api/queries/useGetSettingsQuery";
import { DoclingHealthBanner } from "@/components/docling-health-banner";
import { KnowledgeFilterPanel } from "@/components/knowledge-filter-panel";
-import Logo from "@/components/logo/logo";
-import { Navigation } from "@/components/navigation";
import { TaskNotificationMenu } from "@/components/task-notification-menu";
-import { UserNav } from "@/components/user-nav";
import { useAuth } from "@/contexts/auth-context";
-import { useChat } from "@/contexts/chat-context";
import { useKnowledgeFilter } from "@/contexts/knowledge-filter-context";
-// import { GitHubStarButton } from "@/components/github-star-button"
-// import { DiscordLink } from "@/components/discord-link"
import { useTask } from "@/contexts/task-context";
-import { useDoclingHealthQuery } from "@/src/app/api/queries/useDoclingHealthQuery";
import { cn } from "@/lib/utils";
+import { useDoclingHealthQuery } from "@/src/app/api/queries/useDoclingHealthQuery";
+import { ChatRenderer } from "./chat-renderer";
export function LayoutWrapper({ children }: { children: React.ReactNode }) {
const pathname = usePathname();
- const { tasks, isMenuOpen, toggleMenu } = useTask();
+ const { isMenuOpen } = useTask();
const { isPanelOpen } = useKnowledgeFilter();
const { isLoading, isAuthenticated, isNoAuthMode } = useAuth();
- const {
- endpoint,
- refreshTrigger,
- refreshConversations,
- startNewConversation,
- } = useChat();
- const { isLoading: isSettingsLoading } = useGetSettingsQuery({
+
+ const { data: settings, isLoading: isSettingsLoading } = useGetSettingsQuery({
enabled: isAuthenticated || isNoAuthMode,
});
const {
@@ -42,40 +28,17 @@ export function LayoutWrapper({ children }: { children: React.ReactNode }) {
isError,
} = useDoclingHealthQuery();
- // Only fetch conversations on chat page
- const isOnChatPage = pathname === "/" || pathname === "/chat";
- const { data: conversations = [], isLoading: isConversationsLoading } =
- useGetConversationsQuery(endpoint, refreshTrigger, {
- enabled: isOnChatPage && (isAuthenticated || isNoAuthMode),
- }) as { data: ChatConversation[]; isLoading: boolean };
-
- const handleNewConversation = () => {
- refreshConversations();
- startNewConversation();
- };
-
// List of paths that should not show navigation
- const authPaths = ["/login", "/auth/callback", "/onboarding", "/new-onboarding"];
+ const authPaths = ["/login", "/auth/callback"];
const isAuthPage = authPaths.includes(pathname);
const isOnKnowledgePage = pathname.startsWith("/knowledge");
- // List of paths with smaller max-width
- const smallWidthPaths = ["/settings/connector/new"];
- const isSmallWidthPath = smallWidthPaths.includes(pathname);
-
- // Calculate active tasks for the bell icon
- const activeTasks = tasks.filter(
- task =>
- task.status === "pending" ||
- task.status === "running" ||
- task.status === "processing"
- );
-
const isUnhealthy = health?.status === "unhealthy" || isError;
const isBannerVisible = !isHealthLoading && isUnhealthy;
+ const isSettingsLoadingOrError = isSettingsLoading || !settings;
// Show loading state when backend isn't ready
- if (isLoading || isSettingsLoading) {
+ if (isLoading || (isSettingsLoadingOrError && (isNoAuthMode || isAuthenticated))) {
return (
@@ -93,88 +56,31 @@ export function LayoutWrapper({ children }: { children: React.ReactNode }) {
// For all other pages, render with Langflow-styled navigation and task menu
return (
-
-
-
+
+
+
+
+
+
+
{children}
+
+ {/* Task Notifications Panel */}
+
+
+ {/* Knowledge Filter Panel */}
+
-
-
- {/* Logo/Title */}
-
-
- OpenRAG
-
-
-
-
- {/* Knowledge Filter Dropdown */}
- {/*
*/}
-
- {/* GitHub Star Button */}
- {/*
*/}
-
- {/* Discord Link */}
- {/*
*/}
-
- {/* Task Notification Bell */}
-
-
- {/* Separator */}
-
-
-
-
-
-
-
- {/* Sidebar Navigation */}
-
-
- {/* Main Content */}
-
-
- {children}
-
-
-
- {/* Task Notifications Panel */}
-
-
- {/* Knowledge Filter Panel */}
-
);
}
diff --git a/frontend/src/components/protected-route.tsx b/frontend/src/components/protected-route.tsx
index a5403e1a..a6f8bf62 100644
--- a/frontend/src/components/protected-route.tsx
+++ b/frontend/src/components/protected-route.tsx
@@ -3,7 +3,6 @@
import { Loader2 } from "lucide-react";
import { usePathname, useRouter } from "next/navigation";
import { useEffect } from "react";
-import { useGetSettingsQuery } from "@/app/api/queries/useGetSettingsQuery";
import { useAuth } from "@/contexts/auth-context";
interface ProtectedRouteProps {
@@ -12,10 +11,6 @@ interface ProtectedRouteProps {
export function ProtectedRoute({ children }: ProtectedRouteProps) {
const { isLoading, isAuthenticated, isNoAuthMode } = useAuth();
- const { data: settings = {}, isLoading: isSettingsLoading } =
- useGetSettingsQuery({
- enabled: isAuthenticated || isNoAuthMode,
- });
const router = useRouter();
const pathname = usePathname();
@@ -31,30 +26,22 @@ export function ProtectedRoute({ children }: ProtectedRouteProps) {
);
useEffect(() => {
- if (!isLoading && !isSettingsLoading && !isAuthenticated && !isNoAuthMode) {
+ if (!isLoading && !isAuthenticated && !isNoAuthMode) {
// Redirect to login with current path as redirect parameter
const redirectUrl = `/login?redirect=${encodeURIComponent(pathname)}`;
router.push(redirectUrl);
return;
}
-
- if (!isLoading && !isSettingsLoading && !settings.edited) {
- const updatedOnboarding = process.env.UPDATED_ONBOARDING === "true";
- router.push(updatedOnboarding ? "/new-onboarding" : "/onboarding");
- }
}, [
isLoading,
- isSettingsLoading,
isAuthenticated,
isNoAuthMode,
router,
pathname,
- isSettingsLoading,
- settings.edited,
]);
// Show loading state while checking authentication
- if (isLoading || isSettingsLoading) {
+ if (isLoading) {
return (
diff --git a/frontend/src/hooks/useChatStreaming.ts b/frontend/src/hooks/useChatStreaming.ts
new file mode 100644
index 00000000..6a7202e8
--- /dev/null
+++ b/frontend/src/hooks/useChatStreaming.ts
@@ -0,0 +1,492 @@
+import { useRef, useState } from "react";
+import type { FunctionCall, Message, SelectedFilters } from "@/app/chat/types";
+
+interface UseChatStreamingOptions {
+ endpoint?: string;
+ onComplete?: (message: Message, responseId: string | null) => void;
+ onError?: (error: Error) => void;
+}
+
+interface SendMessageOptions {
+ prompt: string;
+ previousResponseId?: string;
+ filters?: SelectedFilters;
+ limit?: number;
+ scoreThreshold?: number;
+}
+
+export function useChatStreaming({
+ endpoint = "/api/langflow",
+ onComplete,
+ onError,
+}: UseChatStreamingOptions = {}) {
+ const [streamingMessage, setStreamingMessage] = useState
(
+ null,
+ );
+ const [isLoading, setIsLoading] = useState(false);
+ const streamAbortRef = useRef(null);
+ const streamIdRef = useRef(0);
+
+ const sendMessage = async ({
+ prompt,
+ previousResponseId,
+ filters,
+ limit = 10,
+ scoreThreshold = 0,
+ }: SendMessageOptions) => {
+ try {
+ setIsLoading(true);
+
+ // Abort any existing stream before starting a new one
+ if (streamAbortRef.current) {
+ streamAbortRef.current.abort();
+ }
+
+ const controller = new AbortController();
+ streamAbortRef.current = controller;
+ const thisStreamId = ++streamIdRef.current;
+
+ const requestBody: {
+ prompt: string;
+ stream: boolean;
+ previous_response_id?: string;
+ filters?: SelectedFilters;
+ limit?: number;
+ scoreThreshold?: number;
+ } = {
+ prompt,
+ stream: true,
+ limit,
+ scoreThreshold,
+ };
+
+ if (previousResponseId) {
+ requestBody.previous_response_id = previousResponseId;
+ }
+
+ if (filters) {
+ requestBody.filters = filters;
+ }
+
+ const response = await fetch(endpoint, {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ },
+ body: JSON.stringify(requestBody),
+ signal: controller.signal,
+ });
+
+ if (!response.ok) {
+ throw new Error(`HTTP error! status: ${response.status}`);
+ }
+
+ const reader = response.body?.getReader();
+ if (!reader) {
+ throw new Error("No reader available");
+ }
+
+ const decoder = new TextDecoder();
+ let buffer = "";
+ let currentContent = "";
+ const currentFunctionCalls: FunctionCall[] = [];
+ let newResponseId: string | null = null;
+
+ // Initialize streaming message
+ if (!controller.signal.aborted && thisStreamId === streamIdRef.current) {
+ setStreamingMessage({
+ role: "assistant",
+ content: "",
+ timestamp: new Date(),
+ isStreaming: true,
+ });
+ }
+
+ try {
+ while (true) {
+ const { done, value } = await reader.read();
+ if (controller.signal.aborted || thisStreamId !== streamIdRef.current)
+ break;
+ if (done) break;
+
+ buffer += decoder.decode(value, { stream: true });
+
+ // Process complete lines (JSON objects)
+ const lines = buffer.split("\n");
+ buffer = lines.pop() || ""; // Keep incomplete line in buffer
+
+ for (const line of lines) {
+ if (line.trim()) {
+ try {
+ const chunk = JSON.parse(line);
+
+ // Extract response ID if present
+ if (chunk.id) {
+ newResponseId = chunk.id;
+ } else if (chunk.response_id) {
+ newResponseId = chunk.response_id;
+ }
+
+ // Handle OpenAI Chat Completions streaming format
+ if (chunk.object === "response.chunk" && chunk.delta) {
+ // Handle function calls in delta
+ if (chunk.delta.function_call) {
+ if (chunk.delta.function_call.name) {
+ const functionCall: FunctionCall = {
+ name: chunk.delta.function_call.name,
+ arguments: undefined,
+ status: "pending",
+ argumentsString:
+ chunk.delta.function_call.arguments || "",
+ };
+ currentFunctionCalls.push(functionCall);
+ } else if (chunk.delta.function_call.arguments) {
+ const lastFunctionCall =
+ currentFunctionCalls[currentFunctionCalls.length - 1];
+ if (lastFunctionCall) {
+ if (!lastFunctionCall.argumentsString) {
+ lastFunctionCall.argumentsString = "";
+ }
+ lastFunctionCall.argumentsString +=
+ chunk.delta.function_call.arguments;
+
+ if (lastFunctionCall.argumentsString.includes("}")) {
+ try {
+ const parsed = JSON.parse(
+ lastFunctionCall.argumentsString
+ );
+ lastFunctionCall.arguments = parsed;
+ lastFunctionCall.status = "completed";
+ } catch (e) {
+ // Arguments not yet complete
+ }
+ }
+ }
+ }
+ }
+ // Handle tool calls in delta
+ else if (
+ chunk.delta.tool_calls &&
+ Array.isArray(chunk.delta.tool_calls)
+ ) {
+ for (const toolCall of chunk.delta.tool_calls) {
+ if (toolCall.function) {
+ if (toolCall.function.name) {
+ const functionCall: FunctionCall = {
+ name: toolCall.function.name,
+ arguments: undefined,
+ status: "pending",
+ argumentsString: toolCall.function.arguments || "",
+ };
+ currentFunctionCalls.push(functionCall);
+ } else if (toolCall.function.arguments) {
+ const lastFunctionCall =
+ currentFunctionCalls[
+ currentFunctionCalls.length - 1
+ ];
+ if (lastFunctionCall) {
+ if (!lastFunctionCall.argumentsString) {
+ lastFunctionCall.argumentsString = "";
+ }
+ lastFunctionCall.argumentsString +=
+ toolCall.function.arguments;
+
+ if (
+ lastFunctionCall.argumentsString.includes("}")
+ ) {
+ try {
+ const parsed = JSON.parse(
+ lastFunctionCall.argumentsString
+ );
+ lastFunctionCall.arguments = parsed;
+ lastFunctionCall.status = "completed";
+ } catch (e) {
+ // Arguments not yet complete
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ // Handle content/text in delta
+ else if (chunk.delta.content) {
+ currentContent += chunk.delta.content;
+ }
+
+ // Handle finish reason
+ if (chunk.delta.finish_reason) {
+ currentFunctionCalls.forEach((fc) => {
+ if (fc.status === "pending" && fc.argumentsString) {
+ try {
+ fc.arguments = JSON.parse(fc.argumentsString);
+ fc.status = "completed";
+ } catch (e) {
+ fc.arguments = { raw: fc.argumentsString };
+ fc.status = "error";
+ }
+ }
+ });
+ }
+ }
+ // Handle Realtime API format - function call added
+ else if (
+ chunk.type === "response.output_item.added" &&
+ chunk.item?.type === "function_call"
+ ) {
+ let existing = currentFunctionCalls.find(
+ (fc) => fc.id === chunk.item.id
+ );
+ if (!existing) {
+ existing = [...currentFunctionCalls]
+ .reverse()
+ .find(
+ (fc) =>
+ fc.status === "pending" &&
+ !fc.id &&
+ fc.name === (chunk.item.tool_name || chunk.item.name)
+ );
+ }
+
+ if (existing) {
+ existing.id = chunk.item.id;
+ existing.type = chunk.item.type;
+ existing.name =
+ chunk.item.tool_name || chunk.item.name || existing.name;
+ existing.arguments =
+ chunk.item.inputs || existing.arguments;
+ } else {
+ const functionCall: FunctionCall = {
+ name:
+ chunk.item.tool_name || chunk.item.name || "unknown",
+ arguments: chunk.item.inputs || undefined,
+ status: "pending",
+ argumentsString: "",
+ id: chunk.item.id,
+ type: chunk.item.type,
+ };
+ currentFunctionCalls.push(functionCall);
+ }
+ }
+ // Handle Realtime API format - tool call added
+ else if (
+ chunk.type === "response.output_item.added" &&
+ chunk.item?.type?.includes("_call") &&
+ chunk.item?.type !== "function_call"
+ ) {
+ let existing = currentFunctionCalls.find(
+ (fc) => fc.id === chunk.item.id
+ );
+ if (!existing) {
+ existing = [...currentFunctionCalls]
+ .reverse()
+ .find(
+ (fc) =>
+ fc.status === "pending" &&
+ !fc.id &&
+ fc.name ===
+ (chunk.item.tool_name ||
+ chunk.item.name ||
+ chunk.item.type)
+ );
+ }
+
+ if (existing) {
+ existing.id = chunk.item.id;
+ existing.type = chunk.item.type;
+ existing.name =
+ chunk.item.tool_name ||
+ chunk.item.name ||
+ chunk.item.type ||
+ existing.name;
+ existing.arguments =
+ chunk.item.inputs || existing.arguments;
+ } else {
+ const functionCall = {
+ name:
+ chunk.item.tool_name ||
+ chunk.item.name ||
+ chunk.item.type ||
+ "unknown",
+ arguments: chunk.item.inputs || {},
+ status: "pending" as const,
+ id: chunk.item.id,
+ type: chunk.item.type,
+ };
+ currentFunctionCalls.push(functionCall);
+ }
+ }
+ // Handle function call done
+ else if (
+ chunk.type === "response.output_item.done" &&
+ chunk.item?.type === "function_call"
+ ) {
+ const functionCall = currentFunctionCalls.find(
+ (fc) =>
+ fc.id === chunk.item.id ||
+ fc.name === chunk.item.tool_name ||
+ fc.name === chunk.item.name
+ );
+
+ if (functionCall) {
+ functionCall.status =
+ chunk.item.status === "completed" ? "completed" : "error";
+ functionCall.id = chunk.item.id;
+ functionCall.type = chunk.item.type;
+ functionCall.name =
+ chunk.item.tool_name ||
+ chunk.item.name ||
+ functionCall.name;
+ functionCall.arguments =
+ chunk.item.inputs || functionCall.arguments;
+
+ if (chunk.item.results) {
+ functionCall.result = chunk.item.results;
+ }
+ }
+ }
+ // Handle tool call done with results
+ else if (
+ chunk.type === "response.output_item.done" &&
+ chunk.item?.type?.includes("_call") &&
+ chunk.item?.type !== "function_call"
+ ) {
+ const functionCall = currentFunctionCalls.find(
+ (fc) =>
+ fc.id === chunk.item.id ||
+ fc.name === chunk.item.tool_name ||
+ fc.name === chunk.item.name ||
+ fc.name === chunk.item.type ||
+ fc.name.includes(chunk.item.type.replace("_call", "")) ||
+ chunk.item.type.includes(fc.name)
+ );
+
+ if (functionCall) {
+ functionCall.arguments =
+ chunk.item.inputs || functionCall.arguments;
+ functionCall.status =
+ chunk.item.status === "completed" ? "completed" : "error";
+ functionCall.id = chunk.item.id;
+ functionCall.type = chunk.item.type;
+
+ if (chunk.item.results) {
+ functionCall.result = chunk.item.results;
+ }
+ } else {
+ const newFunctionCall = {
+ name:
+ chunk.item.tool_name ||
+ chunk.item.name ||
+ chunk.item.type ||
+ "unknown",
+ arguments: chunk.item.inputs || {},
+ status: "completed" as const,
+ id: chunk.item.id,
+ type: chunk.item.type,
+ result: chunk.item.results,
+ };
+ currentFunctionCalls.push(newFunctionCall);
+ }
+ }
+ // Handle text output streaming (Realtime API)
+ else if (chunk.type === "response.output_text.delta") {
+ currentContent += chunk.delta || "";
+ }
+ // Handle OpenRAG backend format
+ else if (chunk.output_text) {
+ currentContent += chunk.output_text;
+ } else if (chunk.delta) {
+ if (typeof chunk.delta === "string") {
+ currentContent += chunk.delta;
+ } else if (typeof chunk.delta === "object") {
+ if (chunk.delta.content) {
+ currentContent += chunk.delta.content;
+ } else if (chunk.delta.text) {
+ currentContent += chunk.delta.text;
+ }
+ }
+ }
+
+ // Update streaming message in real-time
+ if (
+ !controller.signal.aborted &&
+ thisStreamId === streamIdRef.current
+ ) {
+ setStreamingMessage({
+ role: "assistant",
+ content: currentContent,
+ functionCalls:
+ currentFunctionCalls.length > 0
+ ? [...currentFunctionCalls]
+ : undefined,
+ timestamp: new Date(),
+ isStreaming: true,
+ });
+ }
+ } catch (parseError) {
+ console.warn("Failed to parse chunk:", line, parseError);
+ }
+ }
+ }
+ }
+ } finally {
+ reader.releaseLock();
+ }
+
+ // Finalize the message
+ const finalMessage: Message = {
+ role: "assistant",
+ content: currentContent,
+ functionCalls:
+ currentFunctionCalls.length > 0 ? currentFunctionCalls : undefined,
+ timestamp: new Date(),
+ isStreaming: false,
+ };
+
+ if (!controller.signal.aborted && thisStreamId === streamIdRef.current) {
+ // Clear streaming message and call onComplete with final message
+ setStreamingMessage(null);
+ onComplete?.(finalMessage, newResponseId);
+ return finalMessage;
+ }
+
+ return null;
+ } catch (error) {
+ // If stream was aborted, don't handle as error
+ if (streamAbortRef.current?.signal.aborted) {
+ return null;
+ }
+
+ console.error("SSE Stream error:", error);
+ setStreamingMessage(null);
+ onError?.(error as Error);
+
+ const errorMessage: Message = {
+ role: "assistant",
+ content:
+ "Sorry, I couldn't connect to the chat service. Please try again.",
+ timestamp: new Date(),
+ isStreaming: false,
+ };
+
+ return errorMessage;
+ } finally {
+ setIsLoading(false);
+ }
+ };
+
+ const abortStream = () => {
+ if (streamAbortRef.current) {
+ streamAbortRef.current.abort();
+ }
+ setStreamingMessage(null);
+ setIsLoading(false);
+ };
+
+ return {
+ streamingMessage,
+ isLoading,
+ sendMessage,
+ abortStream,
+ };
+}
diff --git a/frontend/src/lib/constants.ts b/frontend/src/lib/constants.ts
index 9ce34634..dfd7358a 100644
--- a/frontend/src/lib/constants.ts
+++ b/frontend/src/lib/constants.ts
@@ -3,7 +3,7 @@
*/
export const DEFAULT_AGENT_SETTINGS = {
llm_model: "gpt-4o-mini",
- system_prompt: "You are a helpful assistant that can use tools to answer questions and perform tasks."
+ system_prompt: "You are a helpful assistant that can use tools to answer questions and perform tasks. You are part of OpenRAG, an assistant that analyzes documents and provides information about them. When asked what OpenRAG is, answer the following:\n\n\"OpenRAG is an open-source package for building agentic RAG systems. It supports integration with a wide range of orchestration tools, vector databases, and LLM providers. OpenRAG connects and amplifies three popular, proven open-source projects into one powerful platform:\n\n**Langflow** – Langflow is a powerful tool to build and deploy AI agents and MCP servers [Read more](https://www.langflow.org/)\n\n**OpenSearch** – OpenSearch is a community-driven, open-source search and analytics suite [Read more](https://opensearch.org/)\n\n**Docling** – Docling is a powerful tool to parse and convert documents into AI-ready formats [Read more](https://www.docling.ai/)\""
} as const;
/**
@@ -22,4 +22,17 @@ export const DEFAULT_KNOWLEDGE_SETTINGS = {
*/
export const UI_CONSTANTS = {
MAX_SYSTEM_PROMPT_CHARS: 2000,
-} as const;
\ No newline at end of file
+} as const;
+
+export const ANIMATION_DURATION = 0.4;
+export const SIDEBAR_WIDTH = 280;
+export const HEADER_HEIGHT = 54;
+export const TOTAL_ONBOARDING_STEPS = 3;
+
+/**
+ * Local Storage Keys
+ */
+export const ONBOARDING_STEP_KEY = "onboarding_current_step";
+
+export const FILES_REGEX =
+ /(?<=I'm uploading a document called ['"])[^'"]+\.[^.]+(?=['"]\. Here is its content:)/;
\ No newline at end of file
diff --git a/frontend/tailwind.config.ts b/frontend/tailwind.config.ts
index a65cef05..6567616b 100644
--- a/frontend/tailwind.config.ts
+++ b/frontend/tailwind.config.ts
@@ -72,6 +72,14 @@ const config = {
height: "0",
},
},
+ shimmer: {
+ "0%": {
+ backgroundPosition: "200% 0",
+ },
+ "100%": {
+ backgroundPosition: "-200% 0",
+ },
+ },
},
animation: {
overlayShow: "overlayShow 400ms cubic-bezier(0.16, 1, 0.3, 1)",
@@ -79,6 +87,7 @@ const config = {
wiggle: "wiggle 150ms ease-in-out 1",
"accordion-down": "accordion-down 0.2s ease-out",
"accordion-up": "accordion-up 0.2s ease-out",
+ shimmer: "shimmer 3s ease-in-out infinite",
},
colors: {
border: "hsl(var(--border))",
diff --git a/src/agent.py b/src/agent.py
index eceb2ac4..84394ebc 100644
--- a/src/agent.py
+++ b/src/agent.py
@@ -34,7 +34,7 @@ def get_conversation_thread(user_id: str, previous_response_id: str = None):
"messages": [
{
"role": "system",
- "content": "You are a helpful assistant. Always use the search_tools to answer questions.",
+ "content": "You are a helpful assistant that can use tools to answer questions and perform tasks. You are part of OpenRAG, an assistant that analyzes documents and provides information about them. When asked what OpenRAG is, answer the following:\n\n\"OpenRAG is an open-source package for building agentic RAG systems. It supports integration with a wide range of orchestration tools, vector databases, and LLM providers. OpenRAG connects and amplifies three popular, proven open-source projects into one powerful platform:\n\n**Langflow** – Langflow is a powerful tool to build and deploy AI agents and MCP servers [Read more](https://www.langflow.org/)\n\n**OpenSearch** – OpenSearch is a community-driven, open-source search and analytics suite [Read more](https://opensearch.org/)\n\n**Docling** – Docling is a powerful tool to parse and convert documents into AI-ready formats [Read more](https://www.docling.ai/)\"",
}
],
"previous_response_id": previous_response_id, # Parent response_id for branching
diff --git a/src/api/settings.py b/src/api/settings.py
index b4a15745..5fc30cf5 100644
--- a/src/api/settings.py
+++ b/src/api/settings.py
@@ -424,13 +424,10 @@ async def onboarding(request, flows_service):
# Get current configuration
current_config = get_openrag_config()
- # Check if config is NOT marked as edited (only allow onboarding if not yet configured)
+ # Warn if config was already edited (onboarding being re-run)
if current_config.edited:
- return JSONResponse(
- {
- "error": "Configuration has already been edited. Use /settings endpoint for updates."
- },
- status_code=403,
+ logger.warning(
+ "Onboarding is being re-run although the configuration was already edited"
)
# Parse request body
diff --git a/src/api/upload.py b/src/api/upload.py
index 2bc15dea..39270cbc 100644
--- a/src/api/upload.py
+++ b/src/api/upload.py
@@ -99,13 +99,12 @@ async def upload_context(
# Get optional parameters
previous_response_id = form.get("previous_response_id")
endpoint = form.get("endpoint", "langflow")
-
- jwt_token = session_manager.get_effective_jwt_token(user_id, request.state.jwt_token)
-
+
# Get user info from request state (set by auth middleware)
user = request.state.user
user_id = user.user_id if user else None
+ jwt_token = session_manager.get_effective_jwt_token(user_id, request.state.jwt_token)
# Process document and extract content
doc_result = await document_service.process_upload_context(upload_file, filename)