Merge pull request #289 from langflow-ai/fix/provider-design

fix: adds new provider design, fixes issues with auth mode
This commit is contained in:
Mike Fortman 2025-10-23 11:47:18 -05:00 committed by GitHub
commit 883877b4e7
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
7 changed files with 343 additions and 359 deletions

View file

@@ -14,7 +14,7 @@ const AccordionItem = React.forwardRef<
>(({ className, ...props }, ref) => (
<AccordionPrimitive.Item
ref={ref}
className={cn("border rounded-md", className)}
className={cn("border rounded-xl", className)}
{...props}
/>
));

View file

@@ -13,7 +13,7 @@ const TabsList = React.forwardRef<
<TabsPrimitive.List
ref={ref}
className={cn(
"inline-flex h-12 gap-3 items-center justify-center p-0 text-muted-foreground w-full",
"inline-flex h-fit gap-3 items-center justify-center p-0 text-muted-foreground w-full",
className,
)}
{...props}
@@ -28,7 +28,7 @@ const TabsTrigger = React.forwardRef<
<TabsPrimitive.Trigger
ref={ref}
className={cn(
"inline-flex w-full h-full border border-border gap-1.5 items-center justify-center whitespace-nowrap rounded-lg px-3 py-1.5 text-sm font-medium transition-all focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:pointer-events-none disabled:opacity-50 data-[state=active]:border-accent-pink-foreground data-[state=active]:text-foreground",
"flex flex-col items-start justify-between p-5 gap-4 w-full h-full border border-border whitespace-nowrap rounded-lg text-sm font-medium transition-all focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:pointer-events-none disabled:opacity-50 data-[state=active]:border-muted-foreground data-[state=active]:text-foreground",
className,
)}
{...props}

View file

@@ -75,20 +75,6 @@ export function AdvancedOnboarding({
/>
</LabelWrapper>
)}
{(hasLanguageModels || hasEmbeddingModels) && !updatedOnboarding && <Separator />}
{!updatedOnboarding && (
<LabelWrapper
label="Sample dataset"
description="Load sample data to chat with immediately."
id="sample-dataset"
flex
>
<Switch
checked={sampleDataset}
onCheckedChange={setSampleDataset}
/>
</LabelWrapper>
)}
</AccordionContent>
</AccordionItem>
</Accordion>

View file

@@ -7,154 +7,143 @@ import type { OnboardingVariables } from "../../api/mutations/useOnboardingMutat
import { useGetOllamaModelsQuery } from "../../api/queries/useGetModelsQuery";
import { useModelSelection } from "../hooks/useModelSelection";
import { useUpdateSettings } from "../hooks/useUpdateSettings";
import { AdvancedOnboarding } from "./advanced";
import { ModelSelector } from "./model-selector";
export function OllamaOnboarding({
setSettings,
sampleDataset,
setSampleDataset,
setSettings,
sampleDataset,
setSampleDataset,
}: {
setSettings: (settings: OnboardingVariables) => void;
sampleDataset: boolean;
setSampleDataset: (dataset: boolean) => void;
setSettings: (settings: OnboardingVariables) => void;
sampleDataset: boolean;
setSampleDataset: (dataset: boolean) => void;
}) {
const [endpoint, setEndpoint] = useState(`http://localhost:11434`);
const [showConnecting, setShowConnecting] = useState(false);
const debouncedEndpoint = useDebouncedValue(endpoint, 500);
const [endpoint, setEndpoint] = useState(`http://localhost:11434`);
const [showConnecting, setShowConnecting] = useState(false);
const debouncedEndpoint = useDebouncedValue(endpoint, 500);
// Fetch models from API when endpoint is provided (debounced)
const {
data: modelsData,
isLoading: isLoadingModels,
error: modelsError,
} = useGetOllamaModelsQuery(
debouncedEndpoint ? { endpoint: debouncedEndpoint } : undefined,
);
// Fetch models from API when endpoint is provided (debounced)
const {
data: modelsData,
isLoading: isLoadingModels,
error: modelsError,
} = useGetOllamaModelsQuery(
debouncedEndpoint ? { endpoint: debouncedEndpoint } : undefined,
);
// Use custom hook for model selection logic
const {
languageModel,
embeddingModel,
setLanguageModel,
setEmbeddingModel,
languageModels,
embeddingModels,
} = useModelSelection(modelsData);
// Use custom hook for model selection logic
const {
languageModel,
embeddingModel,
setLanguageModel,
setEmbeddingModel,
languageModels,
embeddingModels,
} = useModelSelection(modelsData);
// Handle delayed display of connecting state
useEffect(() => {
let timeoutId: NodeJS.Timeout;
// Handle delayed display of connecting state
useEffect(() => {
let timeoutId: NodeJS.Timeout;
if (debouncedEndpoint && isLoadingModels) {
timeoutId = setTimeout(() => {
setShowConnecting(true);
}, 500);
} else {
setShowConnecting(false);
}
if (debouncedEndpoint && isLoadingModels) {
timeoutId = setTimeout(() => {
setShowConnecting(true);
}, 500);
} else {
setShowConnecting(false);
}
return () => {
if (timeoutId) {
clearTimeout(timeoutId);
}
};
}, [debouncedEndpoint, isLoadingModels]);
return () => {
if (timeoutId) {
clearTimeout(timeoutId);
}
};
}, [debouncedEndpoint, isLoadingModels]);
const handleSampleDatasetChange = (dataset: boolean) => {
setSampleDataset(dataset);
};
// Update settings when values change
useUpdateSettings(
"ollama",
{
endpoint,
languageModel,
embeddingModel,
},
setSettings,
);
// Update settings when values change
useUpdateSettings(
"ollama",
{
endpoint,
languageModel,
embeddingModel,
},
setSettings,
);
// Check validation state based on models query
const hasConnectionError = debouncedEndpoint && modelsError;
const hasNoModels =
modelsData &&
!modelsData.language_models?.length &&
!modelsData.embedding_models?.length;
// Check validation state based on models query
const hasConnectionError = debouncedEndpoint && modelsError;
const hasNoModels =
modelsData &&
!modelsData.language_models?.length &&
!modelsData.embedding_models?.length;
return (
<>
<div className="space-y-4">
<div className="space-y-1">
<LabelInput
label="Ollama Base URL"
helperText="Base URL of your Ollama server"
id="api-endpoint"
required
placeholder="http://localhost:11434"
value={endpoint}
onChange={(e) => setEndpoint(e.target.value)}
/>
{showConnecting && (
<p className="text-mmd text-muted-foreground">
Connecting to Ollama server...
</p>
)}
{hasConnectionError && (
<p className="text-mmd text-accent-amber-foreground">
Can't reach Ollama at {debouncedEndpoint}. Update the base URL or
start the server.
</p>
)}
{hasNoModels && (
<p className="text-mmd text-accent-amber-foreground">
No models found. Install embedding and agent models on your Ollama
server.
</p>
)}
</div>
<LabelWrapper
label="Embedding model"
helperText="Model used for knowledge ingest and retrieval"
id="embedding-model"
required={true}
>
<ModelSelector
options={embeddingModels}
icon={<OllamaLogo className="w-4 h-4" />}
noOptionsPlaceholder={
isLoadingModels
? "Loading models..."
: "No embedding models detected. Install an embedding model to continue."
}
value={embeddingModel}
onValueChange={setEmbeddingModel}
/>
</LabelWrapper>
<LabelWrapper
label="Language model"
helperText="Model used for chat"
id="embedding-model"
required={true}
>
<ModelSelector
options={languageModels}
icon={<OllamaLogo className="w-4 h-4" />}
noOptionsPlaceholder={
isLoadingModels
? "Loading models..."
: "No language models detected. Install a language model to continue."
}
value={languageModel}
onValueChange={setLanguageModel}
/>
</LabelWrapper>
</div>
<AdvancedOnboarding
sampleDataset={sampleDataset}
setSampleDataset={handleSampleDatasetChange}
/>
</>
);
return (
<div className="space-y-4">
<div className="space-y-1">
<LabelInput
label="Ollama Base URL"
helperText="Base URL of your Ollama server"
id="api-endpoint"
required
placeholder="http://localhost:11434"
value={endpoint}
onChange={(e) => setEndpoint(e.target.value)}
/>
{showConnecting && (
<p className="text-mmd text-muted-foreground">
Connecting to Ollama server...
</p>
)}
{hasConnectionError && (
<p className="text-mmd text-accent-amber-foreground">
Can't reach Ollama at {debouncedEndpoint}. Update the base URL or
start the server.
</p>
)}
{hasNoModels && (
<p className="text-mmd text-accent-amber-foreground">
No models found. Install embedding and agent models on your Ollama
server.
</p>
)}
</div>
<LabelWrapper
label="Embedding model"
helperText="Model used for knowledge ingest and retrieval"
id="embedding-model"
required={true}
>
<ModelSelector
options={embeddingModels}
icon={<OllamaLogo className="w-4 h-4" />}
noOptionsPlaceholder={
isLoadingModels
? "Loading models..."
: "No embedding models detected. Install an embedding model to continue."
}
value={embeddingModel}
onValueChange={setEmbeddingModel}
/>
</LabelWrapper>
<LabelWrapper
label="Language model"
helperText="Model used for chat"
id="embedding-model"
required={true}
>
<ModelSelector
options={languageModels}
icon={<OllamaLogo className="w-4 h-4" />}
noOptionsPlaceholder={
isLoadingModels
? "Loading models..."
: "No language models detected. Install a language model to continue."
}
value={languageModel}
onValueChange={setLanguageModel}
/>
</LabelWrapper>
</div>
);
}

View file

@@ -4,8 +4,8 @@ import { AnimatePresence, motion } from "framer-motion";
import { useEffect, useState } from "react";
import { toast } from "sonner";
import {
type OnboardingVariables,
useOnboardingMutation,
type OnboardingVariables,
useOnboardingMutation,
} from "@/app/api/mutations/useOnboardingMutation";
import { useGetTasksQuery } from "@/app/api/queries/useGetTasksQuery";
import { useDoclingHealth } from "@/components/docling-health-banner";
@@ -14,24 +14,25 @@ import OllamaLogo from "@/components/logo/ollama-logo";
import OpenAILogo from "@/components/logo/openai-logo";
import { Button } from "@/components/ui/button";
import {
Card,
CardContent,
CardFooter,
CardHeader,
Card,
CardContent,
CardFooter,
CardHeader,
} from "@/components/ui/card";
import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs";
import {
Tooltip,
TooltipContent,
TooltipTrigger,
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/ui/tooltip";
import { cn } from "@/lib/utils";
import { AnimatedProviderSteps } from "./animated-provider-steps";
import { IBMOnboarding } from "./ibm-onboarding";
import { OllamaOnboarding } from "./ollama-onboarding";
import { OpenAIOnboarding } from "./openai-onboarding";
interface OnboardingCardProps {
onComplete: () => void;
onComplete: () => void;
}
@@ -45,220 +46,226 @@ const STEP_LIST = [
const TOTAL_PROVIDER_STEPS = STEP_LIST.length;
const OnboardingCard = ({ onComplete }: OnboardingCardProps) => {
const updatedOnboarding = process.env.UPDATED_ONBOARDING === "true";
const { isHealthy: isDoclingHealthy } = useDoclingHealth();
const updatedOnboarding = process.env.UPDATED_ONBOARDING === "true";
const { isHealthy: isDoclingHealthy } = useDoclingHealth();
const [modelProvider, setModelProvider] = useState<string>("openai");
const [modelProvider, setModelProvider] = useState<string>("openai");
const [sampleDataset, setSampleDataset] = useState<boolean>(true);
const [sampleDataset, setSampleDataset] = useState<boolean>(true);
const handleSetModelProvider = (provider: string) => {
setModelProvider(provider);
setSettings({
model_provider: provider,
embedding_model: "",
llm_model: "",
});
};
const handleSetModelProvider = (provider: string) => {
setModelProvider(provider);
setSettings({
model_provider: provider,
embedding_model: "",
llm_model: "",
});
};
const [settings, setSettings] = useState<OnboardingVariables>({
model_provider: modelProvider,
embedding_model: "",
llm_model: "",
});
const [settings, setSettings] = useState<OnboardingVariables>({
model_provider: modelProvider,
embedding_model: "",
llm_model: "",
});
const [currentStep, setCurrentStep] = useState<number | null>(null);
const [currentStep, setCurrentStep] = useState<number | null>(null);
// Query tasks to track completion
const { data: tasks } = useGetTasksQuery({
enabled: currentStep !== null, // Only poll when onboarding has started
refetchInterval: currentStep !== null ? 1000 : false, // Poll every 1 second during onboarding
});
// Query tasks to track completion
const { data: tasks } = useGetTasksQuery({
enabled: currentStep !== null, // Only poll when onboarding has started
refetchInterval: currentStep !== null ? 1000 : false, // Poll every 1 second during onboarding
});
// Monitor tasks and call onComplete when all tasks are done
useEffect(() => {
if (currentStep === null || !tasks) {
return;
}
// Monitor tasks and call onComplete when all tasks are done
useEffect(() => {
if (currentStep === null || !tasks) {
return;
}
// Check if there are any active tasks (pending, running, or processing)
const activeTasks = tasks.find(
(task) =>
task.status === "pending" ||
task.status === "running" ||
task.status === "processing",
);
// Check if there are any active tasks (pending, running, or processing)
const activeTasks = tasks.find(
(task) =>
task.status === "pending" ||
task.status === "running" ||
task.status === "processing",
);
// If no active tasks and we've started onboarding, complete it
if (
(!activeTasks || (activeTasks.processed_files ?? 0) > 0) &&
tasks.length > 0
) {
// Set to final step to show "Done"
setCurrentStep(TOTAL_PROVIDER_STEPS);
// Wait a bit before completing
setTimeout(() => {
onComplete();
}, 1000);
}
}, [tasks, currentStep, onComplete]);
// If no active tasks and we've started onboarding, complete it
if (
(!activeTasks || (activeTasks.processed_files ?? 0) > 0) &&
tasks.length > 0
) {
// Set to final step to show "Done"
setCurrentStep(TOTAL_PROVIDER_STEPS);
// Wait a bit before completing
setTimeout(() => {
onComplete();
}, 1000);
}
}, [tasks, currentStep, onComplete]);
// Mutations
const onboardingMutation = useOnboardingMutation({
onSuccess: (data) => {
console.log("Onboarding completed successfully", data);
setCurrentStep(0);
},
onError: (error) => {
toast.error("Failed to complete onboarding", {
description: error.message,
});
},
});
// Mutations
const onboardingMutation = useOnboardingMutation({
onSuccess: (data) => {
console.log("Onboarding completed successfully", data);
setCurrentStep(0);
},
onError: (error) => {
toast.error("Failed to complete onboarding", {
description: error.message,
});
},
});
const handleComplete = () => {
if (
!settings.model_provider ||
!settings.llm_model ||
!settings.embedding_model
) {
toast.error("Please complete all required fields");
return;
}
const handleComplete = () => {
if (
!settings.model_provider ||
!settings.llm_model ||
!settings.embedding_model
) {
toast.error("Please complete all required fields");
return;
}
// Prepare onboarding data
const onboardingData: OnboardingVariables = {
model_provider: settings.model_provider,
llm_model: settings.llm_model,
embedding_model: settings.embedding_model,
sample_data: sampleDataset,
};
// Prepare onboarding data
const onboardingData: OnboardingVariables = {
model_provider: settings.model_provider,
llm_model: settings.llm_model,
embedding_model: settings.embedding_model,
sample_data: sampleDataset,
};
// Add API key if available
if (settings.api_key) {
onboardingData.api_key = settings.api_key;
}
// Add API key if available
if (settings.api_key) {
onboardingData.api_key = settings.api_key;
}
// Add endpoint if available
if (settings.endpoint) {
onboardingData.endpoint = settings.endpoint;
}
// Add endpoint if available
if (settings.endpoint) {
onboardingData.endpoint = settings.endpoint;
}
// Add project_id if available
if (settings.project_id) {
onboardingData.project_id = settings.project_id;
}
// Add project_id if available
if (settings.project_id) {
onboardingData.project_id = settings.project_id;
}
onboardingMutation.mutate(onboardingData);
setCurrentStep(0);
};
onboardingMutation.mutate(onboardingData);
setCurrentStep(0);
};
const isComplete =
!!settings.llm_model && !!settings.embedding_model && isDoclingHealthy;
const isComplete =
!!settings.llm_model && !!settings.embedding_model && isDoclingHealthy;
return (
<AnimatePresence mode="wait">
{currentStep === null ? (
<motion.div
key="onboarding-form"
initial={{ opacity: 1, y: 0 }}
exit={{ opacity: 0, y: -24 }}
transition={{ duration: 0.4, ease: "easeInOut" }}
>
<Card
className={`w-full max-w-[600px] ${
updatedOnboarding ? "border-none" : ""
}`}
>
<Tabs
defaultValue={modelProvider}
onValueChange={handleSetModelProvider}
>
<CardHeader className={`${updatedOnboarding ? "px-0" : ""}`}>
<TabsList>
<TabsTrigger value="openai">
<OpenAILogo className="w-4 h-4" />
OpenAI
</TabsTrigger>
<TabsTrigger value="watsonx">
<IBMLogo className="w-4 h-4" />
IBM watsonx.ai
</TabsTrigger>
<TabsTrigger value="ollama">
<OllamaLogo className="w-4 h-4" />
Ollama
</TabsTrigger>
</TabsList>
</CardHeader>
<CardContent className={`${updatedOnboarding ? "px-0" : ""}`}>
<TabsContent value="openai">
<OpenAIOnboarding
setSettings={setSettings}
sampleDataset={sampleDataset}
setSampleDataset={setSampleDataset}
/>
</TabsContent>
<TabsContent value="watsonx">
<IBMOnboarding
setSettings={setSettings}
sampleDataset={sampleDataset}
setSampleDataset={setSampleDataset}
/>
</TabsContent>
<TabsContent value="ollama">
<OllamaOnboarding
setSettings={setSettings}
sampleDataset={sampleDataset}
setSampleDataset={setSampleDataset}
/>
</TabsContent>
</CardContent>
</Tabs>
<CardFooter
className={`flex ${updatedOnboarding ? "px-0" : "justify-end"}`}
>
<Tooltip>
<TooltipTrigger asChild>
<div>
<Button
size="sm"
onClick={handleComplete}
disabled={!isComplete}
loading={onboardingMutation.isPending}
>
<span className="select-none">Complete</span>
</Button>
</div>
</TooltipTrigger>
{!isComplete && (
<TooltipContent>
{!!settings.llm_model &&
!!settings.embedding_model &&
!isDoclingHealthy
? "docling-serve must be running to continue"
: "Please fill in all required fields"}
</TooltipContent>
)}
</Tooltip>
</CardFooter>
</Card>
</motion.div>
) : (
<motion.div
key="provider-steps"
initial={{ opacity: 0, y: 24 }}
animate={{ opacity: 1, y: 0 }}
transition={{ duration: 0.4, ease: "easeInOut" }}
>
<AnimatedProviderSteps
return (
<AnimatePresence mode="wait">
{currentStep === null ? (
<motion.div
key="onboarding-form"
initial={{ opacity: 1, y: 0 }}
exit={{ opacity: 0, y: -24 }}
transition={{ duration: 0.4, ease: "easeInOut" }}
>
<div className={`w-full max-w-[600px] flex flex-col gap-6`}>
<Tabs
defaultValue={modelProvider}
onValueChange={handleSetModelProvider}
>
<TabsList className="mb-4">
<TabsTrigger
value="openai"
>
<div className={cn("flex items-center justify-center gap-2 w-8 h-8 rounded-md", modelProvider === "openai" ? "bg-white" : "bg-muted")}>
<OpenAILogo className={cn("w-4 h-4 shrink-0", modelProvider === "openai" ? "text-black" : "text-muted-foreground")} />
</div>
OpenAI
</TabsTrigger>
<TabsTrigger
value="watsonx"
>
<div className={cn("flex items-center justify-center gap-2 w-8 h-8 rounded-md", modelProvider === "watsonx" ? "bg-[#1063FE]" : "bg-muted")}>
<IBMLogo className={cn("w-4 h-4 shrink-0", modelProvider === "watsonx" ? "text-white" : "text-muted-foreground")} />
</div>
IBM watsonx.ai
</TabsTrigger>
<TabsTrigger
value="ollama"
>
<div className={cn("flex items-center justify-center gap-2 w-8 h-8 rounded-md", modelProvider === "ollama" ? "bg-white" : "bg-muted")}>
<OllamaLogo
className={cn(
"w-4 h-4 shrink-0",
modelProvider === "ollama" ? "text-black" : "text-muted-foreground",
)}
/>
</div>
Ollama
</TabsTrigger>
</TabsList>
<TabsContent value="openai">
<OpenAIOnboarding
setSettings={setSettings}
sampleDataset={sampleDataset}
setSampleDataset={setSampleDataset}
/>
</TabsContent>
<TabsContent value="watsonx">
<IBMOnboarding
setSettings={setSettings}
sampleDataset={sampleDataset}
setSampleDataset={setSampleDataset}
/>
</TabsContent>
<TabsContent value="ollama">
<OllamaOnboarding
setSettings={setSettings}
sampleDataset={sampleDataset}
setSampleDataset={setSampleDataset}
/>
</TabsContent>
</Tabs>
<Tooltip>
<TooltipTrigger asChild>
<div>
<Button
size="sm"
onClick={handleComplete}
disabled={!isComplete}
loading={onboardingMutation.isPending}
>
<span className="select-none">Complete</span>
</Button>
</div>
</TooltipTrigger>
{!isComplete && (
<TooltipContent>
{!!settings.llm_model &&
!!settings.embedding_model &&
!isDoclingHealthy
? "docling-serve must be running to continue"
: "Please fill in all required fields"}
</TooltipContent>
)}
</Tooltip>
</div>
</motion.div>
) : (
<motion.div
key="provider-steps"
initial={{ opacity: 0, y: 24 }}
animate={{ opacity: 1, y: 0 }}
transition={{ duration: 0.4, ease: "easeInOut" }}
>
<AnimatedProviderSteps
currentStep={currentStep}
setCurrentStep={setCurrentStep}
steps={STEP_LIST}
/>
</motion.div>
)}
</AnimatePresence>
);
</motion.div>
)}
</AnimatePresence>
);
};
export default OnboardingCard;

View file

@@ -28,7 +28,7 @@ export function ChatRenderer({
settings,
children,
}: {
settings: Settings;
settings: Settings | undefined;
children: React.ReactNode;
}) {
const pathname = usePathname();
@@ -51,7 +51,8 @@ export function ChatRenderer({
if (typeof window === "undefined") return false;
const savedStep = localStorage.getItem(ONBOARDING_STEP_KEY);
// Show layout if settings.edited is true and if no onboarding step is saved
return !!settings?.edited && savedStep === null;
const isEdited = settings?.edited ?? true;
return isEdited ? savedStep === null : false;
});
// Only fetch conversations on chat page

View file

@@ -35,9 +35,10 @@ export function LayoutWrapper({ children }: { children: React.ReactNode }) {
const isUnhealthy = health?.status === "unhealthy" || isError;
const isBannerVisible = !isHealthLoading && isUnhealthy;
const isSettingsLoadingOrError = isSettingsLoading || !settings;
// Show loading state when backend isn't ready
if (isLoading || isSettingsLoading || !settings) {
if (isLoading || (isSettingsLoadingOrError && (isNoAuthMode || isAuthenticated))) {
return (
<div className="min-h-screen flex items-center justify-center bg-background">
<div className="flex flex-col items-center gap-4">