Merge pull request #616 from langflow-ai/fix/watsonx_fixes

Commit ddee4679b9 by Edwin Jose, 2025-12-05 17:55:20 -05:00 (committed by GitHub)
12 changed files with 2240 additions and 1825 deletions


@@ -4,6 +4,7 @@ import {
useQueryClient,
} from "@tanstack/react-query";
import type { EndpointType } from "@/contexts/chat-context";
import { useChat } from "@/contexts/chat-context";
export interface RawConversation {
response_id: string;
@@ -50,6 +51,7 @@ export const useGetConversationsQuery = (
options?: Omit<UseQueryOptions, "queryKey" | "queryFn">,
) => {
const queryClient = useQueryClient();
const { isOnboardingComplete } = useChat();
async function getConversations(context: { signal?: AbortSignal }): Promise<ChatConversation[]> {
try {
@@ -95,6 +97,11 @@ export const useGetConversationsQuery = (
}
}
// Extract enabled from options and combine with onboarding completion check
// Query is only enabled if onboarding is complete AND the caller's enabled condition is met
const callerEnabled = options?.enabled ?? true;
const enabled = isOnboardingComplete && callerEnabled;
const queryResult = useQuery(
{
queryKey: ["conversations", endpoint, refreshTrigger],
@@ -106,6 +113,7 @@ export const useGetConversationsQuery = (
refetchOnMount: false, // Don't refetch on every mount
refetchOnWindowFocus: false, // Don't refetch when window regains focus
...options,
enabled, // Override enabled after spreading options to ensure onboarding check is applied
},
queryClient,
);


@@ -3,6 +3,8 @@ import {
useQuery,
useQueryClient,
} from "@tanstack/react-query";
import { useChat } from "@/contexts/chat-context";
import { useProviderHealthQuery } from "./useProviderHealthQuery";
type Nudge = string;
@@ -27,6 +29,13 @@ export const useGetNudgesQuery = (
) => {
const { chatId, filters, limit, scoreThreshold } = params ?? {};
const queryClient = useQueryClient();
const { isOnboardingComplete } = useChat();
// Check if LLM provider is healthy
// If health data is not available yet, assume healthy (optimistic)
// Only disable if health data exists and shows LLM error
const { data: health } = useProviderHealthQuery();
const isLLMHealthy = health === undefined || (health?.status === "healthy" && !health?.llm_error);
function cancel() {
queryClient.removeQueries({
@@ -77,6 +86,11 @@ export const useGetNudgesQuery = (
}
}
// Extract enabled from options and combine with onboarding completion and LLM health checks
// Query is only enabled if onboarding is complete AND LLM provider is healthy AND the caller's enabled condition is met
const callerEnabled = options?.enabled ?? true;
const enabled = isOnboardingComplete && isLLMHealthy && callerEnabled;
const queryResult = useQuery(
{
queryKey: ["nudges", chatId, filters, limit, scoreThreshold],
@@ -91,6 +105,7 @@ export const useGetNudgesQuery = (
return Array.isArray(data) && data.length === 0 ? 5000 : false;
},
...options,
enabled, // Override enabled after spreading options to ensure the onboarding and LLM health checks are applied
},
queryClient,
);
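
The optimistic health check above is easy to invert by accident, so it helps to see the predicate in isolation: undefined means the health query has not resolved yet, and only a resolved response reporting an LLM problem should block nudges. A small sketch, with the response type inferred from the fields used here (status, llm_error):

interface ProviderHealth {
  status: "healthy" | "unhealthy" | "error" | "backend-unavailable";
  llm_error?: string;
}

// Optimistic: no data yet counts as healthy; an explicit llm_error does not.
export function isLLMHealthy(health?: ProviderHealth): boolean {
  return health === undefined || (health.status === "healthy" && !health.llm_error);
}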


@@ -5,6 +5,7 @@ import {
} from "@tanstack/react-query";
import { useChat } from "@/contexts/chat-context";
import { useGetSettingsQuery } from "./useGetSettingsQuery";
import { useGetTasksQuery } from "./useGetTasksQuery";
export interface ProviderHealthDetails {
llm_model: string;
@@ -40,11 +41,20 @@ export const useProviderHealthQuery = (
) => {
const queryClient = useQueryClient();
// Get chat error state from context (ChatProvider wraps the entire app in layout.tsx)
const { hasChatError, setChatError } = useChat();
// Get chat error state and onboarding completion from context (ChatProvider wraps the entire app in layout.tsx)
const { hasChatError, setChatError, isOnboardingComplete } = useChat();
const { data: settings = {} } = useGetSettingsQuery();
// Check if there are any active ingestion tasks
const { data: tasks = [] } = useGetTasksQuery();
const hasActiveIngestion = tasks.some(
(task) =>
task.status === "pending" ||
task.status === "running" ||
task.status === "processing",
);
async function checkProviderHealth(): Promise<ProviderHealthResponse> {
try {
const url = new URL("/api/provider/health", window.location.origin);
@@ -55,6 +65,7 @@ export const useProviderHealthQuery = (
}
// Add test_completion query param if specified or if chat error exists
// Use the same testCompletion value that's in the queryKey
const testCompletion = params?.test_completion ?? hasChatError;
if (testCompletion) {
url.searchParams.set("test_completion", "true");
@@ -101,7 +112,10 @@ export const useProviderHealthQuery = (
}
}
const queryKey = ["provider", "health", params?.test_completion];
// Include hasChatError in queryKey so React Query refetches when it changes
// This ensures the health check runs with test_completion=true when chat errors occur
const testCompletion = params?.test_completion ?? hasChatError;
const queryKey = ["provider", "health", testCompletion, hasChatError];
const failureCountKey = queryKey.join("-");
const queryResult = useQuery(
@@ -143,7 +157,11 @@ export const useProviderHealthQuery = (
refetchOnWindowFocus: false, // Disabled to reduce unnecessary calls on tab switches
refetchOnMount: true,
staleTime: 30000, // Consider data stale after 30 seconds
enabled:
!!settings?.edited &&
isOnboardingComplete &&
!hasActiveIngestion && // Disable health checks when ingestion is happening
options?.enabled !== false, // Respect the caller's enabled option
...options,
},
queryClient,

File diff suppressed because it is too large.


@@ -1,13 +1,13 @@
import type { Dispatch, SetStateAction } from "react";
import { useEffect, useState } from "react";
import IBMLogo from "@/components/icons/ibm-logo";
import { LabelInput } from "@/components/label-input";
import { LabelWrapper } from "@/components/label-wrapper";
import { Switch } from "@/components/ui/switch";
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/ui/tooltip";
import { useDebouncedValue } from "@/lib/debounce";
import type { OnboardingVariables } from "../../api/mutations/useOnboardingMutation";
@@ -18,273 +18,273 @@ import { AdvancedOnboarding } from "./advanced";
import { ModelSelector } from "./model-selector";
export function IBMOnboarding({
isEmbedding = false,
setSettings,
sampleDataset,
setSampleDataset,
setIsLoadingModels,
alreadyConfigured = false,
existingEndpoint,
existingProjectId,
hasEnvApiKey = false,
}: {
isEmbedding?: boolean;
setSettings: Dispatch<SetStateAction<OnboardingVariables>>;
sampleDataset: boolean;
setSampleDataset: (dataset: boolean) => void;
setIsLoadingModels?: (isLoading: boolean) => void;
alreadyConfigured?: boolean;
existingEndpoint?: string;
existingProjectId?: string;
hasEnvApiKey?: boolean;
}) {
const [endpoint, setEndpoint] = useState(
alreadyConfigured
? ""
: existingEndpoint || "https://us-south.ml.cloud.ibm.com",
);
const [apiKey, setApiKey] = useState("");
const [getFromEnv, setGetFromEnv] = useState(
hasEnvApiKey && !alreadyConfigured,
);
const [projectId, setProjectId] = useState(
alreadyConfigured ? "" : existingProjectId || "",
);
const options = [
{
value: "https://us-south.ml.cloud.ibm.com",
label: "https://us-south.ml.cloud.ibm.com",
default: true,
},
{
value: "https://eu-de.ml.cloud.ibm.com",
label: "https://eu-de.ml.cloud.ibm.com",
default: false,
},
{
value: "https://eu-gb.ml.cloud.ibm.com",
label: "https://eu-gb.ml.cloud.ibm.com",
default: false,
},
{
value: "https://au-syd.ml.cloud.ibm.com",
label: "https://au-syd.ml.cloud.ibm.com",
default: false,
},
{
value: "https://jp-tok.ml.cloud.ibm.com",
label: "https://jp-tok.ml.cloud.ibm.com",
default: false,
},
{
value: "https://ca-tor.ml.cloud.ibm.com",
label: "https://ca-tor.ml.cloud.ibm.com",
default: false,
},
];
const debouncedEndpoint = useDebouncedValue(endpoint, 500);
const debouncedApiKey = useDebouncedValue(apiKey, 500);
const debouncedProjectId = useDebouncedValue(projectId, 500);
// Fetch models from API when all credentials are provided
const {
data: modelsData,
isLoading: isLoadingModels,
error: modelsError,
} = useGetIBMModelsQuery(
{
endpoint: debouncedEndpoint ? debouncedEndpoint : undefined,
apiKey: getFromEnv ? "" : debouncedApiKey ? debouncedApiKey : undefined,
projectId: debouncedProjectId ? debouncedProjectId : undefined,
},
{
enabled:
(!!debouncedEndpoint && !!debouncedApiKey && !!debouncedProjectId) ||
getFromEnv ||
alreadyConfigured,
},
);
// Use custom hook for model selection logic
const {
languageModel,
embeddingModel,
setLanguageModel,
setEmbeddingModel,
languageModels,
embeddingModels,
} = useModelSelection(modelsData, isEmbedding);
const handleGetFromEnvChange = (fromEnv: boolean) => {
setGetFromEnv(fromEnv);
if (fromEnv) {
setApiKey("");
}
setEmbeddingModel?.("");
setLanguageModel?.("");
};
const handleSampleDatasetChange = (dataset: boolean) => {
setSampleDataset(dataset);
};
useEffect(() => {
setIsLoadingModels?.(isLoadingModels);
}, [isLoadingModels, setIsLoadingModels]);
// Update settings when values change
useUpdateSettings(
"watsonx",
{
endpoint,
apiKey,
projectId,
languageModel,
embeddingModel,
},
setSettings,
isEmbedding,
);
return (
<>
<div className="space-y-4">
<LabelWrapper
label="watsonx.ai API Endpoint"
helperText="Base URL of the API"
id="api-endpoint"
required
>
<div className="space-y-1">
<ModelSelector
options={alreadyConfigured ? [] : options}
value={endpoint}
custom
onValueChange={alreadyConfigured ? () => {} : setEndpoint}
searchPlaceholder="Search endpoint..."
noOptionsPlaceholder={
alreadyConfigured
? "https://•••••••••••••••••••••••••••••••••••••••••"
: "No endpoints available"
}
placeholder="Select endpoint..."
/>
{alreadyConfigured && (
<p className="text-mmd text-muted-foreground">
Reusing endpoint from model provider selection.
</p>
)}
</div>
</LabelWrapper>
<div className="space-y-1">
<LabelInput
label="watsonx Project ID"
helperText="Project ID for the model"
id="project-id"
required
placeholder={
alreadyConfigured ? "••••••••••••••••••••••••" : "your-project-id"
}
value={projectId}
onChange={(e) => setProjectId(e.target.value)}
disabled={alreadyConfigured}
/>
{alreadyConfigured && (
<p className="text-mmd text-muted-foreground">
Reusing project ID from model provider selection.
</p>
)}
</div>
<LabelWrapper
label="Use environment watsonx API key"
id="get-api-key"
description="Reuse the key from your environment config. Turn off to enter a different key."
flex
>
<Tooltip>
<TooltipTrigger asChild>
<div>
<Switch
checked={getFromEnv}
onCheckedChange={handleGetFromEnvChange}
disabled={!hasEnvApiKey || alreadyConfigured}
/>
</div>
</TooltipTrigger>
{!hasEnvApiKey && !alreadyConfigured && (
<TooltipContent>
watsonx API key not detected in the environment.
</TooltipContent>
)}
</Tooltip>
</LabelWrapper>
{!getFromEnv && !alreadyConfigured && (
<div className="space-y-1">
<LabelInput
label="watsonx API key"
helperText="API key to access watsonx.ai"
className={modelsError ? "!border-destructive" : ""}
id="api-key"
type="password"
required
placeholder="your-api-key"
value={apiKey}
onChange={(e) => setApiKey(e.target.value)}
/>
{isLoadingModels && (
<p className="text-mmd text-muted-foreground">
Validating API key...
</p>
)}
{modelsError && (
<p className="text-mmd text-destructive">
Invalid watsonx API key. Verify or replace the key.
</p>
)}
</div>
)}
{alreadyConfigured && (
<div className="space-y-1">
<LabelInput
label="watsonx API key"
helperText="API key to access watsonx.ai"
id="api-key"
type="password"
required
placeholder="•••••••••••••••••••••••••••••••••••••••••"
value={apiKey}
onChange={(e) => setApiKey(e.target.value)}
disabled={true}
/>
<p className="text-mmd text-muted-foreground">
Reusing API key from model provider selection.
</p>
</div>
)}
{getFromEnv && isLoadingModels && (
<p className="text-mmd text-muted-foreground">
Validating configuration...
</p>
)}
{getFromEnv && modelsError && (
<p className="text-mmd text-accent-amber-foreground">
Connection failed. Check your configuration.
</p>
)}
</div>
<AdvancedOnboarding
icon={<IBMLogo className="w-4 h-4" />}
languageModels={languageModels}
embeddingModels={embeddingModels}
languageModel={languageModel}
embeddingModel={embeddingModel}
sampleDataset={sampleDataset}
setLanguageModel={setLanguageModel}
setEmbeddingModel={setEmbeddingModel}
setSampleDataset={handleSampleDatasetChange}
/>
</>
);
<div className="space-y-1">
<LabelInput
label="watsonx Project ID"
helperText="Project ID for the model"
id="project-id"
required
placeholder={
alreadyConfigured ? "••••••••••••••••••••••••" : "your-project-id"
}
value={projectId}
onChange={(e) => setProjectId(e.target.value)}
disabled={alreadyConfigured}
/>
{alreadyConfigured && (
<p className="text-mmd text-muted-foreground">
Reusing project ID from model provider selection.
</p>
)}
</div>
<LabelWrapper
label="Use environment watsonx API key"
id="get-api-key"
description="Reuse the key from your environment config. Turn off to enter a different key."
flex
>
<Tooltip>
<TooltipTrigger asChild>
<div>
<Switch
checked={getFromEnv}
onCheckedChange={handleGetFromEnvChange}
disabled={!hasEnvApiKey || alreadyConfigured}
/>
</div>
</TooltipTrigger>
{!hasEnvApiKey && !alreadyConfigured && (
<TooltipContent>
watsonx API key not detected in the environment.
</TooltipContent>
)}
</Tooltip>
</LabelWrapper>
{!getFromEnv && !alreadyConfigured && (
<div className="space-y-1">
<LabelInput
label="watsonx API key"
helperText="API key to access watsonx.ai"
className={modelsError ? "!border-destructive" : ""}
id="api-key"
type="password"
required
placeholder="your-api-key"
value={apiKey}
onChange={(e) => setApiKey(e.target.value)}
/>
{isLoadingModels && (
<p className="text-mmd text-muted-foreground">
Validating API key...
</p>
)}
{modelsError && (
<p className="text-mmd text-destructive">
Invalid watsonx API key. Verify or replace the key.
</p>
)}
</div>
)}
{alreadyConfigured && (
<div className="space-y-1">
<LabelInput
label="watsonx API key"
helperText="API key to access watsonx.ai"
id="api-key"
type="password"
required
placeholder="•••••••••••••••••••••••••••••••••••••••••"
value={apiKey}
onChange={(e) => setApiKey(e.target.value)}
disabled={true}
/>
<p className="text-mmd text-muted-foreground">
Reusing API key from model provider selection.
</p>
</div>
)}
{getFromEnv && isLoadingModels && (
<p className="text-mmd text-muted-foreground">
Validating configuration...
</p>
)}
{getFromEnv && modelsError && (
<p className="text-mmd text-accent-amber-foreground">
Connection failed. Check your configuration.
</p>
)}
</div>
<AdvancedOnboarding
icon={<IBMLogo className="w-4 h-4" />}
languageModels={languageModels}
embeddingModels={embeddingModels}
languageModel={languageModel}
embeddingModel={embeddingModel}
sampleDataset={sampleDataset}
setLanguageModel={setLanguageModel}
setEmbeddingModel={setEmbeddingModel}
setSampleDataset={handleSampleDatasetChange}
/>
</>
);
}
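
Note the fix in the models query's enabled condition: the old ||-joined check fired a request as soon as any one credential was typed, while the new version requires endpoint, API key, and project ID together unless the key comes from the environment or the provider is already configured. The credential fields also pass through useDebouncedValue before feeding that query, so typing does not fire a request per keystroke; only a value that survives 500 ms unchanged does. The helper's implementation is outside this diff, but a common sketch matching the signature used above would look roughly like this:

import { useEffect, useState } from "react";

// Assumed shape of "@/lib/debounce": publish the value only after it has
// been stable for delayMs; restarting the timer on each change drops drafts.
export function useDebouncedValue<T>(value: T, delayMs: number): T {
  const [debounced, setDebounced] = useState(value);
  useEffect(() => {
    const id = setTimeout(() => setDebounced(value), delayMs);
    return () => clearTimeout(id);
  }, [value, delayMs]);
  return debounced;
}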


@@ -507,7 +507,7 @@ const OnboardingCard = ({
hasEnvApiKey={
currentSettings?.providers?.openai?.has_api_key === true
}
alreadyConfigured={providerAlreadyConfigured && modelProvider === "openai"}
/>
</TabsContent>
<TabsContent value="watsonx">
@@ -517,7 +517,7 @@ const OnboardingCard = ({
setSampleDataset={setSampleDataset}
setIsLoadingModels={setIsLoadingModels}
isEmbedding={isEmbedding}
alreadyConfigured={providerAlreadyConfigured && modelProvider === "watsonx"}
existingEndpoint={currentSettings?.providers?.watsonx?.endpoint}
existingProjectId={currentSettings?.providers?.watsonx?.project_id}
hasEnvApiKey={currentSettings?.providers?.watsonx?.has_api_key === true}
@@ -530,7 +530,7 @@ const OnboardingCard = ({
setSampleDataset={setSampleDataset}
setIsLoadingModels={setIsLoadingModels}
isEmbedding={isEmbedding}
alreadyConfigured={providerAlreadyConfigured && modelProvider === "ollama"}
existingEndpoint={currentSettings?.providers?.ollama?.endpoint}
/>
</TabsContent>


@@ -1,3 +1,4 @@
import { X } from "lucide-react";
import { AnimatePresence, motion } from "motion/react";
import { type ChangeEvent, useEffect, useRef, useState } from "react";
import { toast } from "sonner";
@@ -7,242 +8,400 @@ import { useGetTasksQuery } from "@/app/api/queries/useGetTasksQuery";
import { AnimatedProviderSteps } from "@/app/onboarding/_components/animated-provider-steps";
import { Button } from "@/components/ui/button";
import {
ONBOARDING_UPLOAD_STEPS_KEY,
ONBOARDING_USER_DOC_FILTER_ID_KEY,
} from "@/lib/constants";
import { uploadFile } from "@/lib/upload-utils";
interface OnboardingUploadProps {
onComplete: () => void;
}
const OnboardingUpload = ({ onComplete }: OnboardingUploadProps) => {
const fileInputRef = useRef<HTMLInputElement>(null);
const [isUploading, setIsUploading] = useState(false);
const [currentStep, setCurrentStep] = useState<number | null>(null);
const [uploadedFilename, setUploadedFilename] = useState<string | null>(null);
const [uploadedTaskId, setUploadedTaskId] = useState<string | null>(null);
const [shouldCreateFilter, setShouldCreateFilter] = useState(false);
const [isCreatingFilter, setIsCreatingFilter] = useState(false);
const [error, setError] = useState<string | null>(null);
const createFilterMutation = useCreateFilter();
// Track which tasks we've already handled to prevent infinite loops
const handledFailedTasksRef = useRef<Set<string>>(new Set());
const STEP_LIST = [
"Uploading your document",
"Generating embeddings",
"Ingesting document",
"Processing your document",
];
// Query tasks to track completion
const { data: tasks } = useGetTasksQuery({
enabled: currentStep !== null, // Only poll when upload has started
refetchInterval: currentStep !== null ? 1000 : false, // Poll every 1 second during upload
});
// Monitor tasks and call onComplete when file processing is done
useEffect(() => {
if (currentStep === null || !tasks || !uploadedTaskId) {
return;
}
// Find the task by task ID from the upload response
const matchingTask = tasks.find((task) => task.task_id === uploadedTaskId);
// If no matching task found, wait for it to appear
if (!matchingTask) {
return;
}
// Skip if this task was already handled as a failed task (from a previous failed upload)
// This prevents processing old failed tasks when a new upload starts
if (handledFailedTasksRef.current.has(matchingTask.task_id)) {
// Check if it's a failed task that we've already handled
const hasFailedFile =
matchingTask.files &&
Object.values(matchingTask.files).some(
(file) => file.status === "failed" || file.status === "error",
);
if (hasFailedFile) {
// This is an old failed task that we've already handled, ignore it
console.log(
"Skipping already-handled failed task:",
matchingTask.task_id,
);
return;
}
// If it's not a failed task, remove it from handled list (it might have succeeded on retry)
handledFailedTasksRef.current.delete(matchingTask.task_id);
}
// Check if any file failed in the matching task
const hasFailedFile = (() => {
// Must have files object
if (!matchingTask.files || typeof matchingTask.files !== "object") {
return false;
}
const fileEntries = Object.values(matchingTask.files);
// Must have at least one file
if (fileEntries.length === 0) {
return false;
}
// Check if any file has failed status
return fileEntries.some(
(file) => file.status === "failed" || file.status === "error",
);
})();
// If any file failed, show error and jump back one step (like onboarding-card.tsx)
// Only handle if we haven't already handled this task
if (
hasFailedFile &&
!isCreatingFilter &&
!handledFailedTasksRef.current.has(matchingTask.task_id)
) {
console.error("File failed in task, jumping back one step", matchingTask);
// Mark this task as handled to prevent infinite loops
handledFailedTasksRef.current.add(matchingTask.task_id);
// Extract error messages from failed files
const errorMessages: string[] = [];
if (matchingTask.files) {
Object.values(matchingTask.files).forEach((file) => {
if (
(file.status === "failed" || file.status === "error") &&
file.error
) {
errorMessages.push(file.error);
}
});
}
// Also check task-level error
if (matchingTask.error) {
errorMessages.push(matchingTask.error);
}
// Use the first error message, or a generic message if no errors found
const errorMessage =
errorMessages.length > 0
? errorMessages[0]
: "Document failed to ingest. Please try again with a different file.";
// Set error message and jump back one step
setError(errorMessage);
setCurrentStep(STEP_LIST.length);
// Dispatch event that chat context can listen to
// This avoids circular dependency issues
if (typeof window !== "undefined") {
window.dispatchEvent(
new CustomEvent("ingestionFailed", {
detail: { source: "onboarding" },
}),
);
}
// Clear filter creation flags since ingestion failed
setShouldCreateFilter(false);
setUploadedFilename(null);
// Show error toast notification
toast.error("Document upload failed", {
description: errorMessage,
duration: 5000,
});
// Jump back one step after 1 second (go back to upload step)
setTimeout(() => {
setCurrentStep(null);
}, 1000);
return;
}
// Check if the matching task is still active (pending, running, or processing)
const isTaskActive =
matchingTask.status === "pending" ||
matchingTask.status === "running" ||
matchingTask.status === "processing";
// If task is completed successfully (no failures) and has processed files, complete the onboarding step
if (
(!isTaskActive || (matchingTask.processed_files ?? 0) > 0) &&
!hasFailedFile
) {
// Set to final step to show "Done"
setCurrentStep(STEP_LIST.length);
// Create knowledge filter for uploaded document if requested
// Guard against race condition: only create if not already creating
if (shouldCreateFilter && uploadedFilename && !isCreatingFilter) {
// Reset flags immediately (synchronously) to prevent duplicate creation
setShouldCreateFilter(false);
const filename = uploadedFilename;
setUploadedFilename(null);
setIsCreatingFilter(true);
// Get display name from filename (remove extension for cleaner name)
const displayName = filename.includes(".")
? filename.substring(0, filename.lastIndexOf("."))
: filename;
const queryData = JSON.stringify({
query: "",
filters: {
data_sources: [filename],
document_types: ["*"],
owners: ["*"],
connector_types: ["*"],
},
limit: 10,
scoreThreshold: 0,
color: "green",
icon: "file",
});
// Wait for filter creation to complete before proceeding
createFilterMutation
.mutateAsync({
name: displayName,
description: `Filter for ${filename}`,
queryData: queryData,
})
.then((result) => {
if (result.filter?.id && typeof window !== "undefined") {
localStorage.setItem(
ONBOARDING_USER_DOC_FILTER_ID_KEY,
result.filter.id,
);
console.log(
"Created knowledge filter for uploaded document",
result.filter.id,
);
}
})
.catch((error) => {
console.error("Failed to create knowledge filter:", error);
})
.finally(() => {
setIsCreatingFilter(false);
// Wait a bit before completing (after filter is created)
setTimeout(() => {
onComplete();
}, 1000);
});
} else {
// No filter to create, just complete
// Wait a bit before completing
setTimeout(() => {
onComplete();
}, 1000);
}
}
}, [
tasks,
currentStep,
onComplete,
shouldCreateFilter,
uploadedFilename,
uploadedTaskId,
createFilterMutation,
isCreatingFilter,
]);
const resetFileInput = () => {
if (fileInputRef.current) {
fileInputRef.current.value = "";
}
};
const handleUploadClick = () => {
// Clear any previous error when user clicks to upload again
setError(null);
fileInputRef.current?.click();
};
const performUpload = async (file: File) => {
setIsUploading(true);
// Clear any previous error when starting a new upload
setError(null);
// Clear handled tasks ref to allow retry
handledFailedTasksRef.current.clear();
// Reset task ID to prevent matching old failed tasks
setUploadedTaskId(null);
// Clear filter creation flags
setShouldCreateFilter(false);
setUploadedFilename(null);
try {
setCurrentStep(0);
const result = await uploadFile(file, true, true); // Pass createFilter=true
console.log("Document upload task started successfully");
// Store task ID to track the specific upload task
if (result.taskId) {
setUploadedTaskId(result.taskId);
}
// Store filename and createFilter flag in state to create filter after ingestion succeeds
if (result.createFilter && result.filename) {
setUploadedFilename(result.filename);
setShouldCreateFilter(true);
}
// Move to processing step - task monitoring will handle completion
setTimeout(() => {
setCurrentStep(1);
}, 1500);
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Upload failed";
console.error("Upload failed", errorMessage);
// Dispatch event that chat context can listen to
// This avoids circular dependency issues
if (typeof window !== "undefined") {
window.dispatchEvent(
new CustomEvent("ingestionFailed", {
detail: { source: "onboarding" },
}),
);
}
// Show error toast notification
toast.error("Document upload failed", {
description: errorMessage,
duration: 5000,
});
// Reset on error
setCurrentStep(null);
setUploadedTaskId(null);
setError(errorMessage);
setShouldCreateFilter(false);
setUploadedFilename(null);
} finally {
setIsUploading(false);
}
};
const handleFileChange = async (event: ChangeEvent<HTMLInputElement>) => {
const selectedFile = event.target.files?.[0];
if (!selectedFile) {
resetFileInput();
return;
}
try {
await performUpload(selectedFile);
} catch (error) {
console.error(
"Unable to prepare file for upload",
(error as Error).message,
);
} finally {
resetFileInput();
}
};
return (
<AnimatePresence mode="wait">
{currentStep === null ? (
<motion.div
key="user-ingest"
initial={{ opacity: 1, y: 0 }}
exit={{ opacity: 0, y: -24 }}
transition={{ duration: 0.4, ease: "easeInOut" }}
>
<div className="w-full flex flex-col gap-4">
<AnimatePresence mode="wait">
{error && (
<motion.div
key="error"
initial={{ opacity: 1, y: 0, height: "auto" }}
exit={{ opacity: 0, y: -10, height: 0 }}
>
<div className="pb-2 flex items-center gap-4">
<X className="w-4 h-4 text-destructive shrink-0" />
<span className="text-sm text-muted-foreground">
{error}
</span>
</div>
</motion.div>
)}
</AnimatePresence>
<div>
<Button
size="sm"
variant="outline"
onClick={handleUploadClick}
disabled={isUploading}
>
<div>{isUploading ? "Uploading..." : "Add a document"}</div>
</Button>
</div>
<input
ref={fileInputRef}
type="file"
onChange={handleFileChange}
className="hidden"
accept=".pdf,.doc,.docx,.txt,.md,.rtf,.odt"
/>
</div>
</motion.div>
) : (
<motion.div
key="ingest-steps"
initial={{ opacity: 0, y: 24 }}
animate={{ opacity: 1, y: 0 }}
transition={{ duration: 0.4, ease: "easeInOut" }}
>
<AnimatedProviderSteps
currentStep={currentStep}
setCurrentStep={setCurrentStep}
isCompleted={false}
steps={STEP_LIST}
storageKey={ONBOARDING_UPLOAD_STEPS_KEY}
hasError={!!error}
/>
</motion.div>
)}
</AnimatePresence>
);
};
export default OnboardingUpload;
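
The rewritten effect keys everything off the task_id returned by the upload call instead of scanning for any active task, which is what makes retries and stale failures distinguishable. The decision table, condensed into a pure helper for illustration (Task fields inferred from the properties read above):

interface TaskFile { status: string; error?: string }
interface Task {
  task_id: string;
  status: "pending" | "running" | "processing" | "completed" | "failed";
  files?: Record<string, TaskFile>;
  processed_files?: number;
}

export type UploadOutcome = "waiting" | "active" | "failed" | "done";

export function classifyUpload(tasks: Task[], uploadedTaskId: string): UploadOutcome {
  const task = tasks.find((t) => t.task_id === uploadedTaskId);
  if (!task) return "waiting"; // not visible yet; keep polling
  const files = Object.values(task.files ?? {});
  if (files.some((f) => f.status === "failed" || f.status === "error")) {
    return "failed";
  }
  const isActive =
    task.status === "pending" ||
    task.status === "running" ||
    task.status === "processing";
  return !isActive || (task.processed_files ?? 0) > 0 ? "done" : "active";
}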


@@ -47,8 +47,7 @@ export function ChatRenderer({
refreshConversations,
startNewConversation,
setConversationFilter,
setOnboardingComplete,
} = useChat();
// Initialize onboarding state based on local storage and settings
@@ -170,6 +169,9 @@ export function ChatRenderer({
localStorage.removeItem(ONBOARDING_UPLOAD_STEPS_KEY);
}
// Mark onboarding as complete in context
setOnboardingComplete(true);
// Clear ALL conversation state so next message starts fresh
await startNewConversation();
@@ -202,6 +204,8 @@ export function ChatRenderer({
localStorage.removeItem(ONBOARDING_CARD_STEPS_KEY);
localStorage.removeItem(ONBOARDING_UPLOAD_STEPS_KEY);
}
// Mark onboarding as complete in context
setOnboardingComplete(true);
// Store the OpenRAG docs filter as default for new conversations
storeDefaultFilterForNewConversations(false);
setShowLayout(true);


@@ -5,125 +5,131 @@ import { useRouter } from "next/navigation";
import { useProviderHealthQuery } from "@/app/api/queries/useProviderHealthQuery";
import type { ModelProvider } from "@/app/settings/_helpers/model-helpers";
import { Banner, BannerIcon, BannerTitle } from "@/components/ui/banner";
import { useChat } from "@/contexts/chat-context";
import { cn } from "@/lib/utils";
import { Button } from "./ui/button";
interface ProviderHealthBannerProps {
className?: string;
}
// Custom hook to check provider health status
export function useProviderHealth() {
const { hasChatError } = useChat();
const {
data: health,
isLoading,
isFetching,
error,
isError,
} = useProviderHealthQuery({
test_completion: hasChatError, // Use test_completion=true when chat errors occur
});
const isHealthy = health?.status === "healthy" && !isError;
// Only consider unhealthy if backend is up but provider validation failed
// Don't show banner if backend is unavailable
const isUnhealthy =
health?.status === "unhealthy" || health?.status === "error";
const isBackendUnavailable =
health?.status === "backend-unavailable" || isError;
return {
health,
isLoading,
isFetching,
error,
isError,
isHealthy,
isUnhealthy,
isBackendUnavailable,
};
}
const providerTitleMap: Record<ModelProvider, string> = {
openai: "OpenAI",
anthropic: "Anthropic",
ollama: "Ollama",
watsonx: "IBM watsonx.ai",
openai: "OpenAI",
anthropic: "Anthropic",
ollama: "Ollama",
watsonx: "IBM watsonx.ai",
};
export function ProviderHealthBanner({ className }: ProviderHealthBannerProps) {
const { isLoading, isHealthy, isUnhealthy, health } = useProviderHealth();
const router = useRouter();
// Only show banner when provider is unhealthy (not when backend is unavailable)
if (isLoading || isHealthy) {
return null;
}
if (isUnhealthy) {
const llmProvider = health?.llm_provider || health?.provider;
const embeddingProvider = health?.embedding_provider;
const llmError = health?.llm_error;
const embeddingError = health?.embedding_error;
// Determine which provider has the error
let errorProvider: string | undefined;
let errorMessage: string;
if (llmError && embeddingError) {
// Both have errors - check if they're the same
if (llmError === embeddingError) {
// Same error for both - show once
errorMessage = llmError;
} else {
// Different errors - show both
errorMessage = `${llmError}; ${embeddingError}`;
}
errorProvider = undefined; // Don't link to a specific provider
} else if (llmError) {
// Only LLM has error
errorProvider = llmProvider;
errorMessage = llmError;
} else if (embeddingError) {
// Only embedding has error
errorProvider = embeddingProvider;
errorMessage = embeddingError;
} else {
// Fallback to original message
errorMessage = health?.message || "Provider validation failed";
errorProvider = llmProvider;
}
const providerTitle = errorProvider
? providerTitleMap[errorProvider as ModelProvider] || errorProvider
: "Provider";
const settingsUrl = errorProvider
? `/settings?setup=${errorProvider}`
: "/settings";
return (
<Banner
className={cn(
"bg-red-50 dark:bg-red-950 text-foreground border-accent-red border-b w-full",
className,
)}
>
<BannerIcon
className="text-accent-red-foreground"
icon={AlertTriangle}
/>
<BannerTitle className="font-medium flex items-center gap-2">
{llmError && embeddingError ? (
<>Provider errors - {errorMessage}</>
) : (
<>
{providerTitle} error - {errorMessage}
</>
)}
</BannerTitle>
<Button size="sm" onClick={() => router.push(settingsUrl)}>
Fix Setup
</Button>
</Banner>
);
}
return null;
}
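
Collapsing identical LLM and embedding errors keeps the banner from printing the same message twice when one bad credential breaks both checks. The branch logic, extracted as a helper purely for illustration (not code from this PR):

export function combineProviderErrors(
  llmError?: string,
  embeddingError?: string,
): string | null {
  if (llmError && embeddingError) {
    // One bad API key usually surfaces as the same text on both checks.
    return llmError === embeddingError ? llmError : `${llmError}; ${embeddingError}`;
  }
  return llmError ?? embeddingError ?? null;
}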


@@ -10,6 +10,7 @@ import {
useRef,
useState,
} from "react";
import { ONBOARDING_STEP_KEY } from "@/lib/constants";
export type EndpointType = "chat" | "langflow";
@@ -81,6 +82,8 @@ interface ChatContextType {
setConversationFilter: (filter: KnowledgeFilter | null, responseId?: string | null) => void;
hasChatError: boolean;
setChatError: (hasError: boolean) => void;
isOnboardingComplete: boolean;
setOnboardingComplete: (complete: boolean) => void;
}
const ChatContext = createContext<ChatContextType | undefined>(undefined);
@@ -111,6 +114,37 @@ export function ChatProvider({ children }: ChatProviderProps) {
const [conversationFilter, setConversationFilterState] =
useState<KnowledgeFilter | null>(null);
const [hasChatError, setChatError] = useState(false);
// Check if onboarding is complete (onboarding step key should be null)
const [isOnboardingComplete, setIsOnboardingComplete] = useState(() => {
if (typeof window === "undefined") return false;
return localStorage.getItem(ONBOARDING_STEP_KEY) === null;
});
// Sync onboarding completion state with localStorage
useEffect(() => {
const checkOnboarding = () => {
if (typeof window !== "undefined") {
setIsOnboardingComplete(
localStorage.getItem(ONBOARDING_STEP_KEY) === null,
);
}
};
// Check on mount
checkOnboarding();
// Listen for storage events (for cross-tab sync)
window.addEventListener("storage", checkOnboarding);
return () => {
window.removeEventListener("storage", checkOnboarding);
};
}, []);
const setOnboardingComplete = useCallback((complete: boolean) => {
setIsOnboardingComplete(complete);
}, []);
// Listen for ingestion failures and set chat error flag
useEffect(() => {
@@ -375,6 +409,8 @@ export function ChatProvider({ children }: ChatProviderProps) {
setConversationFilter,
hasChatError,
setChatError,
isOnboardingComplete,
setOnboardingComplete,
}),
[
endpoint,
@@ -396,6 +432,8 @@ export function ChatProvider({ children }: ChatProviderProps) {
conversationFilter,
setConversationFilter,
hasChatError,
isOnboardingComplete,
setOnboardingComplete,
],
);
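
One subtlety the context change works around: the browser's "storage" event fires only in other tabs, never in the tab that wrote the key, which is why an explicit setOnboardingComplete setter is exposed alongside the listener. The pattern in isolation (key value assumed for the sketch):

import { useCallback, useEffect, useState } from "react";

const ONBOARDING_STEP_KEY = "onboarding_step"; // hypothetical key value

export function useOnboardingComplete() {
  // Complete means the step key has been cleared from localStorage.
  const [complete, setComplete] = useState(
    () =>
      typeof window !== "undefined" &&
      localStorage.getItem(ONBOARDING_STEP_KEY) === null,
  );
  useEffect(() => {
    // Cross-tab sync only; the writing tab must call markComplete itself.
    const sync = () =>
      setComplete(localStorage.getItem(ONBOARDING_STEP_KEY) === null);
    window.addEventListener("storage", sync);
    return () => window.removeEventListener("storage", sync);
  }, []);
  const markComplete = useCallback(() => setComplete(true), []);
  return { complete, markComplete };
}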


@@ -12,6 +12,7 @@ export interface UploadFileResult {
raw: unknown;
createFilter?: boolean;
filename?: string;
taskId?: string;
}
export async function duplicateCheck(
@@ -158,6 +159,7 @@ export async function uploadFile(
(uploadIngestJson as { upload?: { id?: string } }).upload?.id ||
(uploadIngestJson as { id?: string }).id ||
(uploadIngestJson as { task_id?: string }).task_id;
const taskId = (uploadIngestJson as { task_id?: string }).task_id;
const filePath =
(uploadIngestJson as { upload?: { path?: string } }).upload?.path ||
(uploadIngestJson as { path?: string }).path ||
@@ -197,6 +199,7 @@ export async function uploadFile(
raw: uploadIngestJson,
createFilter: shouldCreateFilter,
filename,
taskId,
};
return result;
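
The new taskId follows the same defensive pattern as the surrounding id and path lookups: the backend payload is treated as untyped JSON and narrowed per field, falling through a chain of plausible shapes. The idiom on its own:

// Mirrors the fallback chain used for the upload id: try the shapes the
// backend might return, taking the first defined value.
function readUploadId(json: unknown): string | undefined {
  const j = json as { upload?: { id?: string }; id?: string; task_id?: string };
  return j.upload?.id ?? j.id ?? j.task_id;
}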


@@ -1,5 +1,6 @@
"""Provider validation utilities for testing API keys and models during onboarding."""
import json
import httpx
from utils.container_utils import transform_localhost_url
from utils.logging_config import get_logger
@@ -7,6 +8,106 @@ from utils.logging_config import get_logger
logger = get_logger(__name__)
def _parse_json_error_message(error_text: str) -> str:
"""Parse JSON error message and extract just the message field."""
try:
# Try to parse as JSON
error_data = json.loads(error_text)
if isinstance(error_data, dict):
# WatsonX format: {"errors": [{"code": "...", "message": "..."}], ...}
if "errors" in error_data and isinstance(error_data["errors"], list):
errors = error_data["errors"]
if len(errors) > 0 and isinstance(errors[0], dict):
message = errors[0].get("message", "")
if message:
return message
code = errors[0].get("code", "")
if code:
return f"Error: {code}"
# OpenAI format: {"error": {"message": "...", "type": "...", "code": "..."}}
if "error" in error_data:
error_obj = error_data["error"]
if isinstance(error_obj, dict):
message = error_obj.get("message", "")
if message:
return message
# Direct message field
if "message" in error_data:
return error_data["message"]
# Generic format: {"detail": "..."}
if "detail" in error_data:
return error_data["detail"]
except (json.JSONDecodeError, ValueError, TypeError):
pass
# Return original text if not JSON or can't parse
return error_text
def _extract_error_details(response: httpx.Response) -> str:
"""Extract detailed error message from API response."""
try:
# Try to parse JSON error response
error_data = response.json()
# Common error response formats
if isinstance(error_data, dict):
# WatsonX format: {"errors": [{"code": "...", "message": "..."}], ...}
if "errors" in error_data and isinstance(error_data["errors"], list):
errors = error_data["errors"]
if len(errors) > 0 and isinstance(errors[0], dict):
# Extract just the message from the first error
message = errors[0].get("message", "")
if message:
return message
# Fallback to code if no message
code = errors[0].get("code", "")
if code:
return f"Error: {code}"
# OpenAI format: {"error": {"message": "...", "type": "...", "code": "..."}}
if "error" in error_data:
error_obj = error_data["error"]
if isinstance(error_obj, dict):
message = error_obj.get("message", "")
error_type = error_obj.get("type", "")
code = error_obj.get("code", "")
if message:
details = message
if error_type:
details += f" (type: {error_type})"
if code:
details += f" (code: {code})"
return details
# Anthropic format: {"error": {"message": "...", "type": "..."}}
if "message" in error_data:
return error_data["message"]
# Generic format: {"message": "..."}
if "detail" in error_data:
return error_data["detail"]
# If JSON parsing worked but no structured error found, try parsing text
response_text = response.text[:500]
parsed = _parse_json_error_message(response_text)
if parsed != response_text:
return parsed
return response_text
except (json.JSONDecodeError, ValueError):
# If JSON parsing fails, try parsing the text as JSON string
response_text = response.text[:500] if response.text else f"HTTP {response.status_code}"
parsed = _parse_json_error_message(response_text)
if parsed != response_text:
return parsed
return response_text
async def validate_provider_setup(
provider: str,
api_key: str = None,
@@ -30,7 +131,7 @@ async def validate_provider_setup(
If False, performs lightweight validation (no credits consumed). Default: False.
Raises:
Exception: If validation fails, raises the original exception with the actual error message.
"""
provider_lower = provider.lower()
@@ -70,7 +171,8 @@ async def validate_provider_setup(
except Exception as e:
logger.error(f"Validation failed for provider {provider_lower}: {str(e)}")
raise Exception("Setup failed, please try again or select a different provider.")
# Preserve the original error message instead of replacing it with a generic one
raise
async def test_lightweight_health(
@@ -155,8 +257,9 @@ async def _test_openai_lightweight_health(api_key: str) -> None:
)
if response.status_code != 200:
logger.error(f"OpenAI lightweight health check failed: {response.status_code}")
raise Exception(f"OpenAI API key validation failed: {response.status_code}")
error_details = _extract_error_details(response)
logger.error(f"OpenAI lightweight health check failed: {response.status_code} - {error_details}")
raise Exception(f"OpenAI API key validation failed: {error_details}")
logger.info("OpenAI lightweight health check passed")
@@ -225,8 +328,9 @@ async def _test_openai_completion_with_tools(api_key: str, llm_model: str) -> None:
)
if response.status_code != 200:
logger.error(f"OpenAI completion test failed: {response.status_code} - {response.text}")
raise Exception(f"OpenAI API error: {response.status_code}")
error_details = _extract_error_details(response)
logger.error(f"OpenAI completion test failed: {response.status_code} - {error_details}")
raise Exception(f"OpenAI API error: {error_details}")
logger.info("OpenAI completion with tool calling test passed")
@@ -260,8 +364,9 @@ async def _test_openai_embedding(api_key: str, embedding_model: str) -> None:
)
if response.status_code != 200:
logger.error(f"OpenAI embedding test failed: {response.status_code} - {response.text}")
raise Exception(f"OpenAI API error: {response.status_code}")
error_details = _extract_error_details(response)
logger.error(f"OpenAI embedding test failed: {response.status_code} - {error_details}")
raise Exception(f"OpenAI API error: {error_details}")
data = response.json()
if not data.get("data") or len(data["data"]) == 0:
@@ -300,8 +405,9 @@ async def _test_watsonx_lightweight_health(
)
if token_response.status_code != 200:
logger.error(f"IBM IAM token request failed: {token_response.status_code}")
raise Exception("Failed to authenticate with IBM Watson - invalid API key")
error_details = _extract_error_details(token_response)
logger.error(f"IBM IAM token request failed: {token_response.status_code} - {error_details}")
raise Exception(f"Failed to authenticate with IBM Watson: {error_details}")
bearer_token = token_response.json().get("access_token")
if not bearer_token:
@@ -335,8 +441,9 @@ async def _test_watsonx_completion_with_tools(
)
if token_response.status_code != 200:
logger.error(f"IBM IAM token request failed: {token_response.status_code}")
raise Exception("Failed to authenticate with IBM Watson")
error_details = _extract_error_details(token_response)
logger.error(f"IBM IAM token request failed: {token_response.status_code} - {error_details}")
raise Exception(f"Failed to authenticate with IBM Watson: {error_details}")
bearer_token = token_response.json().get("access_token")
if not bearer_token:
@@ -388,8 +495,11 @@ async def _test_watsonx_completion_with_tools(
)
if response.status_code != 200:
logger.error(f"IBM Watson completion test failed: {response.status_code} - {response.text}")
raise Exception(f"IBM Watson API error: {response.status_code}")
error_details = _extract_error_details(response)
logger.error(f"IBM Watson completion test failed: {response.status_code} - {error_details}")
# If error_details is still JSON, parse it to extract just the message
parsed_details = _parse_json_error_message(error_details)
raise Exception(f"IBM Watson API error: {parsed_details}")
logger.info("IBM Watson completion with tool calling test passed")
@@ -398,6 +508,13 @@ async def _test_watsonx_completion_with_tools(
raise Exception("Request timed out")
except Exception as e:
logger.error(f"IBM Watson completion test failed: {str(e)}")
# If the error message contains JSON, parse it to extract just the message
error_str = str(e)
if "IBM Watson API error: " in error_str:
json_part = error_str.split("IBM Watson API error: ", 1)[1]
parsed_message = _parse_json_error_message(json_part)
if parsed_message != json_part:
raise Exception(f"IBM Watson API error: {parsed_message}")
raise
@@ -419,8 +536,9 @@ async def _test_watsonx_embedding(
)
if token_response.status_code != 200:
logger.error(f"IBM IAM token request failed: {token_response.status_code}")
raise Exception("Failed to authenticate with IBM Watson")
error_details = _extract_error_details(token_response)
logger.error(f"IBM IAM token request failed: {token_response.status_code} - {error_details}")
raise Exception(f"Failed to authenticate with IBM Watson: {error_details}")
bearer_token = token_response.json().get("access_token")
if not bearer_token:
@@ -450,8 +568,11 @@ async def _test_watsonx_embedding(
)
if response.status_code != 200:
logger.error(f"IBM Watson embedding test failed: {response.status_code} - {response.text}")
raise Exception(f"IBM Watson API error: {response.status_code}")
error_details = _extract_error_details(response)
logger.error(f"IBM Watson embedding test failed: {response.status_code} - {error_details}")
# If error_details is still JSON, parse it to extract just the message
parsed_details = _parse_json_error_message(error_details)
raise Exception(f"IBM Watson API error: {parsed_details}")
data = response.json()
if not data.get("results") or len(data["results"]) == 0:
@@ -464,6 +585,13 @@ async def _test_watsonx_embedding(
raise Exception("Request timed out")
except Exception as e:
logger.error(f"IBM Watson embedding test failed: {str(e)}")
# If the error message contains JSON, parse it to extract just the message
error_str = str(e)
if "IBM Watson API error: " in error_str:
json_part = error_str.split("IBM Watson API error: ", 1)[1]
parsed_message = _parse_json_error_message(json_part)
if parsed_message != json_part:
raise Exception(f"IBM Watson API error: {parsed_message}")
raise
@@ -483,8 +611,9 @@ async def _test_ollama_lightweight_health(endpoint: str) -> None:
)
if response.status_code != 200:
logger.error(f"Ollama lightweight health check failed: {response.status_code}")
raise Exception(f"Ollama endpoint not responding: {response.status_code}")
error_details = _extract_error_details(response)
logger.error(f"Ollama lightweight health check failed: {response.status_code} - {error_details}")
raise Exception(f"Ollama endpoint not responding: {error_details}")
logger.info("Ollama lightweight health check passed")
@@ -537,8 +666,9 @@ async def _test_ollama_completion_with_tools(llm_model: str, endpoint: str) -> None:
)
if response.status_code != 200:
logger.error(f"Ollama completion test failed: {response.status_code} - {response.text}")
raise Exception(f"Ollama API error: {response.status_code}")
error_details = _extract_error_details(response)
logger.error(f"Ollama completion test failed: {response.status_code} - {error_details}")
raise Exception(f"Ollama API error: {error_details}")
logger.info("Ollama completion with tool calling test passed")
@@ -569,8 +699,9 @@ async def _test_ollama_embedding(embedding_model: str, endpoint: str) -> None:
)
if response.status_code != 200:
logger.error(f"Ollama embedding test failed: {response.status_code} - {response.text}")
raise Exception(f"Ollama API error: {response.status_code}")
error_details = _extract_error_details(response)
logger.error(f"Ollama embedding test failed: {response.status_code} - {error_details}")
raise Exception(f"Ollama API error: {error_details}")
data = response.json()
if not data.get("embedding"):
@@ -616,8 +747,9 @@ async def _test_anthropic_lightweight_health(api_key: str) -> None:
)
if response.status_code != 200:
logger.error(f"Anthropic lightweight health check failed: {response.status_code}")
raise Exception(f"Anthropic API key validation failed: {response.status_code}")
error_details = _extract_error_details(response)
logger.error(f"Anthropic lightweight health check failed: {response.status_code} - {error_details}")
raise Exception(f"Anthropic API key validation failed: {error_details}")
logger.info("Anthropic lightweight health check passed")
@@ -672,8 +804,9 @@ async def _test_anthropic_completion_with_tools(api_key: str, llm_model: str) -> None:
)
if response.status_code != 200:
logger.error(f"Anthropic completion test failed: {response.status_code} - {response.text}")
raise Exception(f"Anthropic API error: {response.status_code}")
error_details = _extract_error_details(response)
logger.error(f"Anthropic completion test failed: {response.status_code} - {error_details}")
raise Exception(f"Anthropic API error: {error_details}")
logger.info("Anthropic completion with tool calling test passed")