Merge remote-tracking branch 'origin/fix/chat_filter_clearing' into all-merges

Lucas Oliveira 2025-12-05 17:14:23 -03:00
commit 2c9e8c520e
12 changed files with 1952 additions and 1652 deletions

View file

@@ -4,6 +4,7 @@ import {
useQueryClient,
} from "@tanstack/react-query";
import type { EndpointType } from "@/contexts/chat-context";
import { useChat } from "@/contexts/chat-context";
export interface RawConversation {
response_id: string;
@@ -50,6 +51,7 @@ export const useGetConversationsQuery = (
options?: Omit<UseQueryOptions, "queryKey" | "queryFn">,
) => {
const queryClient = useQueryClient();
const { isOnboardingComplete } = useChat();
async function getConversations(context: { signal?: AbortSignal }): Promise<ChatConversation[]> {
try {
@@ -95,6 +97,11 @@ export const useGetConversationsQuery = (
}
}
// Extract enabled from options and combine with onboarding completion check
// Query is only enabled if onboarding is complete AND the caller's enabled condition is met
const callerEnabled = options?.enabled ?? true;
const enabled = isOnboardingComplete && callerEnabled;
const queryResult = useQuery(
{
queryKey: ["conversations", endpoint, refreshTrigger],
@@ -106,6 +113,7 @@ export const useGetConversationsQuery = (
refetchOnMount: false, // Don't refetch on every mount
refetchOnWindowFocus: false, // Don't refetch when window regains focus
...options,
enabled, // Override enabled after spreading options to ensure onboarding check is applied
},
queryClient,
);
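The ordering above is the load-bearing detail: because enabled is placed after ...options is spread, a caller can narrow the condition but never bypass the onboarding gate. A minimal standalone sketch of the same pattern (hypothetical useTodosQuery hook with simplified types; not part of this diff):

import { useQuery, type UseQueryOptions } from "@tanstack/react-query";
import { useChat } from "@/contexts/chat-context";

export const useTodosQuery = (
  options?: Omit<UseQueryOptions<string[]>, "queryKey" | "queryFn">,
) => {
  const { isOnboardingComplete } = useChat();
  // Combine the caller's enabled flag with the onboarding gate.
  const callerEnabled = options?.enabled ?? true;
  return useQuery({
    queryKey: ["todos"],
    queryFn: async ({ signal }) => {
      const res = await fetch("/api/todos", { signal });
      return (await res.json()) as string[];
    },
    ...options,
    // Placed after the spread so a caller cannot re-enable the query
    // before onboarding completes.
    enabled: isOnboardingComplete && callerEnabled,
  });
};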

View file

@@ -3,6 +3,8 @@ import {
useQuery,
useQueryClient,
} from "@tanstack/react-query";
import { useChat } from "@/contexts/chat-context";
import { useProviderHealthQuery } from "./useProviderHealthQuery";
type Nudge = string;
@@ -27,6 +29,13 @@ export const useGetNudgesQuery = (
) => {
const { chatId, filters, limit, scoreThreshold } = params ?? {};
const queryClient = useQueryClient();
const { isOnboardingComplete } = useChat();
// Check if LLM provider is healthy
// If health data is not available yet, assume healthy (optimistic)
// Only disable if health data exists and shows LLM error
const { data: health } = useProviderHealthQuery();
const isLLMHealthy = health === undefined || (health?.status === "healthy" && !health?.llm_error);
function cancel() {
queryClient.removeQueries({
@@ -77,6 +86,11 @@ export const useGetNudgesQuery = (
}
}
// Extract enabled from options and combine with onboarding completion and LLM health checks
// Query is only enabled if onboarding is complete AND LLM provider is healthy AND the caller's enabled condition is met
const callerEnabled = options?.enabled ?? true;
const enabled = isOnboardingComplete && isLLMHealthy && callerEnabled;
const queryResult = useQuery(
{
queryKey: ["nudges", chatId, filters, limit, scoreThreshold],
@@ -91,6 +105,7 @@ export const useGetNudgesQuery = (
return Array.isArray(data) && data.length === 0 ? 5000 : false;
},
...options,
enabled, // Override enabled after spreading options to ensure the onboarding and LLM health checks are applied
},
queryClient,
);
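The isLLMHealthy derivation above is deliberately optimistic: missing health data (the query has not resolved yet) does not block nudges; only a confirmed unhealthy status or LLM error does. The predicate in isolation (simplified response shape, hypothetical helper; not part of this diff):

type ProviderHealth = { status: string; llm_error?: string };

function isLLMHealthy(health: ProviderHealth | undefined): boolean {
  // undefined means the health query is still loading - assume healthy so
  // nudges are not blocked during initial page load.
  return health === undefined || (health.status === "healthy" && !health.llm_error);
}

// isLLMHealthy(undefined)                             -> true (optimistic)
// isLLMHealthy({ status: "healthy" })                 -> true
// isLLMHealthy({ status: "healthy", llm_error: "x" }) -> false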

View file

@@ -5,6 +5,7 @@ import {
} from "@tanstack/react-query";
import { useChat } from "@/contexts/chat-context";
import { useGetSettingsQuery } from "./useGetSettingsQuery";
import { useGetTasksQuery } from "./useGetTasksQuery";
export interface ProviderHealthDetails {
llm_model: string;
@@ -40,11 +41,20 @@ export const useProviderHealthQuery = (
) => {
const queryClient = useQueryClient();
// Get chat error state from context (ChatProvider wraps the entire app in layout.tsx)
const { hasChatError, setChatError } = useChat();
// Get chat error state and onboarding completion from context (ChatProvider wraps the entire app in layout.tsx)
const { hasChatError, setChatError, isOnboardingComplete } = useChat();
const { data: settings = {} } = useGetSettingsQuery();
// Check if there are any active ingestion tasks
const { data: tasks = [] } = useGetTasksQuery();
const hasActiveIngestion = tasks.some(
(task) =>
task.status === "pending" ||
task.status === "running" ||
task.status === "processing",
);
async function checkProviderHealth(): Promise<ProviderHealthResponse> {
try {
const url = new URL("/api/provider/health", window.location.origin);
@@ -55,6 +65,7 @@ export const useProviderHealthQuery = (
}
// Add test_completion query param if specified or if chat error exists
// Use the same testCompletion value that's in the queryKey
const testCompletion = params?.test_completion ?? hasChatError;
if (testCompletion) {
url.searchParams.set("test_completion", "true");
@@ -101,7 +112,10 @@ export const useProviderHealthQuery = (
}
}
const queryKey = ["provider", "health", params?.test_completion];
// Include hasChatError in queryKey so React Query refetches when it changes
// This ensures the health check runs with test_completion=true when chat errors occur
const testCompletion = params?.test_completion ?? hasChatError;
const queryKey = ["provider", "health", testCompletion, hasChatError];
const failureCountKey = queryKey.join("-");
const queryResult = useQuery(
@@ -143,7 +157,11 @@ export const useProviderHealthQuery = (
refetchOnWindowFocus: false, // Disabled to reduce unnecessary calls on tab switches
refetchOnMount: true,
staleTime: 30000, // Consider data stale after 30 seconds
enabled: !!settings?.edited && options?.enabled !== false, // Only run after onboarding is complete
enabled:
!!settings?.edited &&
isOnboardingComplete &&
!hasActiveIngestion && // Disable health checks when ingestion is happening
options?.enabled !== false, // Only run after onboarding is complete
...options,
},
queryClient,
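
Two mechanisms in this hunk are worth noting: hasChatError is folded into the queryKey, so a chat failure changes the key and forces a fresh health check with test_completion=true, and the query is disabled while ingestion tasks are active. A reduced sketch of the key-change mechanism (hypothetical hook, simplified queryFn; not part of this diff):

import { useQuery } from "@tanstack/react-query";

function useHealthCheck(hasChatError: boolean) {
  const testCompletion = hasChatError; // escalate to a full completion test on chat errors
  return useQuery({
    // When hasChatError flips, the key changes, so React Query fetches
    // fresh data instead of serving the cached entry for the old key.
    queryKey: ["provider", "health", testCompletion, hasChatError],
    queryFn: async ({ signal }) => {
      const url = new URL("/api/provider/health", window.location.origin);
      if (testCompletion) url.searchParams.set("test_completion", "true");
      const res = await fetch(url, { signal });
      return res.json();
    },
  });
}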

File diff suppressed because it is too large

View file

@@ -1,13 +1,13 @@
import type { Dispatch, SetStateAction } from "react";
import { useEffect, useState } from "react";
import { LabelInput } from "@/components/label-input";
import { LabelWrapper } from "@/components/label-wrapper";
import IBMLogo from "@/components/icons/ibm-logo";
import { Switch } from "@/components/ui/switch";
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/ui/tooltip";
import { useDebouncedValue } from "@/lib/debounce";
import type { OnboardingVariables } from "../../api/mutations/useOnboardingMutation";
@@ -18,273 +18,273 @@ import { AdvancedOnboarding } from "./advanced";
import { ModelSelector } from "./model-selector";
export function IBMOnboarding({
isEmbedding = false,
setSettings,
sampleDataset,
setSampleDataset,
setIsLoadingModels,
alreadyConfigured = false,
existingEndpoint,
existingProjectId,
hasEnvApiKey = false,
}: {
isEmbedding?: boolean;
setSettings: Dispatch<SetStateAction<OnboardingVariables>>;
sampleDataset: boolean;
setSampleDataset: (dataset: boolean) => void;
setIsLoadingModels?: (isLoading: boolean) => void;
alreadyConfigured?: boolean;
existingEndpoint?: string;
existingProjectId?: string;
hasEnvApiKey?: boolean;
}) {
const [endpoint, setEndpoint] = useState(
alreadyConfigured
? ""
: existingEndpoint || "https://us-south.ml.cloud.ibm.com",
);
const [apiKey, setApiKey] = useState("");
const [getFromEnv, setGetFromEnv] = useState(
hasEnvApiKey && !alreadyConfigured,
);
const [projectId, setProjectId] = useState(
alreadyConfigured ? "" : existingProjectId || "",
);
const options = [
{
value: "https://us-south.ml.cloud.ibm.com",
label: "https://us-south.ml.cloud.ibm.com",
default: true,
},
{
value: "https://eu-de.ml.cloud.ibm.com",
label: "https://eu-de.ml.cloud.ibm.com",
default: false,
},
{
value: "https://eu-gb.ml.cloud.ibm.com",
label: "https://eu-gb.ml.cloud.ibm.com",
default: false,
},
{
value: "https://au-syd.ml.cloud.ibm.com",
label: "https://au-syd.ml.cloud.ibm.com",
default: false,
},
{
value: "https://jp-tok.ml.cloud.ibm.com",
label: "https://jp-tok.ml.cloud.ibm.com",
default: false,
},
{
value: "https://ca-tor.ml.cloud.ibm.com",
label: "https://ca-tor.ml.cloud.ibm.com",
default: false,
},
];
const debouncedEndpoint = useDebouncedValue(endpoint, 500);
const debouncedApiKey = useDebouncedValue(apiKey, 500);
const debouncedProjectId = useDebouncedValue(projectId, 500);
// Fetch models from API when all credentials are provided
const {
data: modelsData,
isLoading: isLoadingModels,
error: modelsError,
} = useGetIBMModelsQuery(
{
endpoint: debouncedEndpoint ? debouncedEndpoint : undefined,
apiKey: getFromEnv ? "" : (debouncedApiKey ? debouncedApiKey : undefined),
projectId: debouncedProjectId ? debouncedProjectId : undefined,
},
{
enabled:
!!debouncedEndpoint ||
!!debouncedApiKey ||
!!debouncedProjectId ||
getFromEnv ||
alreadyConfigured,
},
);
// Fetch models from API when all credentials are provided
const {
data: modelsData,
isLoading: isLoadingModels,
error: modelsError,
} = useGetIBMModelsQuery(
{
endpoint: debouncedEndpoint ? debouncedEndpoint : undefined,
apiKey: getFromEnv ? "" : debouncedApiKey ? debouncedApiKey : undefined,
projectId: debouncedProjectId ? debouncedProjectId : undefined,
},
{
enabled:
(!!debouncedEndpoint && !!debouncedApiKey && !!debouncedProjectId) ||
getFromEnv ||
alreadyConfigured,
},
);
// Use custom hook for model selection logic
const {
languageModel,
embeddingModel,
setLanguageModel,
setEmbeddingModel,
languageModels,
embeddingModels,
} = useModelSelection(modelsData, isEmbedding);
const handleGetFromEnvChange = (fromEnv: boolean) => {
setGetFromEnv(fromEnv);
if (fromEnv) {
setApiKey("");
}
setEmbeddingModel?.("");
setLanguageModel?.("");
};
const handleSampleDatasetChange = (dataset: boolean) => {
setSampleDataset(dataset);
};
useEffect(() => {
setIsLoadingModels?.(isLoadingModels);
}, [isLoadingModels, setIsLoadingModels]);
// Update settings when values change
useUpdateSettings(
"watsonx",
{
endpoint,
apiKey,
projectId,
languageModel,
embeddingModel,
},
setSettings,
isEmbedding,
);
return (
<>
<div className="space-y-4">
<LabelWrapper
label="watsonx.ai API Endpoint"
helperText="Base URL of the API"
id="api-endpoint"
required
>
<div className="space-y-1">
<ModelSelector
options={alreadyConfigured ? [] : options}
value={endpoint}
custom
onValueChange={alreadyConfigured ? () => {} : setEndpoint}
searchPlaceholder="Search endpoint..."
noOptionsPlaceholder={
alreadyConfigured
? "https://•••••••••••••••••••••••••••••••••••••••••"
: "No endpoints available"
}
placeholder="Select endpoint..."
/>
{alreadyConfigured && (
<p className="text-mmd text-muted-foreground">
Reusing endpoint from model provider selection.
</p>
)}
</div>
</LabelWrapper>
<div className="space-y-1">
<LabelInput
label="watsonx Project ID"
helperText="Project ID for the model"
id="project-id"
required
placeholder={
alreadyConfigured ? "••••••••••••••••••••••••" : "your-project-id"
}
value={projectId}
onChange={(e) => setProjectId(e.target.value)}
disabled={alreadyConfigured}
/>
{alreadyConfigured && (
<p className="text-mmd text-muted-foreground">
Reusing project ID from model provider selection.
</p>
)}
</div>
<LabelWrapper
label="Use environment watsonx API key"
id="get-api-key"
description="Reuse the key from your environment config. Turn off to enter a different key."
flex
>
<Tooltip>
<TooltipTrigger asChild>
<div>
<Switch
checked={getFromEnv}
onCheckedChange={handleGetFromEnvChange}
disabled={!hasEnvApiKey || alreadyConfigured}
/>
</div>
</TooltipTrigger>
{!hasEnvApiKey && !alreadyConfigured && (
<TooltipContent>
watsonx API key not detected in the environment.
</TooltipContent>
)}
</Tooltip>
</LabelWrapper>
{!getFromEnv && !alreadyConfigured && (
<div className="space-y-1">
<LabelInput
label="watsonx API key"
helperText="API key to access watsonx.ai"
className={modelsError ? "!border-destructive" : ""}
id="api-key"
type="password"
required
placeholder="your-api-key"
value={apiKey}
onChange={(e) => setApiKey(e.target.value)}
/>
{isLoadingModels && (
<p className="text-mmd text-muted-foreground">
Validating API key...
</p>
)}
{modelsError && (
<p className="text-mmd text-destructive">
Invalid watsonx API key. Verify or replace the key.
</p>
)}
</div>
)}
{alreadyConfigured && (
<div className="space-y-1">
<LabelInput
label="watsonx API key"
helperText="API key to access watsonx.ai"
id="api-key"
type="password"
required
placeholder="•••••••••••••••••••••••••••••••••••••••••"
value={apiKey}
onChange={(e) => setApiKey(e.target.value)}
disabled={true}
/>
<p className="text-mmd text-muted-foreground">
Reusing API key from model provider selection.
</p>
</div>
)}
{getFromEnv && isLoadingModels && (
<p className="text-mmd text-muted-foreground">
Validating configuration...
</p>
)}
{getFromEnv && modelsError && (
<p className="text-mmd text-accent-amber-foreground">
Connection failed. Check your configuration.
</p>
)}
</div>
<AdvancedOnboarding
icon={<IBMLogo className="w-4 h-4" />}
languageModels={languageModels}
embeddingModels={embeddingModels}
languageModel={languageModel}
embeddingModel={embeddingModel}
sampleDataset={sampleDataset}
setLanguageModel={setLanguageModel}
setEmbeddingModel={setEmbeddingModel}
setSampleDataset={handleSampleDatasetChange}
/>
</>
);
}
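Aside from the reformatting, the functional change in this file is the enabled condition for useGetIBMModelsQuery: || became &&, so models are fetched only once all three credentials are present, rather than as soon as any one of them is typed (which fired requests that could not succeed). The corrected gate in isolation (hypothetical helper; not part of this diff):

function shouldFetchIBMModels(args: {
  endpoint?: string;
  apiKey?: string;
  projectId?: string;
  getFromEnv: boolean;
  alreadyConfigured: boolean;
}): boolean {
  // All three manual credentials must be present before calling the models
  // endpoint; env-sourced keys and preconfigured providers skip the check.
  return (
    (!!args.endpoint && !!args.apiKey && !!args.projectId) ||
    args.getFromEnv ||
    args.alreadyConfigured
  );
}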

View file

@@ -507,7 +507,7 @@ const OnboardingCard = ({
hasEnvApiKey={
currentSettings?.providers?.openai?.has_api_key === true
}
alreadyConfigured={providerAlreadyConfigured}
alreadyConfigured={providerAlreadyConfigured && modelProvider === "openai"}
/>
</TabsContent>
<TabsContent value="watsonx">
@@ -517,7 +517,7 @@ const OnboardingCard = ({
setSampleDataset={setSampleDataset}
setIsLoadingModels={setIsLoadingModels}
isEmbedding={isEmbedding}
alreadyConfigured={providerAlreadyConfigured}
alreadyConfigured={providerAlreadyConfigured && modelProvider === "watsonx"}
existingEndpoint={currentSettings?.providers?.watsonx?.endpoint}
existingProjectId={currentSettings?.providers?.watsonx?.project_id}
hasEnvApiKey={currentSettings?.providers?.watsonx?.has_api_key === true}
@@ -530,7 +530,7 @@ const OnboardingCard = ({
setSampleDataset={setSampleDataset}
setIsLoadingModels={setIsLoadingModels}
isEmbedding={isEmbedding}
alreadyConfigured={providerAlreadyConfigured}
alreadyConfigured={providerAlreadyConfigured && modelProvider === "ollama"}
existingEndpoint={currentSettings?.providers?.ollama?.endpoint}
/>
</TabsContent>

View file

@@ -21,6 +21,7 @@ const OnboardingUpload = ({ onComplete }: OnboardingUploadProps) => {
const [isUploading, setIsUploading] = useState(false);
const [currentStep, setCurrentStep] = useState<number | null>(null);
const [uploadedFilename, setUploadedFilename] = useState<string | null>(null);
const [uploadedTaskId, setUploadedTaskId] = useState<string | null>(null);
const [shouldCreateFilter, setShouldCreateFilter] = useState(false);
const [isCreatingFilter, setIsCreatingFilter] = useState(false);
@@ -43,23 +44,26 @@ const OnboardingUpload = ({ onComplete }: OnboardingUploadProps) => {
// Monitor tasks and call onComplete when file processing is done
useEffect(() => {
if (currentStep === null || !tasks) {
if (currentStep === null || !tasks || !uploadedTaskId) {
return;
}
// Check if there are any active tasks (pending, running, or processing)
const activeTasks = tasks.find(
(task) =>
task.status === "pending" ||
task.status === "running" ||
task.status === "processing",
);
// Find the task by task ID from the upload response
const matchingTask = tasks.find((task) => task.task_id === uploadedTaskId);
// If no active tasks and we have more than 1 task (initial + new upload), complete it
if (
(!activeTasks || (activeTasks.processed_files ?? 0) > 0) &&
tasks.length > 1
) {
// If no matching task found, wait for it to appear
if (!matchingTask) {
return;
}
// Check if the matching task is still active (pending, running, or processing)
const isTaskActive =
matchingTask.status === "pending" ||
matchingTask.status === "running" ||
matchingTask.status === "processing";
// If task is completed or has processed files, complete the onboarding step
if (!isTaskActive || (matchingTask.processed_files ?? 0) > 0) {
// Set to final step to show "Done"
setCurrentStep(STEP_LIST.length);
@@ -91,6 +95,7 @@ const OnboardingUpload = ({ onComplete }: OnboardingUploadProps) => {
icon: "file",
});
// Wait for filter creation to complete before proceeding
createFilterMutation
.mutateAsync({
name: displayName,
@@ -114,18 +119,36 @@ const OnboardingUpload = ({ onComplete }: OnboardingUploadProps) => {
})
.finally(() => {
setIsCreatingFilter(false);
// Refetch nudges to get new ones
refetchNudges();
// Wait a bit before completing (after filter is created)
setTimeout(() => {
onComplete();
}, 1000);
});
} else {
// No filter to create, just complete
// Refetch nudges to get new ones
refetchNudges();
// Wait a bit before completing
setTimeout(() => {
onComplete();
}, 1000);
}
// Refetch nudges to get new ones
refetchNudges();
// Wait a bit before completing
setTimeout(() => {
onComplete();
}, 1000);
}
}, [tasks, currentStep, onComplete, refetchNudges, shouldCreateFilter, uploadedFilename]);
}, [
tasks,
currentStep,
onComplete,
refetchNudges,
shouldCreateFilter,
uploadedFilename,
uploadedTaskId,
createFilterMutation,
isCreatingFilter,
]);
const resetFileInput = () => {
if (fileInputRef.current) {
@@ -144,6 +167,11 @@ const OnboardingUpload = ({ onComplete }: OnboardingUploadProps) => {
const result = await uploadFile(file, true, true); // Pass createFilter=true
console.log("Document upload task started successfully");
// Store task ID to track the specific upload task
if (result.taskId) {
setUploadedTaskId(result.taskId);
}
// Store filename and createFilter flag in state to create filter after ingestion succeeds
if (result.createFilter && result.filename) {
setUploadedFilename(result.filename);
@@ -176,6 +204,7 @@ const OnboardingUpload = ({ onComplete }: OnboardingUploadProps) => {
// Reset on error
setCurrentStep(null);
setUploadedTaskId(null);
} finally {
setIsUploading(false);
}
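The completion check now follows the specific task returned by the upload call instead of scanning for any active task, which could mis-fire when unrelated tasks existed. The matching logic in isolation (hypothetical Task shape inferred from the fields used above; not part of this diff):

interface Task {
  task_id: string;
  status: "pending" | "running" | "processing" | "completed" | "failed";
  processed_files?: number;
}

function isUploadFinished(tasks: Task[], uploadedTaskId: string): boolean {
  const task = tasks.find((t) => t.task_id === uploadedTaskId);
  if (!task) return false; // task not visible yet - keep waiting
  const isActive =
    task.status === "pending" ||
    task.status === "running" ||
    task.status === "processing";
  // Done once the task leaves the active states or reports processed files.
  return !isActive || (task.processed_files ?? 0) > 0;
}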

View file

@@ -47,8 +47,7 @@ export function ChatRenderer({
refreshConversations,
startNewConversation,
setConversationFilter,
setCurrentConversationId,
setPreviousResponseIds,
setOnboardingComplete,
} = useChat();
// Initialize onboarding state based on local storage and settings
@@ -170,12 +169,17 @@ export function ChatRenderer({
localStorage.removeItem(ONBOARDING_UPLOAD_STEPS_KEY);
}
// Clear ALL conversation state so next message starts fresh
await startNewConversation();
// Mark onboarding as complete in context
setOnboardingComplete(true);
// Store the user document filter as default for new conversations and load it
// Store the user document filter as default for new conversations FIRST
// This must happen before startNewConversation() so the filter is available
await storeDefaultFilterForNewConversations(true);
// Clear ALL conversation state so next message starts fresh
// This will pick up the default filter we just set
await startNewConversation();
// Clean up onboarding filter IDs now that we've set the default
if (typeof window !== "undefined") {
localStorage.removeItem(ONBOARDING_OPENRAG_DOCS_FILTER_ID_KEY);
@@ -202,6 +206,8 @@ export function ChatRenderer({
localStorage.removeItem(ONBOARDING_CARD_STEPS_KEY);
localStorage.removeItem(ONBOARDING_UPLOAD_STEPS_KEY);
}
// Mark onboarding as complete in context
setOnboardingComplete(true);
// Store the OpenRAG docs filter as default for new conversations
storeDefaultFilterForNewConversations(false);
setShowLayout(true);

View file

@@ -5,125 +5,131 @@ import { useRouter } from "next/navigation";
import { useProviderHealthQuery } from "@/app/api/queries/useProviderHealthQuery";
import type { ModelProvider } from "@/app/settings/_helpers/model-helpers";
import { Banner, BannerIcon, BannerTitle } from "@/components/ui/banner";
import { useChat } from "@/contexts/chat-context";
import { cn } from "@/lib/utils";
import { Button } from "./ui/button";
interface ProviderHealthBannerProps {
className?: string;
}
// Custom hook to check provider health status
export function useProviderHealth() {
const { hasChatError } = useChat();
const {
data: health,
isLoading,
isFetching,
error,
isError,
} = useProviderHealthQuery({
test_completion: hasChatError, // Use test_completion=true when chat errors occur
});
const isHealthy = health?.status === "healthy" && !isError;
// Only consider unhealthy if backend is up but provider validation failed
// Don't show banner if backend is unavailable
const isUnhealthy =
health?.status === "unhealthy" || health?.status === "error";
const isBackendUnavailable =
health?.status === "backend-unavailable" || isError;
return {
health,
isLoading,
isFetching,
error,
isError,
isHealthy,
isUnhealthy,
isBackendUnavailable,
};
}
const providerTitleMap: Record<ModelProvider, string> = {
openai: "OpenAI",
anthropic: "Anthropic",
ollama: "Ollama",
watsonx: "IBM watsonx.ai",
};
export function ProviderHealthBanner({ className }: ProviderHealthBannerProps) {
const { isLoading, isHealthy, isUnhealthy, health } = useProviderHealth();
const router = useRouter();
// Only show banner when provider is unhealthy (not when backend is unavailable)
if (isLoading || isHealthy) {
return null;
}
if (isUnhealthy) {
const llmProvider = health?.llm_provider || health?.provider;
const embeddingProvider = health?.embedding_provider;
const llmError = health?.llm_error;
const embeddingError = health?.embedding_error;
// Determine which provider has the error
let errorProvider: string | undefined;
let errorMessage: string;
if (llmError && embeddingError) {
// Both have errors - show combined message
errorMessage = health?.message || "Provider validation failed";
errorProvider = undefined; // Don't link to a specific provider
} else if (llmError) {
// Only LLM has error
errorProvider = llmProvider;
errorMessage = llmError;
} else if (embeddingError) {
// Only embedding has error
errorProvider = embeddingProvider;
errorMessage = embeddingError;
} else {
// Fallback to original message
errorMessage = health?.message || "Provider validation failed";
errorProvider = llmProvider;
}
if (llmError && embeddingError) {
// Both have errors - check if they're the same
if (llmError === embeddingError) {
// Same error for both - show once
errorMessage = llmError;
} else {
// Different errors - show both
errorMessage = `${llmError}; ${embeddingError}`;
}
errorProvider = undefined; // Don't link to a specific provider
} else if (llmError) {
// Only LLM has error
errorProvider = llmProvider;
errorMessage = llmError;
} else if (embeddingError) {
// Only embedding has error
errorProvider = embeddingProvider;
errorMessage = embeddingError;
} else {
// Fallback to original message
errorMessage = health?.message || "Provider validation failed";
errorProvider = llmProvider;
}
const providerTitle = errorProvider
? providerTitleMap[errorProvider as ModelProvider] || errorProvider
: "Provider";
const settingsUrl = errorProvider
? `/settings?setup=${errorProvider}`
: "/settings";
return (
<Banner
className={cn(
"bg-red-50 dark:bg-red-950 text-foreground border-accent-red border-b w-full",
className,
)}
>
<BannerIcon
className="text-accent-red-foreground"
icon={AlertTriangle}
/>
<BannerTitle className="font-medium flex items-center gap-2">
{llmError && embeddingError ? (
<>Provider errors - {errorMessage}</>
) : (
<>
{providerTitle} error - {errorMessage}
</>
)}
</BannerTitle>
<Button size="sm" onClick={() => router.push(settingsUrl)}>
Fix Setup
</Button>
</Banner>
);
}
return null;
}

View file

@@ -10,6 +10,7 @@ import {
useRef,
useState,
} from "react";
import { ONBOARDING_STEP_KEY } from "@/lib/constants";
export type EndpointType = "chat" | "langflow";
@@ -81,6 +82,8 @@ interface ChatContextType {
setConversationFilter: (filter: KnowledgeFilter | null, responseId?: string | null) => void;
hasChatError: boolean;
setChatError: (hasError: boolean) => void;
isOnboardingComplete: boolean;
setOnboardingComplete: (complete: boolean) => void;
}
const ChatContext = createContext<ChatContextType | undefined>(undefined);
@@ -111,6 +114,37 @@ export function ChatProvider({ children }: ChatProviderProps) {
const [conversationFilter, setConversationFilterState] =
useState<KnowledgeFilter | null>(null);
const [hasChatError, setChatError] = useState(false);
// Check if onboarding is complete (onboarding step key should be null)
const [isOnboardingComplete, setIsOnboardingComplete] = useState(() => {
if (typeof window === "undefined") return false;
return localStorage.getItem(ONBOARDING_STEP_KEY) === null;
});
// Sync onboarding completion state with localStorage
useEffect(() => {
const checkOnboarding = () => {
if (typeof window !== "undefined") {
setIsOnboardingComplete(
localStorage.getItem(ONBOARDING_STEP_KEY) === null,
);
}
};
// Check on mount
checkOnboarding();
// Listen for storage events (for cross-tab sync)
window.addEventListener("storage", checkOnboarding);
return () => {
window.removeEventListener("storage", checkOnboarding);
};
}, []);
const setOnboardingComplete = useCallback((complete: boolean) => {
setIsOnboardingComplete(complete);
}, []);
// Listen for ingestion failures and set chat error flag
useEffect(() => {
@@ -228,6 +262,10 @@ export function ChatProvider({ children }: ChatProviderProps) {
const startNewConversation = useCallback(async () => {
console.log("[CONVERSATION] Starting new conversation");
// Check if there's existing conversation data - if so, this is a manual "new conversation" action
// Check state values before clearing them
const hasExistingConversation = conversationData !== null || placeholderConversation !== null;
// Clear current conversation data and reset state
setCurrentConversationId(null);
setPreviousResponseIds({ chat: null, langflow: null });
@@ -261,15 +299,22 @@
setConversationFilterState(null);
}
} else {
console.log("[CONVERSATION] No default filter set");
setConversationFilterState(null);
// No default filter in localStorage
if (hasExistingConversation) {
// User is manually starting a new conversation - clear the filter
console.log("[CONVERSATION] Manual new conversation - clearing filter");
setConversationFilterState(null);
} else {
// First time after onboarding - preserve existing filter if set
// This prevents clearing the filter when startNewConversation is called multiple times during onboarding
console.log("[CONVERSATION] No default filter set, preserving existing filter if any");
// Don't clear the filter - it may have been set by storeDefaultFilterForNewConversations
}
}
} else {
setConversationFilterState(null);
}
// Create a temporary placeholder conversation to show in sidebar
const placeholderConversation: ConversationData = {
const newPlaceholderConversation: ConversationData = {
response_id: "new-conversation-" + Date.now(),
title: "New conversation",
endpoint: endpoint,
@@ -284,10 +329,10 @@
last_activity: new Date().toISOString(),
};
setPlaceholderConversation(placeholderConversation);
setPlaceholderConversation(newPlaceholderConversation);
// Force immediate refresh to ensure sidebar shows correct state
refreshConversations(true);
}, [endpoint, refreshConversations]);
}, [endpoint, refreshConversations, conversationData, placeholderConversation]);
const addConversationDoc = useCallback((filename: string) => {
setConversationDocs((prev) => [
@@ -375,6 +420,8 @@
setConversationFilter,
hasChatError,
setChatError,
isOnboardingComplete,
setOnboardingComplete,
}),
[
endpoint,
@@ -396,6 +443,8 @@
conversationFilter,
setConversationFilter,
hasChatError,
isOnboardingComplete,
setOnboardingComplete,
],
);
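The onboarding flag is seeded from localStorage and kept in sync across tabs via storage events; because storage events never fire in the tab that made the change, the context also exposes setOnboardingComplete for same-tab updates. The pattern as a standalone hook (the key's string value is an assumption; not part of this diff):

import { useEffect, useState } from "react";

const ONBOARDING_STEP_KEY = "onboarding_step"; // assumed value of the constant

export function useOnboardingComplete(): boolean {
  // Onboarding counts as complete once the step key has been removed.
  const [complete, setComplete] = useState(
    () =>
      typeof window !== "undefined" &&
      localStorage.getItem(ONBOARDING_STEP_KEY) === null,
  );

  useEffect(() => {
    const check = () =>
      setComplete(localStorage.getItem(ONBOARDING_STEP_KEY) === null);
    // "storage" fires in other tabs when this key changes there.
    window.addEventListener("storage", check);
    return () => window.removeEventListener("storage", check);
  }, []);

  return complete;
}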

View file

@@ -12,6 +12,7 @@ export interface UploadFileResult {
raw: unknown;
createFilter?: boolean;
filename?: string;
taskId?: string;
}
export async function duplicateCheck(
@@ -158,6 +159,7 @@ export async function uploadFile(
(uploadIngestJson as { upload?: { id?: string } }).upload?.id ||
(uploadIngestJson as { id?: string }).id ||
(uploadIngestJson as { task_id?: string }).task_id;
const taskId = (uploadIngestJson as { task_id?: string }).task_id;
const filePath =
(uploadIngestJson as { upload?: { path?: string } }).upload?.path ||
(uploadIngestJson as { path?: string }).path ||
@@ -197,6 +199,7 @@
raw: uploadIngestJson,
createFilter: shouldCreateFilter,
filename,
taskId,
};
return result;

View file

@@ -1,5 +1,6 @@
"""Provider validation utilities for testing API keys and models during onboarding."""
import json
import httpx
from utils.container_utils import transform_localhost_url
from utils.logging_config import get_logger
@@ -7,6 +8,106 @@ from utils.logging_config import get_logger
logger = get_logger(__name__)
def _parse_json_error_message(error_text: str) -> str:
"""Parse JSON error message and extract just the message field."""
try:
# Try to parse as JSON
error_data = json.loads(error_text)
if isinstance(error_data, dict):
# WatsonX format: {"errors": [{"code": "...", "message": "..."}], ...}
if "errors" in error_data and isinstance(error_data["errors"], list):
errors = error_data["errors"]
if len(errors) > 0 and isinstance(errors[0], dict):
message = errors[0].get("message", "")
if message:
return message
code = errors[0].get("code", "")
if code:
return f"Error: {code}"
# OpenAI format: {"error": {"message": "...", "type": "...", "code": "..."}}
if "error" in error_data:
error_obj = error_data["error"]
if isinstance(error_obj, dict):
message = error_obj.get("message", "")
if message:
return message
# Direct message field
if "message" in error_data:
return error_data["message"]
# Generic format: {"detail": "..."}
if "detail" in error_data:
return error_data["detail"]
except (json.JSONDecodeError, ValueError, TypeError):
pass
# Return original text if not JSON or can't parse
return error_text
def _extract_error_details(response: httpx.Response) -> str:
"""Extract detailed error message from API response."""
try:
# Try to parse JSON error response
error_data = response.json()
# Common error response formats
if isinstance(error_data, dict):
# WatsonX format: {"errors": [{"code": "...", "message": "..."}], ...}
if "errors" in error_data and isinstance(error_data["errors"], list):
errors = error_data["errors"]
if len(errors) > 0 and isinstance(errors[0], dict):
# Extract just the message from the first error
message = errors[0].get("message", "")
if message:
return message
# Fallback to code if no message
code = errors[0].get("code", "")
if code:
return f"Error: {code}"
# OpenAI format: {"error": {"message": "...", "type": "...", "code": "..."}}
if "error" in error_data:
error_obj = error_data["error"]
if isinstance(error_obj, dict):
message = error_obj.get("message", "")
error_type = error_obj.get("type", "")
code = error_obj.get("code", "")
if message:
details = message
if error_type:
details += f" (type: {error_type})"
if code:
details += f" (code: {code})"
return details
# Direct message field: {"message": "..."}
if "message" in error_data:
return error_data["message"]
# Generic format: {"detail": "..."}
if "detail" in error_data:
return error_data["detail"]
# If JSON parsing worked but no structured error found, try parsing text
response_text = response.text[:500]
parsed = _parse_json_error_message(response_text)
if parsed != response_text:
return parsed
return response_text
except (json.JSONDecodeError, ValueError):
# If JSON parsing fails, try parsing the text as JSON string
response_text = response.text[:500] if response.text else f"HTTP {response.status_code}"
parsed = _parse_json_error_message(response_text)
if parsed != response_text:
return parsed
return response_text
async def validate_provider_setup(
provider: str,
api_key: str = None,
@@ -30,7 +131,7 @@ async def validate_provider_setup(
If False, performs lightweight validation (no credits consumed). Default: False.
Raises:
Exception: If validation fails with message "Setup failed, please try again or select a different provider."
Exception: If validation fails, raises the original exception with the actual error message.
"""
provider_lower = provider.lower()
@@ -70,7 +171,8 @@ async def validate_provider_setup(
except Exception as e:
logger.error(f"Validation failed for provider {provider_lower}: {str(e)}")
raise Exception("Setup failed, please try again or select a different provider.")
# Preserve the original error message instead of replacing it with a generic one
raise
async def test_lightweight_health(
@@ -155,8 +257,9 @@ async def _test_openai_lightweight_health(api_key: str) -> None:
)
if response.status_code != 200:
logger.error(f"OpenAI lightweight health check failed: {response.status_code}")
raise Exception(f"OpenAI API key validation failed: {response.status_code}")
error_details = _extract_error_details(response)
logger.error(f"OpenAI lightweight health check failed: {response.status_code} - {error_details}")
raise Exception(f"OpenAI API key validation failed: {error_details}")
logger.info("OpenAI lightweight health check passed")
@@ -225,8 +328,9 @@ async def _test_openai_completion_with_tools(api_key: str, llm_model: str) -> No
)
if response.status_code != 200:
logger.error(f"OpenAI completion test failed: {response.status_code} - {response.text}")
raise Exception(f"OpenAI API error: {response.status_code}")
error_details = _extract_error_details(response)
logger.error(f"OpenAI completion test failed: {response.status_code} - {error_details}")
raise Exception(f"OpenAI API error: {error_details}")
logger.info("OpenAI completion with tool calling test passed")
@@ -260,8 +364,9 @@ async def _test_openai_embedding(api_key: str, embedding_model: str) -> None:
)
if response.status_code != 200:
logger.error(f"OpenAI embedding test failed: {response.status_code} - {response.text}")
raise Exception(f"OpenAI API error: {response.status_code}")
error_details = _extract_error_details(response)
logger.error(f"OpenAI embedding test failed: {response.status_code} - {error_details}")
raise Exception(f"OpenAI API error: {error_details}")
data = response.json()
if not data.get("data") or len(data["data"]) == 0:
@@ -300,8 +405,9 @@ async def _test_watsonx_lightweight_health(
)
if token_response.status_code != 200:
logger.error(f"IBM IAM token request failed: {token_response.status_code}")
raise Exception("Failed to authenticate with IBM Watson - invalid API key")
error_details = _extract_error_details(token_response)
logger.error(f"IBM IAM token request failed: {token_response.status_code} - {error_details}")
raise Exception(f"Failed to authenticate with IBM Watson: {error_details}")
bearer_token = token_response.json().get("access_token")
if not bearer_token:
@@ -335,8 +441,9 @@ async def _test_watsonx_completion_with_tools(
)
if token_response.status_code != 200:
logger.error(f"IBM IAM token request failed: {token_response.status_code}")
raise Exception("Failed to authenticate with IBM Watson")
error_details = _extract_error_details(token_response)
logger.error(f"IBM IAM token request failed: {token_response.status_code} - {error_details}")
raise Exception(f"Failed to authenticate with IBM Watson: {error_details}")
bearer_token = token_response.json().get("access_token")
if not bearer_token:
@@ -388,8 +495,11 @@ async def _test_watsonx_completion_with_tools(
)
if response.status_code != 200:
logger.error(f"IBM Watson completion test failed: {response.status_code} - {response.text}")
raise Exception(f"IBM Watson API error: {response.status_code}")
error_details = _extract_error_details(response)
logger.error(f"IBM Watson completion test failed: {response.status_code} - {error_details}")
# If error_details is still JSON, parse it to extract just the message
parsed_details = _parse_json_error_message(error_details)
raise Exception(f"IBM Watson API error: {parsed_details}")
logger.info("IBM Watson completion with tool calling test passed")
@@ -398,6 +508,13 @@ async def _test_watsonx_completion_with_tools(
raise Exception("Request timed out")
except Exception as e:
logger.error(f"IBM Watson completion test failed: {str(e)}")
# If the error message contains JSON, parse it to extract just the message
error_str = str(e)
if "IBM Watson API error: " in error_str:
json_part = error_str.split("IBM Watson API error: ", 1)[1]
parsed_message = _parse_json_error_message(json_part)
if parsed_message != json_part:
raise Exception(f"IBM Watson API error: {parsed_message}")
raise
@@ -419,8 +536,9 @@ async def _test_watsonx_embedding(
)
if token_response.status_code != 200:
logger.error(f"IBM IAM token request failed: {token_response.status_code}")
raise Exception("Failed to authenticate with IBM Watson")
error_details = _extract_error_details(token_response)
logger.error(f"IBM IAM token request failed: {token_response.status_code} - {error_details}")
raise Exception(f"Failed to authenticate with IBM Watson: {error_details}")
bearer_token = token_response.json().get("access_token")
if not bearer_token:
@@ -450,8 +568,11 @@ async def _test_watsonx_embedding(
)
if response.status_code != 200:
logger.error(f"IBM Watson embedding test failed: {response.status_code} - {response.text}")
raise Exception(f"IBM Watson API error: {response.status_code}")
error_details = _extract_error_details(response)
logger.error(f"IBM Watson embedding test failed: {response.status_code} - {error_details}")
# If error_details is still JSON, parse it to extract just the message
parsed_details = _parse_json_error_message(error_details)
raise Exception(f"IBM Watson API error: {parsed_details}")
data = response.json()
if not data.get("results") or len(data["results"]) == 0:
@@ -464,6 +585,13 @@ async def _test_watsonx_embedding(
raise Exception("Request timed out")
except Exception as e:
logger.error(f"IBM Watson embedding test failed: {str(e)}")
# If the error message contains JSON, parse it to extract just the message
error_str = str(e)
if "IBM Watson API error: " in error_str:
json_part = error_str.split("IBM Watson API error: ", 1)[1]
parsed_message = _parse_json_error_message(json_part)
if parsed_message != json_part:
raise Exception(f"IBM Watson API error: {parsed_message}")
raise
@@ -483,8 +611,9 @@ async def _test_ollama_lightweight_health(endpoint: str) -> None:
)
if response.status_code != 200:
logger.error(f"Ollama lightweight health check failed: {response.status_code}")
raise Exception(f"Ollama endpoint not responding: {response.status_code}")
error_details = _extract_error_details(response)
logger.error(f"Ollama lightweight health check failed: {response.status_code} - {error_details}")
raise Exception(f"Ollama endpoint not responding: {error_details}")
logger.info("Ollama lightweight health check passed")
@@ -537,8 +666,9 @@ async def _test_ollama_completion_with_tools(llm_model: str, endpoint: str) -> N
)
if response.status_code != 200:
logger.error(f"Ollama completion test failed: {response.status_code} - {response.text}")
raise Exception(f"Ollama API error: {response.status_code}")
error_details = _extract_error_details(response)
logger.error(f"Ollama completion test failed: {response.status_code} - {error_details}")
raise Exception(f"Ollama API error: {error_details}")
logger.info("Ollama completion with tool calling test passed")
@@ -569,8 +699,9 @@ async def _test_ollama_embedding(embedding_model: str, endpoint: str) -> None:
)
if response.status_code != 200:
logger.error(f"Ollama embedding test failed: {response.status_code} - {response.text}")
raise Exception(f"Ollama API error: {response.status_code}")
error_details = _extract_error_details(response)
logger.error(f"Ollama embedding test failed: {response.status_code} - {error_details}")
raise Exception(f"Ollama API error: {error_details}")
data = response.json()
if not data.get("embedding"):
@@ -616,8 +747,9 @@ async def _test_anthropic_lightweight_health(api_key: str) -> None:
)
if response.status_code != 200:
logger.error(f"Anthropic lightweight health check failed: {response.status_code}")
raise Exception(f"Anthropic API key validation failed: {response.status_code}")
error_details = _extract_error_details(response)
logger.error(f"Anthropic lightweight health check failed: {response.status_code} - {error_details}")
raise Exception(f"Anthropic API key validation failed: {error_details}")
logger.info("Anthropic lightweight health check passed")
@@ -672,8 +804,9 @@ async def _test_anthropic_completion_with_tools(api_key: str, llm_model: str) ->
)
if response.status_code != 200:
logger.error(f"Anthropic completion test failed: {response.status_code} - {response.text}")
raise Exception(f"Anthropic API error: {response.status_code}")
error_details = _extract_error_details(response)
logger.error(f"Anthropic completion test failed: {response.status_code} - {error_details}")
raise Exception(f"Anthropic API error: {error_details}")
logger.info("Anthropic completion with tool calling test passed")
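For reference, the fallback chain implemented by _parse_json_error_message and _extract_error_details (watsonx errors list, then OpenAI-style error.message, then a direct message field, then detail, then the raw text) sketched as an equivalent TypeScript helper for the frontend (hypothetical; not part of this diff):

function parseProviderError(errorText: string): string {
  try {
    const data: any = JSON.parse(errorText);
    if (data && typeof data === "object") {
      // watsonx: {"errors": [{"code": "...", "message": "..."}]}
      const first = Array.isArray(data.errors) ? data.errors[0] : undefined;
      if (first?.message) return first.message;
      if (first?.code) return "Error: " + first.code;
      // OpenAI/Anthropic: {"error": {"message": "..."}}
      if (data.error?.message) return data.error.message;
      if (typeof data.message === "string") return data.message; // direct field
      if (typeof data.detail === "string") return data.detail; // generic detail
    }
  } catch {
    // Not JSON - fall through and return the raw text unchanged.
  }
  return errorText;
}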