openrag/frontend/src/app/api/queries/useProviderHealthQuery.ts
Lucas Oliveira 37faf94979
feat: adds anthropic provider, splits onboarding editing into two, supports changing providers with generic llm and embedding components (#373)
* Added flows with new components

* commented model provider assignment

* Added agent component display name

* commented provider assignment, assign provider on the generic component, assign custom values

* fixed ollama not showing loading steps, fixed loading steps never being removed

* made embedding and llm model optional on onboarding call

* added isEmbedding handling on useModelSelection

* added isEmbedding on onboarding card, separating embedding from non embedding card

* Added one additional step to configure embeddings

* Added embedding provider config

* Changed settings.py to return if not embedding

* Added editing fields to onboarding

* updated onboarding and flows_service to change embedding and llm separately

* updated templates that need to be changed with provider values

* updated flows with new components

* Changed config manager to not have default models

* Changed flows_service settings

* Complete steps if not embedding

* Add more onboarding steps

* Removed one step from llm steps

* Added Anthropic as a model for the language model on the frontend

* Added anthropic models

* Added anthropic support on Backend

* Fixed provider health and validation

* Format settings

* Change anthropic logo

* Changed button to not jump

* Changed flows service to make anthropic work

* Fixed some things

* add embedding specific global variables

* updated flows

* fixed ingestion flow

* Implemented anthropic on settings page

* add embedding provider logo

* updated backend to work with multiple provider config

* update useUpdateSettings with new settings type

* updated provider health banner to check for health with new api

* changed queries and mutations to use new api

* changed embedding model input to work with new api

* Implemented provider based config on the frontend

* update existing design

* fixed settings configured

* fixed provider health query to include health checks for both providers

* Changed model-providers to correctly show the configured providers

* Updated prompt

* updated openrag agent

* Fixed settings to allow editing providers and changing llm and embedding models

* updated settings

* changed lf ver

* bump openrag version

* added more steps

* update settings to create the global variables

* updated steps

* updated default prompt

---------

Co-authored-by: Sebastián Estévez <estevezsebastian@gmail.com>
2025-11-11 19:22:16 -03:00

import {
  type UseQueryOptions,
  useQuery,
  useQueryClient,
} from "@tanstack/react-query";
import { useGetSettingsQuery } from "./useGetSettingsQuery";

export interface ProviderHealthDetails {
  llm_model: string;
  embedding_model: string;
  endpoint?: string | null;
}

export interface ProviderHealthResponse {
  status: "healthy" | "unhealthy" | "error" | "backend-unavailable";
  message: string;
  provider?: string;
  llm_provider?: string;
  embedding_provider?: string;
  llm_error?: string | null;
  embedding_error?: string | null;
  details?: ProviderHealthDetails;
}
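
// Illustrative only (assumed values, not part of the API contract): a 503
// from /api/provider/health would map onto this shape roughly as follows.
//
// const exampleUnhealthy: ProviderHealthResponse = {
//   status: "unhealthy",
//   message: "Provider validation failed",
//   llm_provider: "anthropic",
//   embedding_provider: "openai",
//   llm_error: "Invalid API key",        // hypothetical error text
//   embedding_error: null,
//   details: {
//     llm_model: "claude-sonnet-4",      // hypothetical model names
//     embedding_model: "text-embedding-3-small",
//     endpoint: null,
//   },
// };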

export interface ProviderHealthParams {
  provider?: "openai" | "ollama" | "watsonx";
}

export const useProviderHealthQuery = (
  params?: ProviderHealthParams,
  options?: Omit<
    UseQueryOptions<ProviderHealthResponse, Error>,
    "queryKey" | "queryFn"
  >
) => {
  const queryClient = useQueryClient();
  const { data: settings = {} } = useGetSettingsQuery();

  async function checkProviderHealth(): Promise<ProviderHealthResponse> {
    try {
      const url = new URL("/api/provider/health", window.location.origin);

      // Add provider query param if specified
      if (params?.provider) {
        url.searchParams.set("provider", params.provider);
      }

      const response = await fetch(url.toString());

      if (response.ok) {
        return await response.json();
      } else if (response.status === 503) {
        // Backend is up but provider validation failed
        const errorData = await response.json().catch(() => ({}));
        return {
          status: "unhealthy",
          message: errorData.message || "Provider validation failed",
          provider: errorData.provider || params?.provider || "unknown",
          llm_provider: errorData.llm_provider,
          embedding_provider: errorData.embedding_provider,
          llm_error: errorData.llm_error,
          embedding_error: errorData.embedding_error,
          details: errorData.details,
        };
      } else {
        // Other backend errors (400, etc.) - treat as provider issues
        const errorData = await response.json().catch(() => ({}));
        return {
          status: "error",
          message: errorData.message || "Failed to check provider health",
          provider: errorData.provider || params?.provider || "unknown",
          llm_provider: errorData.llm_provider,
          embedding_provider: errorData.embedding_provider,
          llm_error: errorData.llm_error,
          embedding_error: errorData.embedding_error,
          details: errorData.details,
        };
      }
    } catch (error) {
      // Network error - backend is likely down, don't show provider banner
      return {
        status: "backend-unavailable",
        message: error instanceof Error ? error.message : "Connection failed",
        provider: params?.provider || "unknown",
      };
    }
  }

  const queryResult = useQuery(
    {
      queryKey: ["provider", "health"],
      queryFn: checkProviderHealth,
      retry: false, // Don't retry health checks automatically
      enabled: !!settings?.edited && options?.enabled !== false, // Only run after onboarding is complete
      ...options,
    },
    queryClient
  );

  return queryResult;
};
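
For reference, a minimal sketch of how this hook might be consumed by the provider health banner mentioned in the commit message. The component name, import path, and markup below are hypothetical and not taken from the OpenRAG codebase:

import { useProviderHealthQuery } from "./useProviderHealthQuery";

// Hypothetical banner: hidden while loading, while the backend is
// unreachable, or while the providers report healthy; otherwise it
// surfaces the per-provider errors returned by the hook.
export function ProviderHealthBanner() {
  const { data, isLoading } = useProviderHealthQuery();

  if (isLoading || !data) return null;
  if (data.status === "healthy" || data.status === "backend-unavailable") {
    return null;
  }

  return (
    <div role="alert">
      <p>{data.message}</p>
      {data.llm_error && (
        <p>
          LLM ({data.llm_provider}): {data.llm_error}
        </p>
      )}
      {data.embedding_error && (
        <p>
          Embeddings ({data.embedding_provider}): {data.embedding_error}
        </p>
      )}
    </div>
  );
}

Because the hook registers its result under the key ["provider", "health"], a settings mutation can force a fresh health check after the user saves new provider credentials, e.g. queryClient.invalidateQueries({ queryKey: ["provider", "health"] }).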