import {
  type UseQueryOptions,
  useQuery,
  useQueryClient,
} from "@tanstack/react-query";

import { useGetSettingsQuery } from "./useGetSettingsQuery";

export interface ModelOption {
  value: string;
  label: string;
  default?: boolean;
}

export interface ModelsResponse {
  language_models: ModelOption[];
  embedding_models: ModelOption[];
}

export interface OpenAIModelsParams {
  apiKey?: string;
}

export interface AnthropicModelsParams {
  apiKey?: string;
}

export interface OllamaModelsParams {
  endpoint?: string;
}

export interface IBMModelsParams {
  endpoint?: string;
  apiKey?: string;
  projectId?: string;
}

export const useGetOpenAIModelsQuery = (
  params?: OpenAIModelsParams,
  options?: Omit<UseQueryOptions<ModelsResponse>, "queryKey" | "queryFn">,
) => {
  const queryClient = useQueryClient();

  async function getOpenAIModels(): Promise<ModelsResponse> {
    const url = new URL("/api/models/openai", window.location.origin);

    const body: { api_key?: string } = {};
    if (params?.apiKey) {
      body.api_key = params.apiKey;
    }

    const response = await fetch(url.toString(), {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
      },
      body: JSON.stringify(body),
    });
    if (response.ok) {
      return await response.json();
    } else {
      throw new Error("Failed to fetch OpenAI models");
    }
  }

  const queryResult = useQuery(
    {
      queryKey: ["models", "openai", params],
      queryFn: getOpenAIModels,
      staleTime: 0, // Always fetch fresh data
      gcTime: 0, // Don't cache results
      retry: false,
      ...options,
    },
    queryClient,
  );

  return queryResult;
};

export const useGetAnthropicModelsQuery = (
  params?: AnthropicModelsParams,
  options?: Omit<UseQueryOptions<ModelsResponse>, "queryKey" | "queryFn">,
) => {
  const queryClient = useQueryClient();

  async function getAnthropicModels(): Promise<ModelsResponse> {
    const url = new URL("/api/models/anthropic", window.location.origin);

    const body: { api_key?: string } = {};
    if (params?.apiKey) {
      body.api_key = params.apiKey;
    }

    const response = await fetch(url.toString(), {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
      },
      body: JSON.stringify(body),
    });
    if (response.ok) {
      return await response.json();
    } else {
      throw new Error("Failed to fetch Anthropic models");
    }
  }

  const queryResult = useQuery(
    {
      queryKey: ["models", "anthropic", params],
      queryFn: getAnthropicModels,
      staleTime: 0, // Always fetch fresh data
      gcTime: 0, // Don't cache results
      retry: false,
      ...options,
    },
    queryClient,
  );

  return queryResult;
};

export const useGetOllamaModelsQuery = (
  params?: OllamaModelsParams,
  options?: Omit<UseQueryOptions<ModelsResponse>, "queryKey" | "queryFn">,
) => {
  const queryClient = useQueryClient();

  async function getOllamaModels(): Promise<ModelsResponse> {
    const url = new URL("/api/models/ollama", window.location.origin);
    if (params?.endpoint) {
      url.searchParams.set("endpoint", params.endpoint);
    }

    const response = await fetch(url.toString());
    if (response.ok) {
      return await response.json();
    } else {
      throw new Error("Failed to fetch Ollama models");
    }
  }

  const queryResult = useQuery(
    {
      queryKey: ["models", "ollama", params],
      queryFn: getOllamaModels,
      staleTime: 0, // Always fetch fresh data
      gcTime: 0, // Don't cache results
      retry: false,
      ...options,
    },
    queryClient,
  );

  return queryResult;
};

export const useGetIBMModelsQuery = (
  params?: IBMModelsParams,
  options?: Omit<UseQueryOptions<ModelsResponse>, "queryKey" | "queryFn">,
) => {
  const queryClient = useQueryClient();

  async function getIBMModels(): Promise<ModelsResponse> {
    const url = new URL("/api/models/ibm", window.location.origin);

    const body: {
      endpoint?: string;
      api_key?: string;
      project_id?: string;
    } = {};
    if (params?.endpoint) {
      body.endpoint = params.endpoint;
    }
    if (params?.apiKey) {
      body.api_key = params.apiKey;
    }
    if (params?.projectId) {
      body.project_id = params.projectId;
    }

    const response = await fetch(url.toString(), {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
      },
      body: JSON.stringify(body),
    });
    if (response.ok) {
      return await response.json();
    } else {
      throw new Error("Failed to fetch IBM models");
    }
  }

  const queryResult = useQuery(
    {
      queryKey: ["models", "ibm", params],
      queryFn: getIBMModels,
      staleTime: 0, // Always fetch fresh data
      gcTime: 0, // Don't cache results
      retry: false,
      ...options,
    },
    queryClient,
  );

  return queryResult;
};

/**
 * Hook that automatically fetches models for the current LLM provider
 * based on the settings configuration
 */
export const useGetCurrentProviderModelsQuery = (
  options?: Omit<UseQueryOptions<ModelsResponse>, "queryKey" | "queryFn">,
) => {
  const { data: settings } = useGetSettingsQuery();
  const currentProvider = settings?.agent?.llm_provider;

  // Determine which hook to use based on current provider
  const openaiModels = useGetOpenAIModelsQuery(
    { apiKey: "" },
    {
      enabled: currentProvider === "openai" && options?.enabled !== false,
      ...options,
    },
  );

  const anthropicModels = useGetAnthropicModelsQuery(
    { apiKey: "" },
    {
      enabled: currentProvider === "anthropic" && options?.enabled !== false,
      ...options,
    },
  );

  const ollamaModels = useGetOllamaModelsQuery(
    { endpoint: settings?.providers?.ollama?.endpoint },
    {
      enabled:
        currentProvider === "ollama" &&
        !!settings?.providers?.ollama?.endpoint &&
        options?.enabled !== false,
      ...options,
    },
  );

  const ibmModels = useGetIBMModelsQuery(
    {
      endpoint: settings?.providers?.watsonx?.endpoint,
      apiKey: "",
      projectId: settings?.providers?.watsonx?.project_id,
    },
    {
      enabled:
        currentProvider === "watsonx" &&
        !!settings?.providers?.watsonx?.endpoint &&
        !!settings?.providers?.watsonx?.project_id &&
        options?.enabled !== false,
      ...options,
    },
  );

  // Return the appropriate query result based on current provider
  switch (currentProvider) {
    case "openai":
      return openaiModels;
    case "anthropic":
      return anthropicModels;
    case "ollama":
      return ollamaModels;
    case "watsonx":
      return ibmModels;
    default:
      // Return a default/disabled query if no provider is set
      return {
        data: undefined,
        isLoading: false,
        error: null,
        refetch: async () => ({ data: undefined }),
      } as ReturnType<typeof useGetOpenAIModelsQuery>;
  }
};
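
// A minimal usage sketch for useGetCurrentProviderModelsQuery, kept here as a
// comment so it does not change the module's exports. The component name and
// markup below are hypothetical; only the hook call and the ModelsResponse
// shape come from the definitions above.
//
//   const ModelPicker = () => {
//     const { data, isLoading, error } = useGetCurrentProviderModelsQuery();
//     if (isLoading) return <span>Loading models…</span>;
//     if (error || !data) return <span>Could not load models</span>;
//     return (
//       <select>
//         {data.language_models.map((m) => (
//           <option key={m.value} value={m.value}>
//             {m.label}
//           </option>
//         ))}
//       </select>
//     );
//   };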