openrag/frontend/src/app/api/queries/useGetModelsQuery.ts
Lucas Oliveira 37faf94979
feat: adds anthropic provider, splits onboarding editing into two, supports provider changing with generic llm and embedding components (#373)
* Added flows with new components

* commented model provider assignment

* Added agent component display name

* commented provider assignment, assign provider on the generic component, assign custom values

* fixed ollama not showing loading steps, fixed loading steps never being removed

* made embedding and llm model optional on onboarding call

* added isEmbedding handling on useModelSelection

* added isEmbedding on onboarding card, separating embedding from non-embedding card

* Added one additional step to configure embeddings

* Added embedding provider config

* Changed settings.py to return if not embedding

* Added editing fields to onboarding

* updated onboarding and flows_service to change embedding and llm separately

* updated templates that need to be changed with provider values

* updated flows with new components

* Changed config manager to not have default models

* Changed flows_service settings

* Complete steps if not embedding

* Add more onboarding steps

* Removed one step from llm steps

* Added Anthropic as a provider option for the language model on the frontend

* Added anthropic models

* Added anthropic support on Backend

* Fixed provider health and validation

* Format settings

* Change anthropic logo

* Changed button to not jump

* Changed flows service to make anthropic work

* Fixed some things

* add embedding specific global variables

* updated flows

* fixed ingestion flow

* Implemented anthropic on settings page

* add embedding provider logo

* updated backend to work with multiple provider config

* update useUpdateSettings with new settings type

* updated provider health banner to check for health with new api

* changed queries and mutations to use new api

* changed embedding model input to work with new api

* Implemented provider based config on the frontend

* update existing design

* fixed settings configured

* fixed provider health query to include health checks for both providers

* Changed model-providers to correctly show the configured providers

* Updated prompt

* updated openrag agent

* Fixed settings to allow editing providers and changing llm and embedding models

* updated settings

* changed Langflow version

* bump openrag version

* added more steps

* update settings to create the global variables

* updated steps

* updated default prompt

---------

Co-authored-by: Sebastián Estévez <estevezsebastian@gmail.com>
2025-11-11 19:22:16 -03:00


import {
  type UseQueryOptions,
  useQuery,
  useQueryClient,
} from "@tanstack/react-query";
import { useGetSettingsQuery } from "./useGetSettingsQuery";

export interface ModelOption {
  value: string;
  label: string;
  default?: boolean;
}

export interface ModelsResponse {
  language_models: ModelOption[];
  embedding_models: ModelOption[];
}
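// Illustrative response shape only — the model IDs below are hypothetical
// placeholders, not values this API is known to return; the backend decides
// the actual lists and which entry carries `default: true`.
//
// const example: ModelsResponse = {
//   language_models: [{ value: "gpt-4o", label: "GPT-4o", default: true }],
//   embedding_models: [
//     { value: "text-embedding-3-small", label: "text-embedding-3-small" },
//   ],
// };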
export interface OpenAIModelsParams {
  apiKey?: string;
}

export interface AnthropicModelsParams {
  apiKey?: string;
}

export interface OllamaModelsParams {
  endpoint?: string;
}

export interface IBMModelsParams {
  endpoint?: string;
  apiKey?: string;
  projectId?: string;
}
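// Each populated field is forwarded as a query-string parameter on the
// matching /api/models/<provider> route below: apiKey -> api_key,
// endpoint -> endpoint, projectId -> project_id. Undefined fields are
// simply omitted from the request.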
export const useGetOpenAIModelsQuery = (
  params?: OpenAIModelsParams,
  options?: Omit<UseQueryOptions<ModelsResponse>, "queryKey" | "queryFn">,
) => {
  const queryClient = useQueryClient();

  async function getOpenAIModels(): Promise<ModelsResponse> {
    const url = new URL("/api/models/openai", window.location.origin);
    if (params?.apiKey) {
      url.searchParams.set("api_key", params.apiKey);
    }
    const response = await fetch(url.toString());
    if (!response.ok) {
      throw new Error("Failed to fetch OpenAI models");
    }
    return await response.json();
  }

  return useQuery(
    {
      queryKey: ["models", "openai", params],
      queryFn: getOpenAIModels,
      staleTime: 0, // Always fetch fresh data
      gcTime: 0, // Don't cache results
      retry: false,
      ...options,
    },
    queryClient,
  );
};
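// Usage sketch (hypothetical consumer code — `draftKey` and the component it
// lives in are assumptions, not part of this module):
//
// const { data, isLoading, error } = useGetOpenAIModelsQuery(
//   { apiKey: draftKey },
//   { enabled: draftKey.length > 0 }, // only fetch once a key has been typed
// );
//
// The same call pattern applies to the Anthropic, Ollama, and IBM hooks below.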
export const useGetAnthropicModelsQuery = (
  params?: AnthropicModelsParams,
  options?: Omit<UseQueryOptions<ModelsResponse>, "queryKey" | "queryFn">,
) => {
  const queryClient = useQueryClient();

  async function getAnthropicModels(): Promise<ModelsResponse> {
    const url = new URL("/api/models/anthropic", window.location.origin);
    if (params?.apiKey) {
      url.searchParams.set("api_key", params.apiKey);
    }
    const response = await fetch(url.toString());
    if (!response.ok) {
      throw new Error("Failed to fetch Anthropic models");
    }
    return await response.json();
  }

  return useQuery(
    {
      queryKey: ["models", "anthropic", params],
      queryFn: getAnthropicModels,
      staleTime: 0, // Always fetch fresh data
      gcTime: 0, // Don't cache results
      retry: false,
      ...options,
    },
    queryClient,
  );
};
export const useGetOllamaModelsQuery = (
  params?: OllamaModelsParams,
  options?: Omit<UseQueryOptions<ModelsResponse>, "queryKey" | "queryFn">,
) => {
  const queryClient = useQueryClient();

  async function getOllamaModels(): Promise<ModelsResponse> {
    const url = new URL("/api/models/ollama", window.location.origin);
    if (params?.endpoint) {
      url.searchParams.set("endpoint", params.endpoint);
    }
    const response = await fetch(url.toString());
    if (!response.ok) {
      throw new Error("Failed to fetch Ollama models");
    }
    return await response.json();
  }

  return useQuery(
    {
      queryKey: ["models", "ollama", params],
      queryFn: getOllamaModels,
      staleTime: 0, // Always fetch fresh data
      gcTime: 0, // Don't cache results
      retry: false,
      ...options,
    },
    queryClient,
  );
};
export const useGetIBMModelsQuery = (
  params?: IBMModelsParams,
  options?: Omit<UseQueryOptions<ModelsResponse>, "queryKey" | "queryFn">,
) => {
  const queryClient = useQueryClient();

  async function getIBMModels(): Promise<ModelsResponse> {
    const url = new URL("/api/models/ibm", window.location.origin);
    if (params?.endpoint) {
      url.searchParams.set("endpoint", params.endpoint);
    }
    if (params?.apiKey) {
      url.searchParams.set("api_key", params.apiKey);
    }
    if (params?.projectId) {
      url.searchParams.set("project_id", params.projectId);
    }
    const response = await fetch(url.toString());
    if (!response.ok) {
      throw new Error("Failed to fetch IBM models");
    }
    return await response.json();
  }

  return useQuery(
    {
      queryKey: ["models", "ibm", params],
      queryFn: getIBMModels,
      staleTime: 0, // Always fetch fresh data
      gcTime: 0, // Don't cache results
      retry: false,
      ...options,
    },
    queryClient,
  );
};
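// All four hooks share staleTime: 0, gcTime: 0, retry: false. A plausible
// reading of that combination (not stated in the source): these endpoints
// double as live credential checks, so a cached response or an automatic
// retry could hide a key or endpoint that has just stopped working.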
/**
 * Hook that automatically fetches models for the current LLM provider
 * based on the settings configuration.
 */
export const useGetCurrentProviderModelsQuery = (
  options?: Omit<UseQueryOptions<ModelsResponse>, "queryKey" | "queryFn">,
) => {
  const { data: settings } = useGetSettingsQuery();
  const currentProvider = settings?.agent?.llm_provider;

  // All four provider queries are declared unconditionally so the hook order
  // stays stable across renders; `enabled` decides which one actually runs.
  // `enabled` is set after spreading `options` so a caller-supplied
  // `enabled: true` cannot switch on a query for a provider that is not
  // currently selected.
  const openaiModels = useGetOpenAIModelsQuery(
    // Empty key: nothing is added to the query string, so the backend
    // presumably falls back to the credential stored in settings.
    { apiKey: "" },
    {
      ...options,
      enabled: currentProvider === "openai" && options?.enabled !== false,
    },
  );

  const anthropicModels = useGetAnthropicModelsQuery(
    { apiKey: "" },
    {
      ...options,
      enabled: currentProvider === "anthropic" && options?.enabled !== false,
    },
  );

  const ollamaModels = useGetOllamaModelsQuery(
    { endpoint: settings?.providers?.ollama?.endpoint },
    {
      ...options,
      enabled:
        currentProvider === "ollama" &&
        !!settings?.providers?.ollama?.endpoint &&
        options?.enabled !== false,
    },
  );

  const ibmModels = useGetIBMModelsQuery(
    {
      endpoint: settings?.providers?.watsonx?.endpoint,
      apiKey: "",
      projectId: settings?.providers?.watsonx?.project_id,
    },
    {
      ...options,
      enabled:
        currentProvider === "watsonx" &&
        !!settings?.providers?.watsonx?.endpoint &&
        !!settings?.providers?.watsonx?.project_id &&
        options?.enabled !== false,
    },
  );

  // Return the query result that matches the current provider.
  switch (currentProvider) {
    case "openai":
      return openaiModels;
    case "anthropic":
      return anthropicModels;
    case "ollama":
      return ollamaModels;
    case "watsonx":
      return ibmModels;
    default:
      // No provider configured yet: return an inert stub shaped like a query
      // result so callers don't need to special-case this branch.
      return {
        data: undefined,
        isLoading: false,
        error: null,
        refetch: async () => ({ data: undefined }),
      } as ReturnType<typeof useGetOpenAIModelsQuery>;
  }
};
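// Usage sketch for the aggregate hook (hypothetical consumer — the
// `setSelectedModel` setter is an assumption, not part of this module):
//
// const { data: models, isLoading } = useGetCurrentProviderModelsQuery();
// const defaultModel = models?.language_models.find((m) => m.default);
// if (defaultModel) setSelectedModel(defaultModel.value);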