Transform the endpoint in the models service and in the onboarding backend instead of in the onboarding screen

Lucas Oliveira 2025-09-30 15:50:47 -03:00
parent 622eb422b2
commit d6b100459f
3 changed files with 140 additions and 138 deletions


@@ -1,5 +1,4 @@
 import { useEffect, useState } from "react";
-import { useGetSettingsQuery } from "@/app/api/queries/useGetSettingsQuery";
 import { LabelInput } from "@/components/label-input";
 import { LabelWrapper } from "@/components/label-wrapper";
 import OllamaLogo from "@/components/logo/ollama-logo";
@@ -12,151 +11,150 @@ import { AdvancedOnboarding } from "./advanced";
 import { ModelSelector } from "./model-selector";

 export function OllamaOnboarding({
   setSettings,
   sampleDataset,
   setSampleDataset,
 }: {
   setSettings: (settings: OnboardingVariables) => void;
   sampleDataset: boolean;
   setSampleDataset: (dataset: boolean) => void;
 }) {
-  const { data: settings } = useGetSettingsQuery();
-  const [endpoint, setEndpoint] = useState(`http://${settings?.localhost_url ?? "localhost"}:11434`);
+  const [endpoint, setEndpoint] = useState(`http://localhost:11434`);
   const [showConnecting, setShowConnecting] = useState(false);
   const debouncedEndpoint = useDebouncedValue(endpoint, 500);

   // Fetch models from API when endpoint is provided (debounced)
   const {
     data: modelsData,
     isLoading: isLoadingModels,
     error: modelsError,
   } = useGetOllamaModelsQuery(
     debouncedEndpoint ? { endpoint: debouncedEndpoint } : undefined,
   );

   // Use custom hook for model selection logic
   const {
     languageModel,
     embeddingModel,
     setLanguageModel,
     setEmbeddingModel,
     languageModels,
     embeddingModels,
   } = useModelSelection(modelsData);

   // Handle delayed display of connecting state
   useEffect(() => {
     let timeoutId: NodeJS.Timeout;

     if (debouncedEndpoint && isLoadingModels) {
       timeoutId = setTimeout(() => {
         setShowConnecting(true);
       }, 500);
     } else {
       setShowConnecting(false);
     }

     return () => {
       if (timeoutId) {
         clearTimeout(timeoutId);
       }
     };
   }, [debouncedEndpoint, isLoadingModels]);

   const handleSampleDatasetChange = (dataset: boolean) => {
     setSampleDataset(dataset);
   };

   // Update settings when values change
   useUpdateSettings(
     "ollama",
     {
       endpoint,
       languageModel,
       embeddingModel,
     },
     setSettings,
   );

   // Check validation state based on models query
   const hasConnectionError = debouncedEndpoint && modelsError;
   const hasNoModels =
     modelsData &&
     !modelsData.language_models?.length &&
     !modelsData.embedding_models?.length;

   return (
     <>
       <div className="space-y-4">
         <div className="space-y-1">
           <LabelInput
             label="Ollama Base URL"
             helperText="Base URL of your Ollama server"
             id="api-endpoint"
             required
             placeholder="http://localhost:11434"
             value={endpoint}
             onChange={(e) => setEndpoint(e.target.value)}
           />
           {showConnecting && (
             <p className="text-mmd text-muted-foreground">
               Connecting to Ollama server...
             </p>
           )}
           {hasConnectionError && (
             <p className="text-mmd text-accent-amber-foreground">
               Can't reach Ollama at {debouncedEndpoint}. Update the base URL or
               start the server.
             </p>
           )}
           {hasNoModels && (
             <p className="text-mmd text-accent-amber-foreground">
               No models found. Install embedding and agent models on your Ollama
               server.
             </p>
           )}
         </div>
         <LabelWrapper
           label="Embedding model"
           helperText="Model used for knowledge ingest and retrieval"
           id="embedding-model"
           required={true}
         >
           <ModelSelector
             options={embeddingModels}
             icon={<OllamaLogo className="w-4 h-4" />}
             noOptionsPlaceholder={
               isLoadingModels
                 ? "Loading models..."
                 : "No embedding models detected. Install an embedding model to continue."
             }
             value={embeddingModel}
             onValueChange={setEmbeddingModel}
           />
         </LabelWrapper>
         <LabelWrapper
           label="Language model"
           helperText="Model used for chat"
           id="embedding-model"
           required={true}
         >
           <ModelSelector
             options={languageModels}
             icon={<OllamaLogo className="w-4 h-4" />}
             noOptionsPlaceholder={
               isLoadingModels
                 ? "Loading models..."
                 : "No language models detected. Install a language model to continue."
             }
             value={languageModel}
             onValueChange={setLanguageModel}
           />
         </LabelWrapper>
       </div>
       <AdvancedOnboarding
         sampleDataset={sampleDataset}
         setSampleDataset={handleSampleDatasetChange}
       />
     </>
   );
 }


@@ -1,6 +1,7 @@
 import json
 import platform
 from starlette.responses import JSONResponse
+from utils.container_utils import transform_localhost_url
 from utils.logging_config import get_logger
 from config.settings import (
     LANGFLOW_URL,
@@ -441,6 +442,8 @@ async def onboarding(request, flows_service):
                 {"error": "endpoint must be a non-empty string"}, status_code=400
             )
         current_config.provider.endpoint = body["endpoint"].strip()
+        if "model_provider" in body and body["model_provider"].strip() == "ollama":
+            current_config.provider.endpoint = transform_localhost_url(body["endpoint"].strip())
         config_updated = True

     if "project_id" in body:


@@ -1,5 +1,6 @@
 import httpx
 from typing import Dict, List
+from utils.container_utils import transform_localhost_url
 from utils.logging_config import get_logger

 logger = get_logger(__name__)
@@ -95,7 +96,7 @@ class ModelsService:
         """Fetch available models from Ollama API with tool calling capabilities for language models"""
         try:
             # Use provided endpoint or default
-            ollama_url = endpoint
+            ollama_url = transform_localhost_url(endpoint)

             # API endpoints
             tags_url = f"{ollama_url}/api/tags"