From d6b100459f2e28a4a0ba33b9ff1593ca27fa7985 Mon Sep 17 00:00:00 2001
From: Lucas Oliveira
Date: Tue, 30 Sep 2025 15:50:47 -0300
Subject: [PATCH] transform the Ollama endpoint in the models service and
 the onboarding backend instead of the onboarding screen

---
 .../components/ollama-onboarding.tsx | 272 +++++++++---------
 src/api/settings.py                  |   3 +
 src/services/models_service.py       |   3 +-
 3 files changed, 140 insertions(+), 138 deletions(-)

diff --git a/frontend/src/app/onboarding/components/ollama-onboarding.tsx b/frontend/src/app/onboarding/components/ollama-onboarding.tsx
index 261f0e6d..cec7763d 100644
--- a/frontend/src/app/onboarding/components/ollama-onboarding.tsx
+++ b/frontend/src/app/onboarding/components/ollama-onboarding.tsx
@@ -1,5 +1,4 @@
 import { useEffect, useState } from "react";
-import { useGetSettingsQuery } from "@/app/api/queries/useGetSettingsQuery";
 import { LabelInput } from "@/components/label-input";
 import { LabelWrapper } from "@/components/label-wrapper";
 import OllamaLogo from "@/components/logo/ollama-logo";
@@ -12,151 +11,150 @@
 import { AdvancedOnboarding } from "./advanced";
 import { ModelSelector } from "./model-selector";
 
 export function OllamaOnboarding({
-    setSettings,
-    sampleDataset,
-    setSampleDataset,
+  setSettings,
+  sampleDataset,
+  setSampleDataset,
 }: {
-    setSettings: (settings: OnboardingVariables) => void;
-    sampleDataset: boolean;
-    setSampleDataset: (dataset: boolean) => void;
+  setSettings: (settings: OnboardingVariables) => void;
+  sampleDataset: boolean;
+  setSampleDataset: (dataset: boolean) => void;
 }) {
-    const { data: settings } = useGetSettingsQuery();
-    const [endpoint, setEndpoint] = useState(`http://${settings?.localhost_url ?? "localhost"}:11434`);
-    const [showConnecting, setShowConnecting] = useState(false);
-    const debouncedEndpoint = useDebouncedValue(endpoint, 500);
+  const [endpoint, setEndpoint] = useState(`http://localhost:11434`);
+  const [showConnecting, setShowConnecting] = useState(false);
+  const debouncedEndpoint = useDebouncedValue(endpoint, 500);
 
-    // Fetch models from API when endpoint is provided (debounced)
-    const {
-        data: modelsData,
-        isLoading: isLoadingModels,
-        error: modelsError,
-    } = useGetOllamaModelsQuery(
-        debouncedEndpoint ? { endpoint: debouncedEndpoint } : undefined,
-    );
+  // Fetch models from API when endpoint is provided (debounced)
+  const {
+    data: modelsData,
+    isLoading: isLoadingModels,
+    error: modelsError,
+  } = useGetOllamaModelsQuery(
+    debouncedEndpoint ? { endpoint: debouncedEndpoint } : undefined,
+  );
 
-    // Use custom hook for model selection logic
-    const {
-        languageModel,
-        embeddingModel,
-        setLanguageModel,
-        setEmbeddingModel,
-        languageModels,
-        embeddingModels,
-    } = useModelSelection(modelsData);
+  // Use custom hook for model selection logic
+  const {
+    languageModel,
+    embeddingModel,
+    setLanguageModel,
+    setEmbeddingModel,
+    languageModels,
+    embeddingModels,
+  } = useModelSelection(modelsData);
 
-    // Handle delayed display of connecting state
-    useEffect(() => {
-        let timeoutId: NodeJS.Timeout;
-
-        if (debouncedEndpoint && isLoadingModels) {
-            timeoutId = setTimeout(() => {
-                setShowConnecting(true);
-            }, 500);
-        } else {
-            setShowConnecting(false);
-        }
-
-        return () => {
-            if (timeoutId) {
-                clearTimeout(timeoutId);
-            }
-        };
-    }, [debouncedEndpoint, isLoadingModels]);
+  // Handle delayed display of connecting state
+  useEffect(() => {
+    let timeoutId: NodeJS.Timeout;
+
+    if (debouncedEndpoint && isLoadingModels) {
+      timeoutId = setTimeout(() => {
+        setShowConnecting(true);
+      }, 500);
+    } else {
+      setShowConnecting(false);
+    }
+
+    return () => {
+      if (timeoutId) {
+        clearTimeout(timeoutId);
+      }
+    };
+  }, [debouncedEndpoint, isLoadingModels]);
 
-    const handleSampleDatasetChange = (dataset: boolean) => {
-        setSampleDataset(dataset);
-    };
+  const handleSampleDatasetChange = (dataset: boolean) => {
+    setSampleDataset(dataset);
+  };
 
-    // Update settings when values change
-    useUpdateSettings(
-        "ollama",
-        {
-            endpoint,
-            languageModel,
-            embeddingModel,
-        },
-        setSettings,
-    );
+  // Update settings when values change
+  useUpdateSettings(
+    "ollama",
+    {
+      endpoint,
+      languageModel,
+      embeddingModel,
+    },
+    setSettings,
+  );
 
-    // Check validation state based on models query
-    const hasConnectionError = debouncedEndpoint && modelsError;
-    const hasNoModels =
-        modelsData &&
-        !modelsData.language_models?.length &&
-        !modelsData.embedding_models?.length;
+  // Check validation state based on models query
+  const hasConnectionError = debouncedEndpoint && modelsError;
+  const hasNoModels =
+    modelsData &&
+    !modelsData.language_models?.length &&
+    !modelsData.embedding_models?.length;
 
-    return (
-        <>
-            <div …>
-                <LabelWrapper …>
-                    <LabelInput
-                        …
-                        onChange={(e) => setEndpoint(e.target.value)}
-                    />
-                    {showConnecting && (
-                        <div …>
-                            Connecting to Ollama server...
-                        </div>
-                    )}
-                    {hasConnectionError && (
-                        <div …>
-                            Can’t reach Ollama at {debouncedEndpoint}. Update the base URL or
-                            start the server.
-                        </div>
-                    )}
-                    {hasNoModels && (
-                        <div …>
-                            No models found. Install embedding and agent models on your Ollama
-                            server.
-                        </div>
-                    )}
-                </LabelWrapper>
-            </div>
-            <ModelSelector
-                label={
-                    <… />
-                }
-                noOptionsPlaceholder={
-                    isLoadingModels
-                        ? "Loading models..."
-                        : "No embedding models detected. Install an embedding model to continue."
-                }
-                value={embeddingModel}
-                onValueChange={setEmbeddingModel}
-            />
-            <ModelSelector
-                label={
-                    <… />
-                }
-                noOptionsPlaceholder={
-                    isLoadingModels
-                        ? "Loading models..."
-                        : "No language models detected. Install a language model to continue."
-                }
-                value={languageModel}
-                onValueChange={setLanguageModel}
-            />
-            <AdvancedOnboarding … />
-        </>
-    );
+  return (
+    <>
+      <div …>
+        <LabelWrapper …>
+          <LabelInput
+            …
+            onChange={(e) => setEndpoint(e.target.value)}
+          />
+          {showConnecting && (
+            <div …>
+              Connecting to Ollama server...
+            </div>
+          )}
+          {hasConnectionError && (
+            <div …>
+              Can’t reach Ollama at {debouncedEndpoint}. Update the base URL or
+              start the server.
+            </div>
+          )}
+          {hasNoModels && (
+            <div …>
+              No models found. Install embedding and agent models on your Ollama
+              server.
+            </div>
+          )}
+        </LabelWrapper>
+      </div>
+      <ModelSelector
+        label={
+          <… />
+        }
+        noOptionsPlaceholder={
+          isLoadingModels
+            ? "Loading models..."
+            : "No embedding models detected. Install an embedding model to continue."
+        }
+        value={embeddingModel}
+        onValueChange={setEmbeddingModel}
+      />
+      <ModelSelector
+        label={
+          <… />
+        }
+        noOptionsPlaceholder={
+          isLoadingModels
+            ? "Loading models..."
+            : "No language models detected. Install a language model to continue."
+        }
+        value={languageModel}
+        onValueChange={setLanguageModel}
+      />
+ + + ); } diff --git a/src/api/settings.py b/src/api/settings.py index a32e8a1e..1d67919e 100644 --- a/src/api/settings.py +++ b/src/api/settings.py @@ -1,6 +1,7 @@ import json import platform from starlette.responses import JSONResponse +from utils.container_utils import transform_localhost_url from utils.logging_config import get_logger from config.settings import ( LANGFLOW_URL, @@ -441,6 +442,8 @@ async def onboarding(request, flows_service): {"error": "endpoint must be a non-empty string"}, status_code=400 ) current_config.provider.endpoint = body["endpoint"].strip() + if "model_provider" in body and body["model_provider"].strip() == "ollama": + current_config.provider.endpoint = transform_localhost_url(body["endpoint"].strip()) config_updated = True if "project_id" in body: diff --git a/src/services/models_service.py b/src/services/models_service.py index 35a54895..a90a74f4 100644 --- a/src/services/models_service.py +++ b/src/services/models_service.py @@ -1,5 +1,6 @@ import httpx from typing import Dict, List +from utils.container_utils import transform_localhost_url from utils.logging_config import get_logger logger = get_logger(__name__) @@ -95,7 +96,7 @@ class ModelsService: """Fetch available models from Ollama API with tool calling capabilities for language models""" try: # Use provided endpoint or default - ollama_url = endpoint + ollama_url = transform_localhost_url(endpoint) # API endpoints tags_url = f"{ollama_url}/api/tags"