Refactor onboarding UI (provider defaults, model-selector placeholders, env-key toggle)
and move Langflow flow-update helpers out of src/api/settings.py into FlowsService.
diff --git a/frontend/src/app/onboarding/components/advanced.tsx b/frontend/src/app/onboarding/components/advanced.tsx
index bb0089d5..20764aed 100644
--- a/frontend/src/app/onboarding/components/advanced.tsx
+++ b/frontend/src/app/onboarding/components/advanced.tsx
@@ -47,8 +47,7 @@ export function AdvancedOnboarding({
{hasEmbeddingModels && (
@@ -63,8 +62,7 @@ export function AdvancedOnboarding({
{hasLanguageModels && (
@@ -79,7 +77,7 @@ export function AdvancedOnboarding({
{(hasLanguageModels || hasEmbeddingModels) && }
diff --git a/frontend/src/app/onboarding/components/ibm-onboarding.tsx b/frontend/src/app/onboarding/components/ibm-onboarding.tsx
index 550f9d6b..63e3fe6a 100644
--- a/frontend/src/app/onboarding/components/ibm-onboarding.tsx
+++ b/frontend/src/app/onboarding/components/ibm-onboarding.tsx
@@ -1,5 +1,6 @@
import { useState } from "react";
import { LabelInput } from "@/components/label-input";
+import { LabelWrapper } from "@/components/label-wrapper";
import IBMLogo from "@/components/logo/ibm-logo";
import { useDebouncedValue } from "@/lib/debounce";
import type { OnboardingVariables } from "../../api/mutations/useOnboardingMutation";
@@ -7,6 +8,7 @@ import { useGetIBMModelsQuery } from "../../api/queries/useGetModelsQuery";
import { useModelSelection } from "../hooks/useModelSelection";
import { useUpdateSettings } from "../hooks/useUpdateSettings";
import { AdvancedOnboarding } from "./advanced";
+import { ModelSelector } from "./model-selector";
export function IBMOnboarding({
setSettings,
@@ -17,10 +19,42 @@ export function IBMOnboarding({
sampleDataset: boolean;
setSampleDataset: (dataset: boolean) => void;
}) {
- const [endpoint, setEndpoint] = useState("");
+ const [endpoint, setEndpoint] = useState("https://us-south.ml.cloud.ibm.com");
const [apiKey, setApiKey] = useState("");
const [projectId, setProjectId] = useState("");
+ const options = [
+ {
+ value: "https://us-south.ml.cloud.ibm.com",
+ label: "https://us-south.ml.cloud.ibm.com",
+ default: true,
+ },
+ {
+ value: "https://eu-de.ml.cloud.ibm.com",
+ label: "https://eu-de.ml.cloud.ibm.com",
+ default: false,
+ },
+ {
+ value: "https://eu-gb.ml.cloud.ibm.com",
+ label: "https://eu-gb.ml.cloud.ibm.com",
+ default: false,
+ },
+ {
+ value: "https://au-syd.ml.cloud.ibm.com",
+ label: "https://au-syd.ml.cloud.ibm.com",
+ default: false,
+ },
+ {
+ value: "https://jp-tok.ml.cloud.ibm.com",
+ label: "https://jp-tok.ml.cloud.ibm.com",
+ default: false,
+ },
+ {
+ value: "https://ca-tor.ml.cloud.ibm.com",
+ label: "https://ca-tor.ml.cloud.ibm.com",
+ default: false,
+ },
+ ];
const debouncedEndpoint = useDebouncedValue(endpoint, 500);
const debouncedApiKey = useDebouncedValue(apiKey, 500);
const debouncedProjectId = useDebouncedValue(projectId, 500);
@@ -68,19 +102,26 @@ export function IBMOnboarding({
return (
<>
-
setEndpoint(e.target.value)}
- />
+ >
+
+
- Invalid configuration or connection failed
+ Connection failed. Check your configuration.
)}
- {modelsData &&
- (modelsData.language_models?.length > 0 ||
- modelsData.embedding_models?.length > 0) && (
-
- Configuration is valid
-
- )}
}
diff --git a/frontend/src/app/onboarding/components/model-selector.tsx b/frontend/src/app/onboarding/components/model-selector.tsx
index 7a74bed2..dfed52ee 100644
--- a/frontend/src/app/onboarding/components/model-selector.tsx
+++ b/frontend/src/app/onboarding/components/model-selector.tsx
@@ -21,6 +21,9 @@ export function ModelSelector({
value,
onValueChange,
icon,
+ placeholder = "Select model...",
+ searchPlaceholder = "Search model...",
+ noOptionsPlaceholder = "No models available",
}: {
options: {
value: string;
@@ -29,6 +32,9 @@ export function ModelSelector({
}[];
value: string;
icon?: React.ReactNode;
+ placeholder?: string;
+ searchPlaceholder?: string;
+ noOptionsPlaceholder?: string;
onValueChange: (value: string) => void;
}) {
const [open, setOpen] = useState(false);
@@ -50,7 +56,7 @@ export function ModelSelector({
>
{value ? (
-
{icon}
+ {icon &&
{icon}
}
{options.find((framework) => framework.value === value)?.label}
{options.find((framework) => framework.value === value)
?.default && (
@@ -60,18 +66,18 @@ export function ModelSelector({
)}
) : options.length === 0 ? (
- "No models available"
+ noOptionsPlaceholder
) : (
- "Select model..."
+ placeholder
)}
-
+
- No model found.
+ {noOptionsPlaceholder}
{options.map((option) => (
void;
}) {
- const [endpoint, setEndpoint] = useState("");
+ const [endpoint, setEndpoint] = useState("http://localhost:11434");
+ const [showConnecting, setShowConnecting] = useState(false);
const debouncedEndpoint = useDebouncedValue(endpoint, 500);
// Fetch models from API when endpoint is provided (debounced)
@@ -41,6 +42,25 @@ export function OllamaOnboarding({
embeddingModels,
} = useModelSelection(modelsData);
+ // Handle delayed display of connecting state
+ useEffect(() => {
+ let timeoutId: NodeJS.Timeout;
+
+ if (debouncedEndpoint && isLoadingModels) {
+ timeoutId = setTimeout(() => {
+ setShowConnecting(true);
+ }, 500);
+ } else {
+ setShowConnecting(false);
+ }
+
+ return () => {
+ if (timeoutId) {
+ clearTimeout(timeoutId);
+ }
+ };
+ }, [debouncedEndpoint, isLoadingModels]);
+
const handleSampleDatasetChange = (dataset: boolean) => {
setSampleDataset(dataset);
};
@@ -57,74 +77,75 @@ export function OllamaOnboarding({
);
// Check validation state based on models query
- const isConnecting = debouncedEndpoint && isLoadingModels;
const hasConnectionError = debouncedEndpoint && modelsError;
const hasNoModels =
modelsData &&
!modelsData.language_models?.length &&
!modelsData.embedding_models?.length;
- const isValidConnection =
- modelsData &&
- (modelsData.language_models?.length > 0 ||
- modelsData.embedding_models?.length > 0);
return (
<>
setEndpoint(e.target.value)}
/>
- {isConnecting && (
+ {showConnecting && (
Connecting to Ollama server...
)}
{hasConnectionError && (
- Can’t reach Ollama at {debouncedEndpoint}. Update the endpoint or
+ Can’t reach Ollama at {debouncedEndpoint}. Update the base URL or
start the server.
)}
{hasNoModels && (
- No models found. Please install some models on your Ollama server.
-
- )}
- {isValidConnection && (
-
- Connected successfully
+ No models found. Install embedding and agent models on your Ollama
+ server.
)}
}
+ noOptionsPlaceholder={
+ isLoadingModels
+ ? "Loading models..."
+ : "No embedding models detected. Install an embedding model to continue."
+ }
value={embeddingModel}
onValueChange={setEmbeddingModel}
/>
}
+ noOptionsPlaceholder={
+ isLoadingModels
+ ? "Loading models..."
+ : "No language models detected. Install a language model to continue."
+ }
value={languageModel}
onValueChange={setLanguageModel}
/>
diff --git a/frontend/src/app/onboarding/components/openai-onboarding.tsx b/frontend/src/app/onboarding/components/openai-onboarding.tsx
index cf18fb53..236097a4 100644
--- a/frontend/src/app/onboarding/components/openai-onboarding.tsx
+++ b/frontend/src/app/onboarding/components/openai-onboarding.tsx
@@ -1,6 +1,8 @@
import { useState } from "react";
import { LabelInput } from "@/components/label-input";
+import { LabelWrapper } from "@/components/label-wrapper";
import OpenAILogo from "@/components/logo/openai-logo";
+import { Switch } from "@/components/ui/switch";
import { useDebouncedValue } from "@/lib/debounce";
import type { OnboardingVariables } from "../../api/mutations/useOnboardingMutation";
import { useGetOpenAIModelsQuery } from "../../api/queries/useGetModelsQuery";
@@ -18,6 +20,7 @@ export function OpenAIOnboarding({
setSampleDataset: (dataset: boolean) => void;
}) {
const [apiKey, setApiKey] = useState("");
+ const [getFromEnv, setGetFromEnv] = useState(true);
const debouncedApiKey = useDebouncedValue(apiKey, 500);
// Fetch models from API when API key is provided
@@ -26,7 +29,12 @@ export function OpenAIOnboarding({
isLoading: isLoadingModels,
error: modelsError,
} = useGetOpenAIModelsQuery(
- debouncedApiKey ? { apiKey: debouncedApiKey } : undefined,
+ getFromEnv
+ ? { apiKey: "" }
+ : debouncedApiKey
+ ? { apiKey: debouncedApiKey }
+ : undefined,
+ { enabled: debouncedApiKey !== "" || getFromEnv },
);
// Use custom hook for model selection logic
const {
@@ -41,6 +49,15 @@ export function OpenAIOnboarding({
setSampleDataset(dataset);
};
+ const handleGetFromEnvChange = (fromEnv: boolean) => {
+ setGetFromEnv(fromEnv);
+ if (fromEnv) {
+ setApiKey("");
+ }
+ setLanguageModel("");
+ setEmbeddingModel("");
+ };
+
// Update settings when values change
useUpdateSettings(
"openai",
@@ -53,33 +70,41 @@ export function OpenAIOnboarding({
);
return (
<>
-
-
setApiKey(e.target.value)}
- />
- {isLoadingModels && (
-
- Validating API key...
-
+
+
+
+
+ {!getFromEnv && (
+
+
setApiKey(e.target.value)}
+ />
+ {isLoadingModels && (
+
+ Validating API key...
+
+ )}
+ {modelsError && (
+
+ Invalid OpenAI API key. Verify or replace the key.
+
+ )}
+
)}
- {modelsError && (
-
- Invalid API key
-
- )}
- {modelsData &&
- (modelsData.language_models?.length > 0 ||
- modelsData.embedding_models?.length > 0) && (
-
- API Key is valid
-
- )}
}
diff --git a/frontend/src/app/onboarding/page.tsx b/frontend/src/app/onboarding/page.tsx
index c58abfea..a82e5fab 100644
--- a/frontend/src/app/onboarding/page.tsx
+++ b/frontend/src/app/onboarding/page.tsx
@@ -4,8 +4,8 @@ import { useRouter } from "next/navigation";
import { Suspense, useEffect, useState } from "react";
import { toast } from "sonner";
import {
- type OnboardingVariables,
- useOnboardingMutation,
+ type OnboardingVariables,
+ useOnboardingMutation,
} from "@/app/api/mutations/useOnboardingMutation";
import IBMLogo from "@/components/logo/ibm-logo";
import OllamaLogo from "@/components/logo/ollama-logo";
@@ -13,198 +13,208 @@ import OpenAILogo from "@/components/logo/openai-logo";
import { ProtectedRoute } from "@/components/protected-route";
import { Button } from "@/components/ui/button";
import {
- Card,
- CardContent,
- CardFooter,
- CardHeader,
+ Card,
+ CardContent,
+ CardFooter,
+ CardHeader,
} from "@/components/ui/card";
+import { DotPattern } from "@/components/ui/dot-pattern";
import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs";
import {
- Tooltip,
- TooltipContent,
- TooltipTrigger,
+ Tooltip,
+ TooltipContent,
+ TooltipTrigger,
} from "@/components/ui/tooltip";
+import { cn } from "@/lib/utils";
import { useGetSettingsQuery } from "../api/queries/useGetSettingsQuery";
import { IBMOnboarding } from "./components/ibm-onboarding";
import { OllamaOnboarding } from "./components/ollama-onboarding";
import { OpenAIOnboarding } from "./components/openai-onboarding";
function OnboardingPage() {
- const { data: settingsDb, isLoading: isSettingsLoading } =
- useGetSettingsQuery();
+ const { data: settingsDb, isLoading: isSettingsLoading } =
+ useGetSettingsQuery();
- const redirect = "/";
+ const redirect = "/";
- const router = useRouter();
+ const router = useRouter();
- // Redirect if already authenticated or in no-auth mode
- useEffect(() => {
- if (!isSettingsLoading && settingsDb && settingsDb.edited) {
- router.push(redirect);
- }
- }, [isSettingsLoading, settingsDb, router]);
+ // Redirect if already authenticated or in no-auth mode
+ useEffect(() => {
+ if (!isSettingsLoading && settingsDb && settingsDb.edited) {
+ router.push(redirect);
+ }
+ }, [isSettingsLoading, settingsDb, router]);
- const [modelProvider, setModelProvider] = useState("openai");
+ const [modelProvider, setModelProvider] = useState("openai");
- const [sampleDataset, setSampleDataset] = useState(true);
+ const [sampleDataset, setSampleDataset] = useState(true);
- const handleSetModelProvider = (provider: string) => {
- setModelProvider(provider);
- setSettings({
- model_provider: provider,
- embedding_model: "",
- llm_model: "",
- });
- };
+ const handleSetModelProvider = (provider: string) => {
+ setModelProvider(provider);
+ setSettings({
+ model_provider: provider,
+ embedding_model: "",
+ llm_model: "",
+ });
+ };
- const [settings, setSettings] = useState({
- model_provider: modelProvider,
- embedding_model: "",
- llm_model: "",
- });
+ const [settings, setSettings] = useState({
+ model_provider: modelProvider,
+ embedding_model: "",
+ llm_model: "",
+ });
- // Mutations
- const onboardingMutation = useOnboardingMutation({
- onSuccess: (data) => {
- toast.success("Onboarding completed successfully!");
- console.log("Onboarding completed successfully", data);
- router.push(redirect);
- },
- onError: (error) => {
- toast.error("Failed to complete onboarding", {
- description: error.message,
- });
- },
- });
+ // Mutations
+ const onboardingMutation = useOnboardingMutation({
+ onSuccess: (data) => {
+ toast.success("Onboarding completed successfully!");
+ console.log("Onboarding completed successfully", data);
+ router.push(redirect);
+ },
+ onError: (error) => {
+ toast.error("Failed to complete onboarding", {
+ description: error.message,
+ });
+ },
+ });
- const handleComplete = () => {
- if (
- !settings.model_provider ||
- !settings.llm_model ||
- !settings.embedding_model
- ) {
- toast.error("Please complete all required fields");
- return;
- }
+ const handleComplete = () => {
+ if (
+ !settings.model_provider ||
+ !settings.llm_model ||
+ !settings.embedding_model
+ ) {
+ toast.error("Please complete all required fields");
+ return;
+ }
- // Prepare onboarding data
- const onboardingData: OnboardingVariables = {
- model_provider: settings.model_provider,
- llm_model: settings.llm_model,
- embedding_model: settings.embedding_model,
- sample_data: sampleDataset,
- };
+ // Prepare onboarding data
+ const onboardingData: OnboardingVariables = {
+ model_provider: settings.model_provider,
+ llm_model: settings.llm_model,
+ embedding_model: settings.embedding_model,
+ sample_data: sampleDataset,
+ };
- // Add API key if available
- if (settings.api_key) {
- onboardingData.api_key = settings.api_key;
- }
+ // Add API key if available
+ if (settings.api_key) {
+ onboardingData.api_key = settings.api_key;
+ }
- // Add endpoint if available
- if (settings.endpoint) {
- onboardingData.endpoint = settings.endpoint;
- }
+ // Add endpoint if available
+ if (settings.endpoint) {
+ onboardingData.endpoint = settings.endpoint;
+ }
- // Add project_id if available
- if (settings.project_id) {
- onboardingData.project_id = settings.project_id;
- }
+ // Add project_id if available
+ if (settings.project_id) {
+ onboardingData.project_id = settings.project_id;
+ }
- onboardingMutation.mutate(onboardingData);
- };
+ onboardingMutation.mutate(onboardingData);
+ };
- const isComplete = !!settings.llm_model && !!settings.embedding_model;
+ const isComplete = !!settings.llm_model && !!settings.embedding_model;
- return (
-
-
-
-
- Configure your models
-
-
[description of task]
-
-
-
-
-
-
-
- OpenAI
-
-
-
- IBM
-
-
-
- Ollama
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- Complete
-
-
-
- {!isComplete ? "Please fill in all required fields" : ""}
-
-
-
-
-
-
- );
+ return (
+
+
+
+
+
+
+ Connect a model provider
+
+
+
+
+
+
+
+
+ OpenAI
+
+
+
+ IBM
+
+
+
+ Ollama
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Complete
+
+
+
+ {!isComplete && (
+
+ Please fill in all required fields
+
+ )}
+
+
+
+
+
+ );
}
export default function ProtectedOnboardingPage() {
- return (
-
- Loading onboarding... }>
-
-
-
- );
+ return (
+
+ Loading onboarding... }>
+
+
+
+ );
}
diff --git a/frontend/src/app/settings/page.tsx b/frontend/src/app/settings/page.tsx
index f49ff393..91575423 100644
--- a/frontend/src/app/settings/page.tsx
+++ b/frontend/src/app/settings/page.tsx
@@ -35,10 +35,11 @@ import { Textarea } from "@/components/ui/textarea";
import { useAuth } from "@/contexts/auth-context";
import { useTask } from "@/contexts/task-context";
import { useDebounce } from "@/lib/debounce";
+import { DEFAULT_AGENT_SETTINGS, DEFAULT_KNOWLEDGE_SETTINGS, UI_CONSTANTS } from "@/lib/constants";
import { getFallbackModels, type ModelProvider } from "./helpers/model-helpers";
import { ModelSelectItems } from "./helpers/model-select-item";
-const MAX_SYSTEM_PROMPT_CHARS = 2000;
+const { MAX_SYSTEM_PROMPT_CHARS } = UI_CONSTANTS;
interface GoogleDriveFile {
id: string;
@@ -529,8 +530,17 @@ function KnowledgeSourcesPage() {
fetch(`/api/reset-flow/retrieval`, {
method: "POST",
})
- .then((response) => response.json())
+ .then((response) => {
+ if (response.ok) {
+ return response.json();
+ }
+ throw new Error(`HTTP ${response.status}: ${response.statusText}`);
+ })
.then(() => {
+ // Only reset form values if the API call was successful
+ setSystemPrompt(DEFAULT_AGENT_SETTINGS.system_prompt);
+ // Trigger model update to default model
+ handleModelChange(DEFAULT_AGENT_SETTINGS.llm_model);
closeDialog(); // Close after successful completion
})
.catch((error) => {
@@ -543,8 +553,17 @@ function KnowledgeSourcesPage() {
fetch(`/api/reset-flow/ingest`, {
method: "POST",
})
- .then((response) => response.json())
+ .then((response) => {
+ if (response.ok) {
+ return response.json();
+ }
+ throw new Error(`HTTP ${response.status}: ${response.statusText}`);
+ })
.then(() => {
+ // Only reset form values if the API call was successful
+ setChunkSize(DEFAULT_KNOWLEDGE_SETTINGS.chunk_size);
+ setChunkOverlap(DEFAULT_KNOWLEDGE_SETTINGS.chunk_overlap);
+ setProcessingMode(DEFAULT_KNOWLEDGE_SETTINGS.processing_mode);
closeDialog(); // Close after successful completion
})
.catch((error) => {
@@ -764,8 +783,9 @@ function KnowledgeSourcesPage() {
"text-embedding-ada-002"
}
onValueChange={handleEmbeddingModelChange}
+ disabled={true}
>
-
+
diff --git a/frontend/src/lib/constants.ts b/frontend/src/lib/constants.ts
new file mode 100644
index 00000000..9c6ea7b0
--- /dev/null
+++ b/frontend/src/lib/constants.ts
@@ -0,0 +1,23 @@
+/**
+ * Default agent settings
+ */
+export const DEFAULT_AGENT_SETTINGS = {
+ llm_model: "gpt-4o-mini",
+ system_prompt: "You are a helpful assistant that can use tools to answer questions and perform tasks."
+} as const;
+
+/**
+ * Default knowledge/ingest settings
+ */
+export const DEFAULT_KNOWLEDGE_SETTINGS = {
+ chunk_size: 1000,
+ chunk_overlap: 200,
+ processing_mode: "standard"
+} as const;
+
+/**
+ * UI Constants
+ */
+export const UI_CONSTANTS = {
+ MAX_SYSTEM_PROMPT_CHARS: 2000,
+} as const;
\ No newline at end of file
diff --git a/src/api/settings.py b/src/api/settings.py
index ddaf2bf3..3e242c4b 100644
--- a/src/api/settings.py
+++ b/src/api/settings.py
@@ -179,6 +179,7 @@ async def update_settings(request, session_manager):
"chunk_size",
"chunk_overlap",
"doclingPresets",
+ "embedding_model",
}
# Check for invalid fields
@@ -199,11 +200,61 @@ async def update_settings(request, session_manager):
current_config.agent.llm_model = body["llm_model"]
config_updated = True
+ # Also update the chat flow with the new model
+ try:
+ flows_service = _get_flows_service()
+ await flows_service.update_chat_flow_model(body["llm_model"])
+ logger.info(
+ f"Successfully updated chat flow model to '{body['llm_model']}'"
+ )
+ except Exception as e:
+ logger.error(f"Failed to update chat flow model: {str(e)}")
+ # Don't fail the entire settings update if flow update fails
+ # The config will still be saved
+
if "system_prompt" in body:
current_config.agent.system_prompt = body["system_prompt"]
config_updated = True
+ # Also update the chat flow with the new system prompt
+ try:
+ flows_service = _get_flows_service()
+ await flows_service.update_chat_flow_system_prompt(
+ body["system_prompt"]
+ )
+ logger.info("Successfully updated chat flow system prompt")
+ except Exception as e:
+ logger.error(f"Failed to update chat flow system prompt: {str(e)}")
+ # Don't fail the entire settings update if flow update fails
+ # The config will still be saved
+
# Update knowledge settings
+ if "embedding_model" in body:
+ if (
+ not isinstance(body["embedding_model"], str)
+ or not body["embedding_model"].strip()
+ ):
+ return JSONResponse(
+ {"error": "embedding_model must be a non-empty string"},
+ status_code=400,
+ )
+ current_config.knowledge.embedding_model = body["embedding_model"].strip()
+ config_updated = True
+
+ # Also update the ingest flow with the new embedding model
+ try:
+ flows_service = _get_flows_service()
+ await flows_service.update_ingest_flow_embedding_model(
+ body["embedding_model"].strip()
+ )
+ logger.info(
+ f"Successfully updated ingest flow embedding model to '{body['embedding_model'].strip()}'"
+ )
+ except Exception as e:
+ logger.error(f"Failed to update ingest flow embedding model: {str(e)}")
+ # Don't fail the entire settings update if flow update fails
+ # The config will still be saved
+
if "doclingPresets" in body:
preset_configs = get_docling_preset_configs()
valid_presets = list(preset_configs.keys())
@@ -219,7 +270,8 @@ async def update_settings(request, session_manager):
# Also update the flow with the new docling preset
try:
- await _update_flow_docling_preset(
+ flows_service = _get_flows_service()
+ await flows_service.update_flow_docling_preset(
body["doclingPresets"], preset_configs[body["doclingPresets"]]
)
logger.info(
@@ -238,6 +290,18 @@ async def update_settings(request, session_manager):
current_config.knowledge.chunk_size = body["chunk_size"]
config_updated = True
+ # Also update the ingest flow with the new chunk size
+ try:
+ flows_service = _get_flows_service()
+ await flows_service.update_ingest_flow_chunk_size(body["chunk_size"])
+ logger.info(
+ f"Successfully updated ingest flow chunk size to {body['chunk_size']}"
+ )
+ except Exception as e:
+ logger.error(f"Failed to update ingest flow chunk size: {str(e)}")
+ # Don't fail the entire settings update if flow update fails
+ # The config will still be saved
+
if "chunk_overlap" in body:
if not isinstance(body["chunk_overlap"], int) or body["chunk_overlap"] < 0:
return JSONResponse(
@@ -247,6 +311,20 @@ async def update_settings(request, session_manager):
current_config.knowledge.chunk_overlap = body["chunk_overlap"]
config_updated = True
+ # Also update the ingest flow with the new chunk overlap
+ try:
+ flows_service = _get_flows_service()
+ await flows_service.update_ingest_flow_chunk_overlap(
+ body["chunk_overlap"]
+ )
+ logger.info(
+ f"Successfully updated ingest flow chunk overlap to {body['chunk_overlap']}"
+ )
+ except Exception as e:
+ logger.error(f"Failed to update ingest flow chunk overlap: {str(e)}")
+ # Don't fail the entire settings update if flow update fails
+ # The config will still be saved
+
if not config_updated:
return JSONResponse(
{"error": "No valid fields provided for update"}, status_code=400
@@ -525,63 +603,11 @@ async def onboarding(request, flows_service):
)
-async def _update_flow_docling_preset(preset: str, preset_config: dict):
- """Helper function to update docling preset in the ingest flow"""
- if not LANGFLOW_INGEST_FLOW_ID:
- raise ValueError("LANGFLOW_INGEST_FLOW_ID is not configured")
+def _get_flows_service():
+ """Helper function to get flows service instance"""
+ from services.flows_service import FlowsService
- # Get the current flow data from Langflow
- response = await clients.langflow_request(
- "GET", f"/api/v1/flows/{LANGFLOW_INGEST_FLOW_ID}"
- )
-
- if response.status_code != 200:
- raise Exception(
- f"Failed to get ingest flow: HTTP {response.status_code} - {response.text}"
- )
-
- flow_data = response.json()
-
- # Find the target node in the flow using environment variable
- nodes = flow_data.get("data", {}).get("nodes", [])
- target_node = None
- target_node_index = None
-
- for i, node in enumerate(nodes):
- if node.get("id") == DOCLING_COMPONENT_ID:
- target_node = node
- target_node_index = i
- break
-
- if target_node is None:
- raise Exception(
- f"Docling component '{DOCLING_COMPONENT_ID}' not found in ingest flow"
- )
-
- # Update the docling_serve_opts value directly in the existing node
- if (
- target_node.get("data", {})
- .get("node", {})
- .get("template", {})
- .get("docling_serve_opts")
- ):
- flow_data["data"]["nodes"][target_node_index]["data"]["node"]["template"][
- "docling_serve_opts"
- ]["value"] = preset_config
- else:
- raise Exception(
- f"docling_serve_opts field not found in node '{DOCLING_COMPONENT_ID}'"
- )
-
- # Update the flow via PATCH request
- patch_response = await clients.langflow_request(
- "PATCH", f"/api/v1/flows/{LANGFLOW_INGEST_FLOW_ID}", json=flow_data
- )
-
- if patch_response.status_code != 200:
- raise Exception(
- f"Failed to update ingest flow: HTTP {patch_response.status_code} - {patch_response.text}"
- )
+ return FlowsService()
async def update_docling_preset(request, session_manager):
@@ -612,7 +638,8 @@ async def update_docling_preset(request, session_manager):
preset_config = preset_configs[preset]
# Use the helper function to update the flow
- await _update_flow_docling_preset(preset, preset_config)
+ flows_service = _get_flows_service()
+ await flows_service.update_flow_docling_preset(preset, preset_config)
logger.info(f"Successfully updated docling preset to '{preset}' in ingest flow")
diff --git a/src/main.py b/src/main.py
index b7883c4b..90add401 100644
--- a/src/main.py
+++ b/src/main.py
@@ -392,8 +392,6 @@ async def startup_tasks(services):
"""Startup tasks"""
logger.info("Starting startup tasks")
await init_index()
- # Sample data ingestion is now handled by the onboarding endpoint when sample_data=True
- logger.info("Sample data ingestion moved to onboarding endpoint")
async def initialize_services():
@@ -939,7 +937,8 @@ async def create_app():
"/settings",
require_auth(services["session_manager"])(
partial(
- settings.update_settings, session_manager=services["session_manager"]
+ settings.update_settings,
+ session_manager=services["session_manager"],
)
),
methods=["POST"],
@@ -951,7 +950,7 @@ async def create_app():
partial(
models.get_openai_models,
models_service=services["models_service"],
- session_manager=services["session_manager"]
+ session_manager=services["session_manager"],
)
),
methods=["GET"],
@@ -962,7 +961,7 @@ async def create_app():
partial(
models.get_ollama_models,
models_service=services["models_service"],
- session_manager=services["session_manager"]
+ session_manager=services["session_manager"],
)
),
methods=["GET"],
@@ -973,7 +972,7 @@ async def create_app():
partial(
models.get_ibm_models,
models_service=services["models_service"],
- session_manager=services["session_manager"]
+ session_manager=services["session_manager"],
)
),
methods=["GET", "POST"],
@@ -982,10 +981,7 @@ async def create_app():
Route(
"/onboarding",
require_auth(services["session_manager"])(
- partial(
- settings.onboarding,
- flows_service=services["flows_service"]
- )
+ partial(settings.onboarding, flows_service=services["flows_service"])
),
methods=["POST"],
),
@@ -995,7 +991,7 @@ async def create_app():
require_auth(services["session_manager"])(
partial(
settings.update_docling_preset,
- session_manager=services["session_manager"]
+ session_manager=services["session_manager"],
)
),
methods=["PATCH"],
diff --git a/src/services/flows_service.py b/src/services/flows_service.py
index 4c3872ca..0d7a7bc8 100644
--- a/src/services/flows_service.py
+++ b/src/services/flows_service.py
@@ -400,6 +400,123 @@ class FlowsService:
return node
return None
+ def _find_node_in_flow(self, flow_data, node_id=None, display_name=None):
+ """
+ Helper function to find a node in flow data by ID or display name.
+ Returns tuple of (node, node_index) or (None, None) if not found.
+ """
+ nodes = flow_data.get("data", {}).get("nodes", [])
+
+ for i, node in enumerate(nodes):
+ node_data = node.get("data", {})
+ node_template = node_data.get("node", {})
+
+ # Check by ID if provided
+ if node_id and node_data.get("id") == node_id:
+ return node, i
+
+ # Check by display_name if provided
+ if display_name and node_template.get("display_name") == display_name:
+ return node, i
+
+ return None, None
+
+ async def _update_flow_field(self, flow_id: str, field_name: str, field_value, node_display_name=None, node_id=None):
+ """
+ Generic helper function to update any field in any Langflow component.
+
+ Args:
+ flow_id: The ID of the flow to update
+ field_name: The name of the field to update (e.g., 'model_name', 'system_message', 'docling_serve_opts')
+ field_value: The new value to set
+ node_display_name: The display name to search for (optional)
+ node_id: The node ID to search for (optional, used as fallback or primary)
+ """
+ if not flow_id:
+ raise ValueError("flow_id is required")
+
+ # Get the current flow data from Langflow
+ response = await clients.langflow_request(
+ "GET", f"/api/v1/flows/{flow_id}"
+ )
+
+ if response.status_code != 200:
+ raise Exception(f"Failed to get flow: HTTP {response.status_code} - {response.text}")
+
+ flow_data = response.json()
+
+ # Find the target component by display name first, then by ID as fallback
+ target_node, target_node_index = None, None
+ if node_display_name:
+ target_node, target_node_index = self._find_node_in_flow(flow_data, display_name=node_display_name)
+
+ if target_node is None and node_id:
+ target_node, target_node_index = self._find_node_in_flow(flow_data, node_id=node_id)
+
+ if target_node is None:
+ identifier = node_display_name or node_id
+ raise Exception(f"Component '{identifier}' not found in flow {flow_id}")
+
+ # Update the field value directly in the existing node
+ template = target_node.get("data", {}).get("node", {}).get("template", {})
+ if template.get(field_name):
+ flow_data["data"]["nodes"][target_node_index]["data"]["node"]["template"][field_name]["value"] = field_value
+ else:
+ identifier = node_display_name or node_id
+ raise Exception(f"{field_name} field not found in {identifier} component")
+
+ # Update the flow via PATCH request
+ patch_response = await clients.langflow_request(
+ "PATCH", f"/api/v1/flows/{flow_id}", json=flow_data
+ )
+
+ if patch_response.status_code != 200:
+ raise Exception(f"Failed to update flow: HTTP {patch_response.status_code} - {patch_response.text}")
+
+ async def update_chat_flow_model(self, model_name: str):
+ """Helper function to update the model in the chat flow"""
+ if not LANGFLOW_CHAT_FLOW_ID:
+ raise ValueError("LANGFLOW_CHAT_FLOW_ID is not configured")
+ await self._update_flow_field(LANGFLOW_CHAT_FLOW_ID, "model_name", model_name,
+ node_display_name="Language Model")
+
+ async def update_chat_flow_system_prompt(self, system_prompt: str):
+ """Helper function to update the system prompt in the chat flow"""
+ if not LANGFLOW_CHAT_FLOW_ID:
+ raise ValueError("LANGFLOW_CHAT_FLOW_ID is not configured")
+ await self._update_flow_field(LANGFLOW_CHAT_FLOW_ID, "system_prompt", system_prompt,
+ node_display_name="Agent")
+
+ async def update_flow_docling_preset(self, preset: str, preset_config: dict):
+ """Helper function to update docling preset in the ingest flow"""
+ if not LANGFLOW_INGEST_FLOW_ID:
+ raise ValueError("LANGFLOW_INGEST_FLOW_ID is not configured")
+
+ from config.settings import DOCLING_COMPONENT_ID
+ await self._update_flow_field(LANGFLOW_INGEST_FLOW_ID, "docling_serve_opts", preset_config,
+ node_id=DOCLING_COMPONENT_ID)
+
+ async def update_ingest_flow_chunk_size(self, chunk_size: int):
+ """Helper function to update chunk size in the ingest flow"""
+ if not LANGFLOW_INGEST_FLOW_ID:
+ raise ValueError("LANGFLOW_INGEST_FLOW_ID is not configured")
+ await self._update_flow_field(LANGFLOW_INGEST_FLOW_ID, "chunk_size", chunk_size,
+ node_display_name="Split Text")
+
+ async def update_ingest_flow_chunk_overlap(self, chunk_overlap: int):
+ """Helper function to update chunk overlap in the ingest flow"""
+ if not LANGFLOW_INGEST_FLOW_ID:
+ raise ValueError("LANGFLOW_INGEST_FLOW_ID is not configured")
+ await self._update_flow_field(LANGFLOW_INGEST_FLOW_ID, "chunk_overlap", chunk_overlap,
+ node_display_name="Split Text")
+
+ async def update_ingest_flow_embedding_model(self, embedding_model: str):
+ """Helper function to update embedding model in the ingest flow"""
+ if not LANGFLOW_INGEST_FLOW_ID:
+ raise ValueError("LANGFLOW_INGEST_FLOW_ID is not configured")
+ await self._update_flow_field(LANGFLOW_INGEST_FLOW_ID, "model", embedding_model,
+ node_display_name="Embedding Model")
+
def _replace_node_in_flow(self, flow_data, old_id, new_node):
"""Replace a node in the flow data"""
nodes = flow_data.get("data", {}).get("nodes", [])