Merge branch 'main' into feat/move-filters

Cole Goldsmith 2025-09-23 09:27:01 -05:00
commit 744a799c11
11 changed files with 2110 additions and 691 deletions

View file

@ -2,7 +2,9 @@
"data": {
"id": "OllamaEmbeddings-4ah5Q",
"node": {
"base_classes": ["Embeddings"],
"base_classes": [
"Embeddings"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
@ -10,10 +12,13 @@
"display_name": "Ollama Embeddings",
"documentation": "https://python.langchain.com/docs/integrations/text_embedding/ollama",
"edited": false,
"field_order": ["model_name", "base_url"],
"field_order": [
"model_name",
"base_url"
],
"frozen": false,
"icon": "Ollama",
"last_updated": "2025-09-17T20:01:59.954Z",
"last_updated": "2025-09-22T20:18:27.128Z",
"legacy": false,
"metadata": {
"code_hash": "0db0f99e91e9",
@ -29,12 +34,17 @@
},
{
"name": "langflow",
"version": "1.5.0.post2"
"version": null
}
],
"total_dependencies": 3
},
"keywords": ["model", "llm", "language model", "large language model"],
"keywords": [
"model",
"llm",
"language model",
"large language model"
],
"module": "langflow.components.ollama.ollama_embeddings.OllamaEmbeddingsComponent"
},
"minimized": false,
@ -51,7 +61,9 @@
"required_inputs": null,
"selected": "Embeddings",
"tool_mode": true,
"types": ["Embeddings"],
"types": [
"Embeddings"
],
"value": "__UNDEFINED__"
}
],
@ -64,7 +76,9 @@
"display_name": "Ollama Base URL",
"dynamic": false,
"info": "",
"input_types": ["Message"],
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": true,
@ -106,7 +120,9 @@
"dynamic": false,
"info": "",
"name": "model_name",
"options": [],
"options": [
"all-minilm:latest"
],
"options_metadata": [],
"placeholder": "",
"real_time_refresh": true,
@ -118,7 +134,7 @@
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": ""
"value": "all-minilm:latest"
}
},
"tool_mode": false
@ -133,9 +149,9 @@
"width": 320
},
"position": {
"x": 964,
"y": 248
"x": 282.29416840859585,
"y": 279.4218065717267
},
"selected": false,
"type": "genericNode"
}
}
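
The Ollama component diffs here follow one pattern: the exported node JSON pins the selected model by writing it into both `options` and `value` of the `model_name` template field. A minimal sketch of that patching step, assuming the `flows/components/*.json` layout from the backend settings (the helper name `pin_model` is illustrative, not from this repo):

import json

def pin_model(node_path: str, model: str) -> None:
    """Pin a model in an exported Langflow node JSON (illustrative helper)."""
    with open(node_path, "r") as f:
        node = json.load(f)
    field = node["data"]["node"]["template"]["model_name"]
    field["options"] = [model]  # restrict the dropdown to the pinned model
    field["value"] = model      # and select it by default
    with open(node_path, "w") as f:
        json.dump(node, f, indent=2)

# Mirrors the change above: options [] -> ["all-minilm:latest"], value "" -> "all-minilm:latest"
pin_model("flows/components/ollama_embedding.json", "all-minilm:latest")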

View file

@ -2,7 +2,10 @@
"data": {
"id": "OllamaModel-eCsJx",
"node": {
"base_classes": ["LanguageModel", "Message"],
"base_classes": [
"LanguageModel",
"Message"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
@ -40,7 +43,7 @@
],
"frozen": false,
"icon": "Ollama",
"last_updated": "2025-09-17T20:01:59.191Z",
"last_updated": "2025-09-22T20:14:45.057Z",
"legacy": false,
"metadata": {
"code_hash": "af399d429d23",
@ -56,12 +59,17 @@
},
{
"name": "langflow",
"version": "1.5.0.post2"
"version": null
}
],
"total_dependencies": 3
},
"keywords": ["model", "llm", "language model", "large language model"],
"keywords": [
"model",
"llm",
"language model",
"large language model"
],
"module": "langflow.components.ollama.ollama.ChatOllamaComponent"
},
"minimized": false,
@ -77,7 +85,9 @@
"options": null,
"required_inputs": null,
"tool_mode": true,
"types": ["Message"],
"types": [
"Message"
],
"value": "__UNDEFINED__"
},
{
@ -91,7 +101,9 @@
"required_inputs": null,
"selected": "LanguageModel",
"tool_mode": true,
"types": ["LanguageModel"],
"types": [
"LanguageModel"
],
"value": "__UNDEFINED__"
}
],
@ -104,7 +116,9 @@
"display_name": "Base URL",
"dynamic": false,
"info": "Endpoint of the Ollama API.",
"input_types": ["Message"],
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": true,
@ -144,7 +158,9 @@
"display_name": "Format",
"dynamic": false,
"info": "Specify the format of the output (e.g., json).",
"input_types": ["Message"],
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
@ -165,7 +181,9 @@
"display_name": "Input",
"dynamic": false,
"info": "",
"input_types": ["Message"],
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
@ -207,7 +225,11 @@
"dynamic": false,
"info": "Enable/disable Mirostat sampling for controlling perplexity.",
"name": "mirostat",
"options": ["Disabled", "Mirostat", "Mirostat 2.0"],
"options": [
"Disabled",
"Mirostat",
"Mirostat 2.0"
],
"options_metadata": [],
"placeholder": "",
"real_time_refresh": true,
@ -265,7 +287,9 @@
"dynamic": false,
"info": "Refer to https://ollama.com/library for more models.",
"name": "model_name",
"options": [],
"options": [
"qwen3:4b"
],
"options_metadata": [],
"placeholder": "",
"real_time_refresh": true,
@ -277,7 +301,7 @@
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": ""
"value": "qwen3:4b"
},
"num_ctx": {
"_input_type": "IntInput",
@ -375,7 +399,9 @@
"display_name": "Stop Tokens",
"dynamic": false,
"info": "Comma-separated list of tokens to signal the model to stop generating text.",
"input_types": ["Message"],
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
@ -414,7 +440,9 @@
"display_name": "System",
"dynamic": false,
"info": "System to use for generating text.",
"input_types": ["Message"],
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
@ -436,7 +464,9 @@
"display_name": "System Message",
"dynamic": false,
"info": "System message to pass to the model.",
"input_types": ["Message"],
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
@ -458,7 +488,9 @@
"display_name": "Tags",
"dynamic": false,
"info": "Comma-separated list of tags to add to the run trace.",
"input_types": ["Message"],
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
@ -507,7 +539,9 @@
"display_name": "Template",
"dynamic": false,
"info": "Template to use for generating text.",
"input_types": ["Message"],
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
@ -638,15 +672,16 @@
"showNode": true,
"type": "OllamaModel"
},
"dragging": false,
"id": "OllamaModel-eCsJx",
"measured": {
"height": 494,
"width": 320
},
"position": {
"x": 554,
"y": 225
"x": 248.08287272472313,
"y": 216.98088326271431
},
"selected": false,
"type": "genericNode"
}
}

File diff suppressed because one or more lines are too long

View file

@ -2,7 +2,9 @@
"data": {
"id": "WatsonxEmbeddingsComponent-pJfXI",
"node": {
"base_classes": ["Embeddings"],
"base_classes": [
"Embeddings"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
@ -20,6 +22,7 @@
],
"frozen": false,
"icon": "WatsonxAI",
"last_updated": "2025-09-22T20:11:38.181Z",
"legacy": false,
"metadata": {
"code_hash": "b6c6d50cc7ed",
@ -43,7 +46,7 @@
},
{
"name": "langflow",
"version": "1.5.0.post2"
"version": null
}
],
"total_dependencies": 5
@ -60,9 +63,13 @@
"group_outputs": false,
"method": "build_embeddings",
"name": "embeddings",
"options": null,
"required_inputs": null,
"selected": "Embeddings",
"tool_mode": true,
"types": ["Embeddings"],
"types": [
"Embeddings"
],
"value": "__UNDEFINED__"
}
],
@ -131,7 +138,16 @@
"dynamic": true,
"info": "",
"name": "model_name",
"options": [],
"options": [
"ibm/granite-embedding-107m-multilingual",
"ibm/granite-embedding-278m-multilingual",
"ibm/slate-125m-english-rtrvr",
"ibm/slate-125m-english-rtrvr-v2",
"ibm/slate-30m-english-rtrvr",
"ibm/slate-30m-english-rtrvr-v2",
"intfloat/multilingual-e5-large",
"sentence-transformers/all-minilm-l6-v2"
],
"options_metadata": [],
"placeholder": "",
"required": true,
@ -140,7 +156,8 @@
"toggle": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str"
"type": "str",
"value": "ibm/granite-embedding-107m-multilingual"
},
"project_id": {
"_input_type": "StrInput",
@ -205,7 +222,8 @@
"toggle": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str"
"type": "str",
"value": "https://us-south.ml.cloud.ibm.com"
}
},
"tool_mode": false
@ -213,15 +231,16 @@
"showNode": true,
"type": "WatsonxEmbeddingsComponent"
},
"dragging": false,
"id": "WatsonxEmbeddingsComponent-pJfXI",
"measured": {
"height": 467,
"width": 320
},
"position": {
"x": 999.129592360849,
"y": 753.2332292351236
"x": 364.4406919374723,
"y": 282.29319267029086
},
"selected": false,
"type": "genericNode"
}
}

View file

@ -2,7 +2,10 @@
"data": {
"id": "IBMwatsonxModel-jA4Nw",
"node": {
"base_classes": ["LanguageModel", "Message"],
"base_classes": [
"LanguageModel",
"Message"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
@ -31,6 +34,7 @@
],
"frozen": false,
"icon": "WatsonxAI",
"last_updated": "2025-09-22T20:03:31.248Z",
"legacy": false,
"metadata": {
"code_hash": "7767fd69a954",
@ -50,12 +54,17 @@
},
{
"name": "langflow",
"version": "1.5.0.post2"
"version": null
}
],
"total_dependencies": 4
},
"keywords": ["model", "llm", "language model", "large language model"],
"keywords": [
"model",
"llm",
"language model",
"large language model"
],
"module": "langflow.components.ibm.watsonx.WatsonxAIComponent"
},
"minimized": false,
@ -68,8 +77,12 @@
"group_outputs": false,
"method": "text_response",
"name": "text_output",
"options": null,
"required_inputs": null,
"tool_mode": true,
"types": ["Message"],
"types": [
"Message"
],
"value": "__UNDEFINED__"
},
{
@ -79,9 +92,13 @@
"group_outputs": false,
"method": "build_model",
"name": "model_output",
"options": null,
"required_inputs": null,
"selected": "LanguageModel",
"tool_mode": true,
"types": ["LanguageModel"],
"types": [
"LanguageModel"
],
"value": "__UNDEFINED__"
}
],
@ -157,7 +174,9 @@
"display_name": "Input",
"dynamic": false,
"info": "",
"input_types": ["Message"],
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
@ -242,7 +261,26 @@
"dynamic": true,
"info": "",
"name": "model_name",
"options": [],
"options": [
"ibm/granite-3-2-8b-instruct",
"ibm/granite-3-2b-instruct",
"ibm/granite-3-3-8b-instruct",
"ibm/granite-3-8b-instruct",
"ibm/granite-guardian-3-2b",
"ibm/granite-guardian-3-8b",
"ibm/granite-vision-3-2-2b",
"meta-llama/llama-3-2-11b-vision-instruct",
"meta-llama/llama-3-2-90b-vision-instruct",
"meta-llama/llama-3-3-70b-instruct",
"meta-llama/llama-3-405b-instruct",
"meta-llama/llama-4-maverick-17b-128e-instruct-fp8",
"meta-llama/llama-guard-3-11b-vision",
"mistralai/mistral-large",
"mistralai/mistral-medium-2505",
"mistralai/mistral-small-3-1-24b-instruct-2503",
"mistralai/pixtral-12b",
"openai/gpt-oss-120b"
],
"options_metadata": [],
"placeholder": "",
"required": true,
@ -251,7 +289,8 @@
"toggle": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str"
"type": "str",
"value": "ibm/granite-3-2-8b-instruct"
},
"presence_penalty": {
"_input_type": "SliderInput",
@ -362,7 +401,9 @@
"display_name": "System Message",
"dynamic": false,
"info": "System message to pass to the model.",
"input_types": ["Message"],
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
@ -484,7 +525,8 @@
"toggle": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str"
"type": "str",
"value": "https://us-south.ml.cloud.ibm.com"
}
},
"tool_mode": false
@ -493,15 +535,16 @@
"showNode": true,
"type": "IBMwatsonxModel"
},
"dragging": false,
"id": "IBMwatsonxModel-jA4Nw",
"measured": {
"height": 632,
"width": 320
},
"position": {
"x": 562.2658900512183,
"y": 895.3455179382565
"x": 371.93566807042805,
"y": 197.47711431325635
},
"selected": false,
"type": "genericNode"
}
}
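
The watsonx exports pin their defaults the same way, with `value` also present in `options` and the url field gaining a concrete default. A quick sanity check of that invariant, sketched against the `flows/components/watsonx_llm.json` path defined in the backend settings:

import json

# Sketch: confirm the pinned model is one of the exported dropdown options.
with open("flows/components/watsonx_llm.json", "r") as f:
    node = json.load(f)

field = node["data"]["node"]["template"]["model_name"]
assert field["value"] in field["options"]  # "ibm/granite-3-2-8b-instruct"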

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View file

@ -1,10 +1,11 @@
 "use client";
+import { useRouter } from "next/navigation";
 import { Suspense, useEffect, useState } from "react";
 import { toast } from "sonner";
 import {
-  useOnboardingMutation,
-  type OnboardingVariables,
+  type OnboardingVariables,
+  useOnboardingMutation,
 } from "@/app/api/mutations/useOnboardingMutation";
 import IBMLogo from "@/components/logo/ibm-logo";
 import OllamaLogo from "@/components/logo/ollama-logo";
@ -12,198 +13,198 @@ import OpenAILogo from "@/components/logo/openai-logo";
 import { ProtectedRoute } from "@/components/protected-route";
 import { Button } from "@/components/ui/button";
 import {
   Card,
   CardContent,
   CardFooter,
   CardHeader,
 } from "@/components/ui/card";
 import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs";
+import {
+  Tooltip,
+  TooltipContent,
+  TooltipTrigger,
+} from "@/components/ui/tooltip";
+import { useGetSettingsQuery } from "../api/queries/useGetSettingsQuery";
 import { IBMOnboarding } from "./components/ibm-onboarding";
 import { OllamaOnboarding } from "./components/ollama-onboarding";
 import { OpenAIOnboarding } from "./components/openai-onboarding";
-import {
-  Tooltip,
-  TooltipContent,
-  TooltipTrigger,
-} from "@/components/ui/tooltip";
-import { useGetSettingsQuery } from "../api/queries/useGetSettingsQuery";
-import { useRouter } from "next/navigation";

 function OnboardingPage() {
   const { data: settingsDb, isLoading: isSettingsLoading } =
     useGetSettingsQuery();

   const redirect = "/";
   const router = useRouter();

   // Redirect if already authenticated or in no-auth mode
   useEffect(() => {
     if (!isSettingsLoading && settingsDb && settingsDb.edited) {
       router.push(redirect);
     }
-  }, [isSettingsLoading, redirect]);
+  }, [isSettingsLoading, settingsDb, router]);

   const [modelProvider, setModelProvider] = useState<string>("openai");
   const [sampleDataset, setSampleDataset] = useState<boolean>(true);

   const handleSetModelProvider = (provider: string) => {
     setModelProvider(provider);
     setSettings({
       model_provider: provider,
       embedding_model: "",
       llm_model: "",
     });
   };

   const [settings, setSettings] = useState<OnboardingVariables>({
     model_provider: modelProvider,
     embedding_model: "",
     llm_model: "",
   });

   // Mutations
   const onboardingMutation = useOnboardingMutation({
     onSuccess: (data) => {
       toast.success("Onboarding completed successfully!");
       console.log("Onboarding completed successfully", data);
+      router.push(redirect);
     },
     onError: (error) => {
       toast.error("Failed to complete onboarding", {
         description: error.message,
       });
     },
   });

   const handleComplete = () => {
     if (
       !settings.model_provider ||
       !settings.llm_model ||
       !settings.embedding_model
     ) {
       toast.error("Please complete all required fields");
       return;
     }

     // Prepare onboarding data
     const onboardingData: OnboardingVariables = {
       model_provider: settings.model_provider,
       llm_model: settings.llm_model,
       embedding_model: settings.embedding_model,
       sample_data: sampleDataset,
     };

     // Add API key if available
     if (settings.api_key) {
       onboardingData.api_key = settings.api_key;
     }

     // Add endpoint if available
     if (settings.endpoint) {
       onboardingData.endpoint = settings.endpoint;
     }

     // Add project_id if available
     if (settings.project_id) {
       onboardingData.project_id = settings.project_id;
     }

     onboardingMutation.mutate(onboardingData);
   };

   const isComplete = !!settings.llm_model && !!settings.embedding_model;

   return (
     <div
       className="min-h-dvh w-full flex gap-5 flex-col items-center justify-center bg-background p-4"
       style={{
         backgroundImage: "url('/images/background.png')",
         backgroundSize: "cover",
         backgroundPosition: "center",
       }}
     >
       <div className="flex flex-col items-center gap-5 min-h-[550px] w-full">
         <div className="flex flex-col items-center justify-center gap-4">
           <h1 className="text-2xl font-medium font-chivo">
             Configure your models
           </h1>
           <p className="text-sm text-muted-foreground">[description of task]</p>
         </div>
         <Card className="w-full max-w-[580px]">
           <Tabs
             defaultValue={modelProvider}
             onValueChange={handleSetModelProvider}
           >
             <CardHeader>
               <TabsList>
                 <TabsTrigger value="openai">
                   <OpenAILogo className="w-4 h-4" />
                   OpenAI
                 </TabsTrigger>
                 <TabsTrigger value="watsonx">
                   <IBMLogo className="w-4 h-4" />
                   IBM
                 </TabsTrigger>
                 <TabsTrigger value="ollama">
                   <OllamaLogo className="w-4 h-4" />
                   Ollama
                 </TabsTrigger>
               </TabsList>
             </CardHeader>
             <CardContent>
               <TabsContent value="openai">
                 <OpenAIOnboarding
                   setSettings={setSettings}
                   sampleDataset={sampleDataset}
                   setSampleDataset={setSampleDataset}
                 />
               </TabsContent>
               <TabsContent value="watsonx">
                 <IBMOnboarding
                   setSettings={setSettings}
                   sampleDataset={sampleDataset}
                   setSampleDataset={setSampleDataset}
                 />
               </TabsContent>
               <TabsContent value="ollama">
                 <OllamaOnboarding
                   setSettings={setSettings}
                   sampleDataset={sampleDataset}
                   setSampleDataset={setSampleDataset}
                 />
               </TabsContent>
             </CardContent>
           </Tabs>
           <CardFooter className="flex justify-end">
             <Tooltip>
               <TooltipTrigger asChild>
                 <Button
                   size="sm"
                   onClick={handleComplete}
                   disabled={!isComplete}
                   loading={onboardingMutation.isPending}
                 >
                   Complete
                 </Button>
               </TooltipTrigger>
               <TooltipContent>
                 {!isComplete ? "Please fill in all required fields" : ""}
               </TooltipContent>
             </Tooltip>
           </CardFooter>
         </Card>
       </div>
     </div>
   );
 }

 export default function ProtectedOnboardingPage() {
   return (
     <ProtectedRoute>
       <Suspense fallback={<div>Loading onboarding...</div>}>
         <OnboardingPage />
       </Suspense>
     </ProtectedRoute>
   );
 }

View file

@ -496,12 +496,18 @@ class AppClients:
WATSONX_LLM_COMPONENT_PATH = os.getenv(
"WATSONX_LLM_COMPONENT_PATH", "flows/components/watsonx_llm.json"
)
+WATSONX_LLM_TEXT_COMPONENT_PATH = os.getenv(
+"WATSONX_LLM_TEXT_COMPONENT_PATH", "flows/components/watsonx_llm_text.json"
+)
WATSONX_EMBEDDING_COMPONENT_PATH = os.getenv(
"WATSONX_EMBEDDING_COMPONENT_PATH", "flows/components/watsonx_embedding.json"
)
OLLAMA_LLM_COMPONENT_PATH = os.getenv(
"OLLAMA_LLM_COMPONENT_PATH", "flows/components/ollama_llm.json"
)
+OLLAMA_LLM_TEXT_COMPONENT_PATH = os.getenv(
+"OLLAMA_LLM_TEXT_COMPONENT_PATH", "flows/components/ollama_llm_text.json"
+)
OLLAMA_EMBEDDING_COMPONENT_PATH = os.getenv(
"OLLAMA_EMBEDDING_COMPONENT_PATH", "flows/components/ollama_embedding.json"
)
@ -514,6 +520,9 @@ OPENAI_EMBEDDING_COMPONENT_ID = os.getenv(
OPENAI_LLM_COMPONENT_ID = os.getenv(
"OPENAI_LLM_COMPONENT_ID", "LanguageModelComponent-0YME7"
)
+OPENAI_LLM_TEXT_COMPONENT_ID = os.getenv(
+"OPENAI_LLM_TEXT_COMPONENT_ID", "LanguageModelComponent-NSTA6"
+)
# Provider-specific component IDs
WATSONX_EMBEDDING_COMPONENT_ID = os.getenv(
@ -522,11 +531,18 @@ WATSONX_EMBEDDING_COMPONENT_ID = os.getenv(
WATSONX_LLM_COMPONENT_ID = os.getenv(
"WATSONX_LLM_COMPONENT_ID", "IBMwatsonxModel-jA4Nw"
)
+WATSONX_LLM_TEXT_COMPONENT_ID = os.getenv(
+"WATSONX_LLM_TEXT_COMPONENT_ID", "IBMwatsonxModel-18kmA"
+)
OLLAMA_EMBEDDING_COMPONENT_ID = os.getenv(
"OLLAMA_EMBEDDING_COMPONENT_ID", "OllamaEmbeddings-4ah5Q"
)
OLLAMA_LLM_COMPONENT_ID = os.getenv("OLLAMA_LLM_COMPONENT_ID", "OllamaModel-eCsJx")
+OLLAMA_LLM_TEXT_COMPONENT_ID = os.getenv(
+"OLLAMA_LLM_TEXT_COMPONENT_ID", "OllamaModel-XDGqZ"
+)
# Global clients instance
clients = AppClients()
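
The new `*_LLM_TEXT_*` settings follow the module's existing pattern: environment variable first, bundled default second, so a deployment can swap the text-mode LLM template or component ID without code changes. A minimal sketch, assuming the `config.settings` import path used by the flows service below:

import os

# Overrides must be set before config.settings is first imported.
os.environ["OLLAMA_LLM_TEXT_COMPONENT_PATH"] = "/etc/app/ollama_llm_text.json"

from config import settings

print(settings.OLLAMA_LLM_TEXT_COMPONENT_PATH)  # /etc/app/ollama_llm_text.json
print(settings.OLLAMA_LLM_TEXT_COMPONENT_ID)    # OllamaModel-XDGqZ (bundled default)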

View file

@ -3,8 +3,13 @@ from config.settings import (
LANGFLOW_URL,
LANGFLOW_CHAT_FLOW_ID,
LANGFLOW_INGEST_FLOW_ID,
+OLLAMA_LLM_TEXT_COMPONENT_ID,
+OLLAMA_LLM_TEXT_COMPONENT_PATH,
OPENAI_EMBEDDING_COMPONENT_ID,
OPENAI_LLM_COMPONENT_ID,
+OPENAI_LLM_TEXT_COMPONENT_ID,
+WATSONX_LLM_TEXT_COMPONENT_ID,
+WATSONX_LLM_TEXT_COMPONENT_PATH,
clients,
WATSONX_LLM_COMPONENT_PATH,
WATSONX_EMBEDDING_COMPONENT_PATH,
@ -146,7 +151,7 @@ class FlowsService:
try:
# Load component templates based on provider
-llm_template, embedding_template = self._load_component_templates(provider)
+llm_template, embedding_template, llm_text_template = self._load_component_templates(provider)
logger.info(f"Assigning {provider} components")
@ -158,6 +163,7 @@ class FlowsService:
"flow_id": NUDGES_FLOW_ID,
"embedding_id": OPENAI_EMBEDDING_COMPONENT_ID,
"llm_id": OPENAI_LLM_COMPONENT_ID,
"llm_text_id": OPENAI_LLM_TEXT_COMPONENT_ID,
},
{
"name": "retrieval",
@ -165,6 +171,7 @@ class FlowsService:
"flow_id": LANGFLOW_CHAT_FLOW_ID,
"embedding_id": OPENAI_EMBEDDING_COMPONENT_ID,
"llm_id": OPENAI_LLM_COMPONENT_ID,
"llm_text_id": None,
},
{
"name": "ingest",
@ -172,6 +179,7 @@ class FlowsService:
"flow_id": LANGFLOW_INGEST_FLOW_ID,
"embedding_id": OPENAI_EMBEDDING_COMPONENT_ID,
"llm_id": None, # Ingestion flow might not have LLM
"llm_text_id": None, # Ingestion flow might not have LLM Text
},
]
@ -181,7 +189,7 @@ class FlowsService:
for config in flow_configs:
try:
result = await self._update_flow_components(
-config, llm_template, embedding_template
+config, llm_template, embedding_template, llm_text_template
)
results.append(result)
logger.info(f"Successfully updated {config['name']} flow")
@ -215,9 +223,11 @@ class FlowsService:
if provider == "watsonx":
llm_path = WATSONX_LLM_COMPONENT_PATH
embedding_path = WATSONX_EMBEDDING_COMPONENT_PATH
+llm_text_path = WATSONX_LLM_TEXT_COMPONENT_PATH
elif provider == "ollama":
llm_path = OLLAMA_LLM_COMPONENT_PATH
embedding_path = OLLAMA_EMBEDDING_COMPONENT_PATH
+llm_text_path = OLLAMA_LLM_TEXT_COMPONENT_PATH
else:
raise ValueError(f"Unsupported provider: {provider}")
@ -246,21 +256,31 @@ class FlowsService:
with open(embedding_full_path, "r") as f:
embedding_template = json.load(f)
logger.info(f"Loaded component templates for {provider}")
return llm_template, embedding_template
# Load LLM Text template
llm_text_full_path = os.path.join(project_root, llm_text_path)
if not os.path.exists(llm_text_full_path):
raise FileNotFoundError(
f"LLM Text component template not found at: {llm_text_full_path}"
)
async def _update_flow_components(self, config, llm_template, embedding_template):
with open(llm_text_full_path, "r") as f:
llm_text_template = json.load(f)
logger.info(f"Loaded component templates for {provider}")
return llm_template, embedding_template, llm_text_template
async def _update_flow_components(self, config, llm_template, embedding_template, llm_text_template):
"""Update components in a specific flow"""
flow_name = config["name"]
flow_file = config["file"]
flow_id = config["flow_id"]
old_embedding_id = config["embedding_id"]
old_llm_id = config["llm_id"]
old_llm_text_id = config["llm_text_id"]
# Extract IDs from templates
new_llm_id = llm_template["data"]["id"]
new_embedding_id = embedding_template["data"]["id"]
new_llm_text_id = llm_text_template["data"]["id"]
# Get the project root directory
current_file_dir = os.path.dirname(os.path.abspath(__file__))
src_dir = os.path.dirname(current_file_dir)
@ -308,6 +328,21 @@ class FlowsService:
self._replace_node_in_flow(flow_data, old_llm_id, new_llm_node)
components_updated.append(f"llm: {old_llm_id} -> {new_llm_id}")
+# Replace LLM Text component (if exists in this flow)
+if old_llm_text_id:
+llm_text_node = self._find_node_by_id(flow_data, old_llm_text_id)
+if llm_text_node:
+# Preserve position
+original_position = llm_text_node.get("position", {})
+# Replace with new template
+new_llm_text_node = llm_text_template.copy()
+new_llm_text_node["position"] = original_position
+# Replace in flow
+self._replace_node_in_flow(flow_data, old_llm_text_id, new_llm_text_node)
+components_updated.append(f"llm: {old_llm_text_id} -> {new_llm_text_id}")
# Update all edge references using regex replacement
flow_json_str = json.dumps(flow_data)
@ -326,6 +361,11 @@ class FlowsService:
flow_json_str = re.sub(
re.escape(old_llm_id), new_llm_id, flow_json_str
)
+if old_llm_text_id:
+flow_json_str = re.sub(
+re.escape(old_llm_text_id), new_llm_text_id, flow_json_str
+)
flow_json_str = re.sub(
re.escape(old_llm_id.split("-")[0]),
new_llm_id.split("-")[0],
@ -415,7 +455,7 @@ class FlowsService:
]
# Determine target component IDs based on provider
-target_embedding_id, target_llm_id = self._get_provider_component_ids(
+target_embedding_id, target_llm_id, target_llm_text_id = self._get_provider_component_ids(
provider
)
@ -429,6 +469,7 @@ class FlowsService:
provider,
target_embedding_id,
target_llm_id,
+target_llm_text_id,
embedding_model,
llm_model,
endpoint,
@ -471,12 +512,12 @@ class FlowsService:
def _get_provider_component_ids(self, provider: str):
"""Get the component IDs for a specific provider"""
if provider == "watsonx":
-return WATSONX_EMBEDDING_COMPONENT_ID, WATSONX_LLM_COMPONENT_ID
+return WATSONX_EMBEDDING_COMPONENT_ID, WATSONX_LLM_COMPONENT_ID, WATSONX_LLM_TEXT_COMPONENT_ID
elif provider == "ollama":
-return OLLAMA_EMBEDDING_COMPONENT_ID, OLLAMA_LLM_COMPONENT_ID
+return OLLAMA_EMBEDDING_COMPONENT_ID, OLLAMA_LLM_COMPONENT_ID, OLLAMA_LLM_TEXT_COMPONENT_ID
elif provider == "openai":
# OpenAI components are the default ones
-return OPENAI_EMBEDDING_COMPONENT_ID, OPENAI_LLM_COMPONENT_ID
+return OPENAI_EMBEDDING_COMPONENT_ID, OPENAI_LLM_COMPONENT_ID, OPENAI_LLM_TEXT_COMPONENT_ID
else:
raise ValueError(f"Unsupported provider: {provider}")
@ -486,6 +527,7 @@ class FlowsService:
provider: str,
target_embedding_id: str,
target_llm_id: str,
+target_llm_text_id: str,
embedding_model: str,
llm_model: str,
endpoint: str = None,
@ -512,7 +554,7 @@ class FlowsService:
embedding_node = self._find_node_by_id(flow_data, target_embedding_id)
if embedding_node:
if self._update_component_fields(
embedding_node, provider, "embedding", embedding_model, endpoint
embedding_node, provider, embedding_model, endpoint
):
updates_made.append(f"embedding model: {embedding_model}")
@ -521,7 +563,15 @@ class FlowsService:
llm_node = self._find_node_by_id(flow_data, target_llm_id)
if llm_node:
if self._update_component_fields(
llm_node, provider, "llm", llm_model, endpoint
llm_node, provider, llm_model, endpoint
):
updates_made.append(f"llm model: {llm_model}")
if target_llm_text_id:
llm_text_node = self._find_node_by_id(flow_data, target_llm_text_id)
if llm_text_node:
if self._update_component_fields(
llm_text_node, provider, llm_model, endpoint
):
updates_made.append(f"llm model: {llm_model}")
@ -569,7 +619,11 @@ class FlowsService:
updated = False
# Update model_name field (common to all providers)
if "model_name" in template:
if provider == "openai" and "model" in template:
template["model"]["value"] = model_value
template["model"]["options"] = [model_value]
updated = True
elif "model_name" in template:
template["model_name"]["value"] = model_value
template["model_name"]["options"] = [model_value]
updated = True
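
Taken together, the flows service now tracks three component IDs per provider and branches on the template key when pinning a model: OpenAI templates expose it as `model`, watsonx and Ollama as `model_name`. A runnable sketch of that branch, standalone rather than the service method itself:

def update_model_field(template: dict, provider: str, model_value: str) -> bool:
    """Mirror of the branch above: pick the provider's template key, then pin."""
    if provider == "openai" and "model" in template:
        key = "model"
    elif "model_name" in template:
        key = "model_name"
    else:
        return False
    template[key]["value"] = model_value
    template[key]["options"] = [model_value]
    return True

# Per the settings defaults above, _get_provider_component_ids("ollama") now returns
# ("OllamaEmbeddings-4ah5Q", "OllamaModel-eCsJx", "OllamaModel-XDGqZ").
tmpl = {"model_name": {"value": "", "options": []}}
assert update_model_field(tmpl, "ollama", "qwen3:4b")
assert tmpl["model_name"] == {"value": "qwen3:4b", "options": ["qwen3:4b"]}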

View file

@ -124,10 +124,8 @@ class ModelsService:
for model in models:
model_name = model.get(JSON_NAME_KEY, "")
# Remove tag if present (e.g., "llama3:latest" -> "llama3")
clean_model_name = model_name.split(":")[0] if model_name else ""
if not clean_model_name:
if not model_name:
continue
logger.debug(f"Checking model: {model_name}")
@ -152,7 +150,7 @@ class ModelsService:
# Check if it's an embedding model
is_embedding = any(
-embed_model in clean_model_name.lower()
+embed_model in model_name.lower()
for embed_model in self.OLLAMA_EMBEDDING_MODELS
)
@ -160,8 +158,8 @@ class ModelsService:
# Embedding models only need completion capability
embedding_models.append(
{
"value": clean_model_name,
"label": clean_model_name,
"value": model_name,
"label": model_name,
"default": False,
}
)
@ -169,9 +167,9 @@ class ModelsService:
# Language models need both completion and tool calling
language_models.append(
{
"value": clean_model_name,
"label": clean_model_name,
"default": "llama3" in clean_model_name.lower(),
"value": model_name,
"label": model_name,
"default": "llama3" in model_name.lower(),
}
)
except Exception as e:
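
The net effect of this last change is that Ollama model names keep their tags ("all-minilm:latest" stays intact instead of being truncated to "all-minilm"), matching the tagged values now pinned in the component JSONs above. A self-contained sketch of the classification with tags preserved; the embedding-model list and the "name" key are illustrative stand-ins for the service's `OLLAMA_EMBEDDING_MODELS` and `JSON_NAME_KEY` constants:

# Illustrative stand-ins, not the service's actual constants.
OLLAMA_EMBEDDING_MODELS = ("all-minilm", "nomic-embed-text", "mxbai-embed-large")

def classify_models(models: list[dict]) -> tuple[list[dict], list[dict]]:
    embedding_models, language_models = [], []
    for model in models:
        model_name = model.get("name", "")
        if not model_name:
            continue
        # Tags are kept: substring match against the full tagged name.
        if any(e in model_name.lower() for e in OLLAMA_EMBEDDING_MODELS):
            embedding_models.append(
                {"value": model_name, "label": model_name, "default": False}
            )
        else:
            language_models.append(
                {
                    "value": model_name,
                    "label": model_name,
                    "default": "llama3" in model_name.lower(),
                }
            )
    return embedding_models, language_models

emb, llm = classify_models([{"name": "all-minilm:latest"}, {"name": "qwen3:4b"}])
assert emb[0]["value"] == "all-minilm:latest" and llm[0]["value"] == "qwen3:4b"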