Compare commits

...
Sign in to create a new pull request.

6 commits

Author SHA1 Message Date
Lucas Oliveira
c3ba459d12 fix edge not connecting on nudges flow 2025-09-22 17:59:51 -03:00
Lucas Oliveira
a3a1a17231 changed models service to not remove : on ollama 2025-09-22 17:39:19 -03:00
Lucas Oliveira
745c00cad0 Changed flows service to change llm text components as well 2025-09-22 17:39:07 -03:00
Lucas Oliveira
95fb90ed63 Added new components and ids to settings 2025-09-22 17:38:48 -03:00
Lucas Oliveira
67e2f6ee98 Changed onboarding to redirect automatically 2025-09-22 17:38:35 -03:00
Lucas Oliveira
2745846930 Changed flows and components to support different models 2025-09-22 17:38:26 -03:00
11 changed files with 2110 additions and 691 deletions

View file

@ -2,7 +2,9 @@
"data": { "data": {
"id": "OllamaEmbeddings-4ah5Q", "id": "OllamaEmbeddings-4ah5Q",
"node": { "node": {
"base_classes": ["Embeddings"], "base_classes": [
"Embeddings"
],
"beta": false, "beta": false,
"conditional_paths": [], "conditional_paths": [],
"custom_fields": {}, "custom_fields": {},
@ -10,10 +12,13 @@
"display_name": "Ollama Embeddings", "display_name": "Ollama Embeddings",
"documentation": "https://python.langchain.com/docs/integrations/text_embedding/ollama", "documentation": "https://python.langchain.com/docs/integrations/text_embedding/ollama",
"edited": false, "edited": false,
"field_order": ["model_name", "base_url"], "field_order": [
"model_name",
"base_url"
],
"frozen": false, "frozen": false,
"icon": "Ollama", "icon": "Ollama",
"last_updated": "2025-09-17T20:01:59.954Z", "last_updated": "2025-09-22T20:18:27.128Z",
"legacy": false, "legacy": false,
"metadata": { "metadata": {
"code_hash": "0db0f99e91e9", "code_hash": "0db0f99e91e9",
@ -29,12 +34,17 @@
}, },
{ {
"name": "langflow", "name": "langflow",
"version": "1.5.0.post2" "version": null
} }
], ],
"total_dependencies": 3 "total_dependencies": 3
}, },
"keywords": ["model", "llm", "language model", "large language model"], "keywords": [
"model",
"llm",
"language model",
"large language model"
],
"module": "langflow.components.ollama.ollama_embeddings.OllamaEmbeddingsComponent" "module": "langflow.components.ollama.ollama_embeddings.OllamaEmbeddingsComponent"
}, },
"minimized": false, "minimized": false,
@ -51,7 +61,9 @@
"required_inputs": null, "required_inputs": null,
"selected": "Embeddings", "selected": "Embeddings",
"tool_mode": true, "tool_mode": true,
"types": ["Embeddings"], "types": [
"Embeddings"
],
"value": "__UNDEFINED__" "value": "__UNDEFINED__"
} }
], ],
@ -64,7 +76,9 @@
"display_name": "Ollama Base URL", "display_name": "Ollama Base URL",
"dynamic": false, "dynamic": false,
"info": "", "info": "",
"input_types": ["Message"], "input_types": [
"Message"
],
"list": false, "list": false,
"list_add_label": "Add More", "list_add_label": "Add More",
"load_from_db": true, "load_from_db": true,
@ -106,7 +120,9 @@
"dynamic": false, "dynamic": false,
"info": "", "info": "",
"name": "model_name", "name": "model_name",
"options": [], "options": [
"all-minilm:latest"
],
"options_metadata": [], "options_metadata": [],
"placeholder": "", "placeholder": "",
"real_time_refresh": true, "real_time_refresh": true,
@ -118,7 +134,7 @@
"tool_mode": false, "tool_mode": false,
"trace_as_metadata": true, "trace_as_metadata": true,
"type": "str", "type": "str",
"value": "" "value": "all-minilm:latest"
} }
}, },
"tool_mode": false "tool_mode": false
@ -133,8 +149,8 @@
"width": 320 "width": 320
}, },
"position": { "position": {
"x": 964, "x": 282.29416840859585,
"y": 248 "y": 279.4218065717267
}, },
"selected": false, "selected": false,
"type": "genericNode" "type": "genericNode"

View file

@ -2,7 +2,10 @@
"data": { "data": {
"id": "OllamaModel-eCsJx", "id": "OllamaModel-eCsJx",
"node": { "node": {
"base_classes": ["LanguageModel", "Message"], "base_classes": [
"LanguageModel",
"Message"
],
"beta": false, "beta": false,
"conditional_paths": [], "conditional_paths": [],
"custom_fields": {}, "custom_fields": {},
@ -40,7 +43,7 @@
], ],
"frozen": false, "frozen": false,
"icon": "Ollama", "icon": "Ollama",
"last_updated": "2025-09-17T20:01:59.191Z", "last_updated": "2025-09-22T20:14:45.057Z",
"legacy": false, "legacy": false,
"metadata": { "metadata": {
"code_hash": "af399d429d23", "code_hash": "af399d429d23",
@ -56,12 +59,17 @@
}, },
{ {
"name": "langflow", "name": "langflow",
"version": "1.5.0.post2" "version": null
} }
], ],
"total_dependencies": 3 "total_dependencies": 3
}, },
"keywords": ["model", "llm", "language model", "large language model"], "keywords": [
"model",
"llm",
"language model",
"large language model"
],
"module": "langflow.components.ollama.ollama.ChatOllamaComponent" "module": "langflow.components.ollama.ollama.ChatOllamaComponent"
}, },
"minimized": false, "minimized": false,
@ -77,7 +85,9 @@
"options": null, "options": null,
"required_inputs": null, "required_inputs": null,
"tool_mode": true, "tool_mode": true,
"types": ["Message"], "types": [
"Message"
],
"value": "__UNDEFINED__" "value": "__UNDEFINED__"
}, },
{ {
@ -91,7 +101,9 @@
"required_inputs": null, "required_inputs": null,
"selected": "LanguageModel", "selected": "LanguageModel",
"tool_mode": true, "tool_mode": true,
"types": ["LanguageModel"], "types": [
"LanguageModel"
],
"value": "__UNDEFINED__" "value": "__UNDEFINED__"
} }
], ],
@ -104,7 +116,9 @@
"display_name": "Base URL", "display_name": "Base URL",
"dynamic": false, "dynamic": false,
"info": "Endpoint of the Ollama API.", "info": "Endpoint of the Ollama API.",
"input_types": ["Message"], "input_types": [
"Message"
],
"list": false, "list": false,
"list_add_label": "Add More", "list_add_label": "Add More",
"load_from_db": true, "load_from_db": true,
@ -144,7 +158,9 @@
"display_name": "Format", "display_name": "Format",
"dynamic": false, "dynamic": false,
"info": "Specify the format of the output (e.g., json).", "info": "Specify the format of the output (e.g., json).",
"input_types": ["Message"], "input_types": [
"Message"
],
"list": false, "list": false,
"list_add_label": "Add More", "list_add_label": "Add More",
"load_from_db": false, "load_from_db": false,
@ -165,7 +181,9 @@
"display_name": "Input", "display_name": "Input",
"dynamic": false, "dynamic": false,
"info": "", "info": "",
"input_types": ["Message"], "input_types": [
"Message"
],
"list": false, "list": false,
"list_add_label": "Add More", "list_add_label": "Add More",
"load_from_db": false, "load_from_db": false,
@ -207,7 +225,11 @@
"dynamic": false, "dynamic": false,
"info": "Enable/disable Mirostat sampling for controlling perplexity.", "info": "Enable/disable Mirostat sampling for controlling perplexity.",
"name": "mirostat", "name": "mirostat",
"options": ["Disabled", "Mirostat", "Mirostat 2.0"], "options": [
"Disabled",
"Mirostat",
"Mirostat 2.0"
],
"options_metadata": [], "options_metadata": [],
"placeholder": "", "placeholder": "",
"real_time_refresh": true, "real_time_refresh": true,
@ -265,7 +287,9 @@
"dynamic": false, "dynamic": false,
"info": "Refer to https://ollama.com/library for more models.", "info": "Refer to https://ollama.com/library for more models.",
"name": "model_name", "name": "model_name",
"options": [], "options": [
"qwen3:4b"
],
"options_metadata": [], "options_metadata": [],
"placeholder": "", "placeholder": "",
"real_time_refresh": true, "real_time_refresh": true,
@ -277,7 +301,7 @@
"tool_mode": false, "tool_mode": false,
"trace_as_metadata": true, "trace_as_metadata": true,
"type": "str", "type": "str",
"value": "" "value": "qwen3:4b"
}, },
"num_ctx": { "num_ctx": {
"_input_type": "IntInput", "_input_type": "IntInput",
@ -375,7 +399,9 @@
"display_name": "Stop Tokens", "display_name": "Stop Tokens",
"dynamic": false, "dynamic": false,
"info": "Comma-separated list of tokens to signal the model to stop generating text.", "info": "Comma-separated list of tokens to signal the model to stop generating text.",
"input_types": ["Message"], "input_types": [
"Message"
],
"list": false, "list": false,
"list_add_label": "Add More", "list_add_label": "Add More",
"load_from_db": false, "load_from_db": false,
@ -414,7 +440,9 @@
"display_name": "System", "display_name": "System",
"dynamic": false, "dynamic": false,
"info": "System to use for generating text.", "info": "System to use for generating text.",
"input_types": ["Message"], "input_types": [
"Message"
],
"list": false, "list": false,
"list_add_label": "Add More", "list_add_label": "Add More",
"load_from_db": false, "load_from_db": false,
@ -436,7 +464,9 @@
"display_name": "System Message", "display_name": "System Message",
"dynamic": false, "dynamic": false,
"info": "System message to pass to the model.", "info": "System message to pass to the model.",
"input_types": ["Message"], "input_types": [
"Message"
],
"list": false, "list": false,
"list_add_label": "Add More", "list_add_label": "Add More",
"load_from_db": false, "load_from_db": false,
@ -458,7 +488,9 @@
"display_name": "Tags", "display_name": "Tags",
"dynamic": false, "dynamic": false,
"info": "Comma-separated list of tags to add to the run trace.", "info": "Comma-separated list of tags to add to the run trace.",
"input_types": ["Message"], "input_types": [
"Message"
],
"list": false, "list": false,
"list_add_label": "Add More", "list_add_label": "Add More",
"load_from_db": false, "load_from_db": false,
@ -507,7 +539,9 @@
"display_name": "Template", "display_name": "Template",
"dynamic": false, "dynamic": false,
"info": "Template to use for generating text.", "info": "Template to use for generating text.",
"input_types": ["Message"], "input_types": [
"Message"
],
"list": false, "list": false,
"list_add_label": "Add More", "list_add_label": "Add More",
"load_from_db": false, "load_from_db": false,
@ -638,14 +672,15 @@
"showNode": true, "showNode": true,
"type": "OllamaModel" "type": "OllamaModel"
}, },
"dragging": false,
"id": "OllamaModel-eCsJx", "id": "OllamaModel-eCsJx",
"measured": { "measured": {
"height": 494, "height": 494,
"width": 320 "width": 320
}, },
"position": { "position": {
"x": 554, "x": 248.08287272472313,
"y": 225 "y": 216.98088326271431
}, },
"selected": false, "selected": false,
"type": "genericNode" "type": "genericNode"

File diff suppressed because one or more lines are too long

View file

@ -2,7 +2,9 @@
"data": { "data": {
"id": "WatsonxEmbeddingsComponent-pJfXI", "id": "WatsonxEmbeddingsComponent-pJfXI",
"node": { "node": {
"base_classes": ["Embeddings"], "base_classes": [
"Embeddings"
],
"beta": false, "beta": false,
"conditional_paths": [], "conditional_paths": [],
"custom_fields": {}, "custom_fields": {},
@ -20,6 +22,7 @@
], ],
"frozen": false, "frozen": false,
"icon": "WatsonxAI", "icon": "WatsonxAI",
"last_updated": "2025-09-22T20:11:38.181Z",
"legacy": false, "legacy": false,
"metadata": { "metadata": {
"code_hash": "b6c6d50cc7ed", "code_hash": "b6c6d50cc7ed",
@ -43,7 +46,7 @@
}, },
{ {
"name": "langflow", "name": "langflow",
"version": "1.5.0.post2" "version": null
} }
], ],
"total_dependencies": 5 "total_dependencies": 5
@ -60,9 +63,13 @@
"group_outputs": false, "group_outputs": false,
"method": "build_embeddings", "method": "build_embeddings",
"name": "embeddings", "name": "embeddings",
"options": null,
"required_inputs": null,
"selected": "Embeddings", "selected": "Embeddings",
"tool_mode": true, "tool_mode": true,
"types": ["Embeddings"], "types": [
"Embeddings"
],
"value": "__UNDEFINED__" "value": "__UNDEFINED__"
} }
], ],
@ -131,7 +138,16 @@
"dynamic": true, "dynamic": true,
"info": "", "info": "",
"name": "model_name", "name": "model_name",
"options": [], "options": [
"ibm/granite-embedding-107m-multilingual",
"ibm/granite-embedding-278m-multilingual",
"ibm/slate-125m-english-rtrvr",
"ibm/slate-125m-english-rtrvr-v2",
"ibm/slate-30m-english-rtrvr",
"ibm/slate-30m-english-rtrvr-v2",
"intfloat/multilingual-e5-large",
"sentence-transformers/all-minilm-l6-v2"
],
"options_metadata": [], "options_metadata": [],
"placeholder": "", "placeholder": "",
"required": true, "required": true,
@ -140,7 +156,8 @@
"toggle": false, "toggle": false,
"tool_mode": false, "tool_mode": false,
"trace_as_metadata": true, "trace_as_metadata": true,
"type": "str" "type": "str",
"value": "ibm/granite-embedding-107m-multilingual"
}, },
"project_id": { "project_id": {
"_input_type": "StrInput", "_input_type": "StrInput",
@ -205,7 +222,8 @@
"toggle": false, "toggle": false,
"tool_mode": false, "tool_mode": false,
"trace_as_metadata": true, "trace_as_metadata": true,
"type": "str" "type": "str",
"value": "https://us-south.ml.cloud.ibm.com"
} }
}, },
"tool_mode": false "tool_mode": false
@ -213,14 +231,15 @@
"showNode": true, "showNode": true,
"type": "WatsonxEmbeddingsComponent" "type": "WatsonxEmbeddingsComponent"
}, },
"dragging": false,
"id": "WatsonxEmbeddingsComponent-pJfXI", "id": "WatsonxEmbeddingsComponent-pJfXI",
"measured": { "measured": {
"height": 467, "height": 467,
"width": 320 "width": 320
}, },
"position": { "position": {
"x": 999.129592360849, "x": 364.4406919374723,
"y": 753.2332292351236 "y": 282.29319267029086
}, },
"selected": false, "selected": false,
"type": "genericNode" "type": "genericNode"

View file

@ -2,7 +2,10 @@
"data": { "data": {
"id": "IBMwatsonxModel-jA4Nw", "id": "IBMwatsonxModel-jA4Nw",
"node": { "node": {
"base_classes": ["LanguageModel", "Message"], "base_classes": [
"LanguageModel",
"Message"
],
"beta": false, "beta": false,
"conditional_paths": [], "conditional_paths": [],
"custom_fields": {}, "custom_fields": {},
@ -31,6 +34,7 @@
], ],
"frozen": false, "frozen": false,
"icon": "WatsonxAI", "icon": "WatsonxAI",
"last_updated": "2025-09-22T20:03:31.248Z",
"legacy": false, "legacy": false,
"metadata": { "metadata": {
"code_hash": "7767fd69a954", "code_hash": "7767fd69a954",
@ -50,12 +54,17 @@
}, },
{ {
"name": "langflow", "name": "langflow",
"version": "1.5.0.post2" "version": null
} }
], ],
"total_dependencies": 4 "total_dependencies": 4
}, },
"keywords": ["model", "llm", "language model", "large language model"], "keywords": [
"model",
"llm",
"language model",
"large language model"
],
"module": "langflow.components.ibm.watsonx.WatsonxAIComponent" "module": "langflow.components.ibm.watsonx.WatsonxAIComponent"
}, },
"minimized": false, "minimized": false,
@ -68,8 +77,12 @@
"group_outputs": false, "group_outputs": false,
"method": "text_response", "method": "text_response",
"name": "text_output", "name": "text_output",
"options": null,
"required_inputs": null,
"tool_mode": true, "tool_mode": true,
"types": ["Message"], "types": [
"Message"
],
"value": "__UNDEFINED__" "value": "__UNDEFINED__"
}, },
{ {
@ -79,9 +92,13 @@
"group_outputs": false, "group_outputs": false,
"method": "build_model", "method": "build_model",
"name": "model_output", "name": "model_output",
"options": null,
"required_inputs": null,
"selected": "LanguageModel", "selected": "LanguageModel",
"tool_mode": true, "tool_mode": true,
"types": ["LanguageModel"], "types": [
"LanguageModel"
],
"value": "__UNDEFINED__" "value": "__UNDEFINED__"
} }
], ],
@ -157,7 +174,9 @@
"display_name": "Input", "display_name": "Input",
"dynamic": false, "dynamic": false,
"info": "", "info": "",
"input_types": ["Message"], "input_types": [
"Message"
],
"list": false, "list": false,
"list_add_label": "Add More", "list_add_label": "Add More",
"load_from_db": false, "load_from_db": false,
@ -242,7 +261,26 @@
"dynamic": true, "dynamic": true,
"info": "", "info": "",
"name": "model_name", "name": "model_name",
"options": [], "options": [
"ibm/granite-3-2-8b-instruct",
"ibm/granite-3-2b-instruct",
"ibm/granite-3-3-8b-instruct",
"ibm/granite-3-8b-instruct",
"ibm/granite-guardian-3-2b",
"ibm/granite-guardian-3-8b",
"ibm/granite-vision-3-2-2b",
"meta-llama/llama-3-2-11b-vision-instruct",
"meta-llama/llama-3-2-90b-vision-instruct",
"meta-llama/llama-3-3-70b-instruct",
"meta-llama/llama-3-405b-instruct",
"meta-llama/llama-4-maverick-17b-128e-instruct-fp8",
"meta-llama/llama-guard-3-11b-vision",
"mistralai/mistral-large",
"mistralai/mistral-medium-2505",
"mistralai/mistral-small-3-1-24b-instruct-2503",
"mistralai/pixtral-12b",
"openai/gpt-oss-120b"
],
"options_metadata": [], "options_metadata": [],
"placeholder": "", "placeholder": "",
"required": true, "required": true,
@ -251,7 +289,8 @@
"toggle": false, "toggle": false,
"tool_mode": false, "tool_mode": false,
"trace_as_metadata": true, "trace_as_metadata": true,
"type": "str" "type": "str",
"value": "ibm/granite-3-2-8b-instruct"
}, },
"presence_penalty": { "presence_penalty": {
"_input_type": "SliderInput", "_input_type": "SliderInput",
@ -362,7 +401,9 @@
"display_name": "System Message", "display_name": "System Message",
"dynamic": false, "dynamic": false,
"info": "System message to pass to the model.", "info": "System message to pass to the model.",
"input_types": ["Message"], "input_types": [
"Message"
],
"list": false, "list": false,
"list_add_label": "Add More", "list_add_label": "Add More",
"load_from_db": false, "load_from_db": false,
@ -484,7 +525,8 @@
"toggle": false, "toggle": false,
"tool_mode": false, "tool_mode": false,
"trace_as_metadata": true, "trace_as_metadata": true,
"type": "str" "type": "str",
"value": "https://us-south.ml.cloud.ibm.com"
} }
}, },
"tool_mode": false "tool_mode": false
@ -493,14 +535,15 @@
"showNode": true, "showNode": true,
"type": "IBMwatsonxModel" "type": "IBMwatsonxModel"
}, },
"dragging": false,
"id": "IBMwatsonxModel-jA4Nw", "id": "IBMwatsonxModel-jA4Nw",
"measured": { "measured": {
"height": 632, "height": 632,
"width": 320 "width": 320
}, },
"position": { "position": {
"x": 562.2658900512183, "x": 371.93566807042805,
"y": 895.3455179382565 "y": 197.47711431325635
}, },
"selected": false, "selected": false,
"type": "genericNode" "type": "genericNode"

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View file

@ -1,10 +1,11 @@
"use client"; "use client";
import { useRouter } from "next/navigation";
import { Suspense, useEffect, useState } from "react"; import { Suspense, useEffect, useState } from "react";
import { toast } from "sonner"; import { toast } from "sonner";
import { import {
useOnboardingMutation,
type OnboardingVariables, type OnboardingVariables,
useOnboardingMutation,
} from "@/app/api/mutations/useOnboardingMutation"; } from "@/app/api/mutations/useOnboardingMutation";
import IBMLogo from "@/components/logo/ibm-logo"; import IBMLogo from "@/components/logo/ibm-logo";
import OllamaLogo from "@/components/logo/ollama-logo"; import OllamaLogo from "@/components/logo/ollama-logo";
@ -18,16 +19,15 @@ import {
CardHeader, CardHeader,
} from "@/components/ui/card"; } from "@/components/ui/card";
import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs"; import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs";
import { IBMOnboarding } from "./components/ibm-onboarding";
import { OllamaOnboarding } from "./components/ollama-onboarding";
import { OpenAIOnboarding } from "./components/openai-onboarding";
import { import {
Tooltip, Tooltip,
TooltipContent, TooltipContent,
TooltipTrigger, TooltipTrigger,
} from "@/components/ui/tooltip"; } from "@/components/ui/tooltip";
import { useGetSettingsQuery } from "../api/queries/useGetSettingsQuery"; import { useGetSettingsQuery } from "../api/queries/useGetSettingsQuery";
import { useRouter } from "next/navigation"; import { IBMOnboarding } from "./components/ibm-onboarding";
import { OllamaOnboarding } from "./components/ollama-onboarding";
import { OpenAIOnboarding } from "./components/openai-onboarding";
function OnboardingPage() { function OnboardingPage() {
const { data: settingsDb, isLoading: isSettingsLoading } = const { data: settingsDb, isLoading: isSettingsLoading } =
@ -42,7 +42,7 @@ function OnboardingPage() {
if (!isSettingsLoading && settingsDb && settingsDb.edited) { if (!isSettingsLoading && settingsDb && settingsDb.edited) {
router.push(redirect); router.push(redirect);
} }
}, [isSettingsLoading, redirect]); }, [isSettingsLoading, settingsDb, router]);
const [modelProvider, setModelProvider] = useState<string>("openai"); const [modelProvider, setModelProvider] = useState<string>("openai");
@ -68,6 +68,7 @@ function OnboardingPage() {
onSuccess: (data) => { onSuccess: (data) => {
toast.success("Onboarding completed successfully!"); toast.success("Onboarding completed successfully!");
console.log("Onboarding completed successfully", data); console.log("Onboarding completed successfully", data);
router.push(redirect);
}, },
onError: (error) => { onError: (error) => {
toast.error("Failed to complete onboarding", { toast.error("Failed to complete onboarding", {

View file

@ -496,12 +496,18 @@ class AppClients:
WATSONX_LLM_COMPONENT_PATH = os.getenv( WATSONX_LLM_COMPONENT_PATH = os.getenv(
"WATSONX_LLM_COMPONENT_PATH", "flows/components/watsonx_llm.json" "WATSONX_LLM_COMPONENT_PATH", "flows/components/watsonx_llm.json"
) )
WATSONX_LLM_TEXT_COMPONENT_PATH = os.getenv(
"WATSONX_LLM_TEXT_COMPONENT_PATH", "flows/components/watsonx_llm_text.json"
)
WATSONX_EMBEDDING_COMPONENT_PATH = os.getenv( WATSONX_EMBEDDING_COMPONENT_PATH = os.getenv(
"WATSONX_EMBEDDING_COMPONENT_PATH", "flows/components/watsonx_embedding.json" "WATSONX_EMBEDDING_COMPONENT_PATH", "flows/components/watsonx_embedding.json"
) )
OLLAMA_LLM_COMPONENT_PATH = os.getenv( OLLAMA_LLM_COMPONENT_PATH = os.getenv(
"OLLAMA_LLM_COMPONENT_PATH", "flows/components/ollama_llm.json" "OLLAMA_LLM_COMPONENT_PATH", "flows/components/ollama_llm.json"
) )
OLLAMA_LLM_TEXT_COMPONENT_PATH = os.getenv(
"OLLAMA_LLM_TEXT_COMPONENT_PATH", "flows/components/ollama_llm_text.json"
)
OLLAMA_EMBEDDING_COMPONENT_PATH = os.getenv( OLLAMA_EMBEDDING_COMPONENT_PATH = os.getenv(
"OLLAMA_EMBEDDING_COMPONENT_PATH", "flows/components/ollama_embedding.json" "OLLAMA_EMBEDDING_COMPONENT_PATH", "flows/components/ollama_embedding.json"
) )
@ -514,6 +520,9 @@ OPENAI_EMBEDDING_COMPONENT_ID = os.getenv(
OPENAI_LLM_COMPONENT_ID = os.getenv( OPENAI_LLM_COMPONENT_ID = os.getenv(
"OPENAI_LLM_COMPONENT_ID", "LanguageModelComponent-0YME7" "OPENAI_LLM_COMPONENT_ID", "LanguageModelComponent-0YME7"
) )
OPENAI_LLM_TEXT_COMPONENT_ID = os.getenv(
"OPENAI_LLM_TEXT_COMPONENT_ID", "LanguageModelComponent-NSTA6"
)
# Provider-specific component IDs # Provider-specific component IDs
WATSONX_EMBEDDING_COMPONENT_ID = os.getenv( WATSONX_EMBEDDING_COMPONENT_ID = os.getenv(
@ -522,11 +531,18 @@ WATSONX_EMBEDDING_COMPONENT_ID = os.getenv(
WATSONX_LLM_COMPONENT_ID = os.getenv( WATSONX_LLM_COMPONENT_ID = os.getenv(
"WATSONX_LLM_COMPONENT_ID", "IBMwatsonxModel-jA4Nw" "WATSONX_LLM_COMPONENT_ID", "IBMwatsonxModel-jA4Nw"
) )
WATSONX_LLM_TEXT_COMPONENT_ID = os.getenv(
"WATSONX_LLM_TEXT_COMPONENT_ID", "IBMwatsonxModel-18kmA"
)
OLLAMA_EMBEDDING_COMPONENT_ID = os.getenv( OLLAMA_EMBEDDING_COMPONENT_ID = os.getenv(
"OLLAMA_EMBEDDING_COMPONENT_ID", "OllamaEmbeddings-4ah5Q" "OLLAMA_EMBEDDING_COMPONENT_ID", "OllamaEmbeddings-4ah5Q"
) )
OLLAMA_LLM_COMPONENT_ID = os.getenv("OLLAMA_LLM_COMPONENT_ID", "OllamaModel-eCsJx") OLLAMA_LLM_COMPONENT_ID = os.getenv("OLLAMA_LLM_COMPONENT_ID", "OllamaModel-eCsJx")
OLLAMA_LLM_TEXT_COMPONENT_ID = os.getenv(
"OLLAMA_LLM_TEXT_COMPONENT_ID", "OllamaModel-XDGqZ"
)
# Global clients instance # Global clients instance
clients = AppClients() clients = AppClients()

View file

@ -3,8 +3,13 @@ from config.settings import (
LANGFLOW_URL, LANGFLOW_URL,
LANGFLOW_CHAT_FLOW_ID, LANGFLOW_CHAT_FLOW_ID,
LANGFLOW_INGEST_FLOW_ID, LANGFLOW_INGEST_FLOW_ID,
OLLAMA_LLM_TEXT_COMPONENT_ID,
OLLAMA_LLM_TEXT_COMPONENT_PATH,
OPENAI_EMBEDDING_COMPONENT_ID, OPENAI_EMBEDDING_COMPONENT_ID,
OPENAI_LLM_COMPONENT_ID, OPENAI_LLM_COMPONENT_ID,
OPENAI_LLM_TEXT_COMPONENT_ID,
WATSONX_LLM_TEXT_COMPONENT_ID,
WATSONX_LLM_TEXT_COMPONENT_PATH,
clients, clients,
WATSONX_LLM_COMPONENT_PATH, WATSONX_LLM_COMPONENT_PATH,
WATSONX_EMBEDDING_COMPONENT_PATH, WATSONX_EMBEDDING_COMPONENT_PATH,
@ -146,7 +151,7 @@ class FlowsService:
try: try:
# Load component templates based on provider # Load component templates based on provider
llm_template, embedding_template = self._load_component_templates(provider) llm_template, embedding_template, llm_text_template = self._load_component_templates(provider)
logger.info(f"Assigning {provider} components") logger.info(f"Assigning {provider} components")
@ -158,6 +163,7 @@ class FlowsService:
"flow_id": NUDGES_FLOW_ID, "flow_id": NUDGES_FLOW_ID,
"embedding_id": OPENAI_EMBEDDING_COMPONENT_ID, "embedding_id": OPENAI_EMBEDDING_COMPONENT_ID,
"llm_id": OPENAI_LLM_COMPONENT_ID, "llm_id": OPENAI_LLM_COMPONENT_ID,
"llm_text_id": OPENAI_LLM_TEXT_COMPONENT_ID,
}, },
{ {
"name": "retrieval", "name": "retrieval",
@ -165,6 +171,7 @@ class FlowsService:
"flow_id": LANGFLOW_CHAT_FLOW_ID, "flow_id": LANGFLOW_CHAT_FLOW_ID,
"embedding_id": OPENAI_EMBEDDING_COMPONENT_ID, "embedding_id": OPENAI_EMBEDDING_COMPONENT_ID,
"llm_id": OPENAI_LLM_COMPONENT_ID, "llm_id": OPENAI_LLM_COMPONENT_ID,
"llm_text_id": None,
}, },
{ {
"name": "ingest", "name": "ingest",
@ -172,6 +179,7 @@ class FlowsService:
"flow_id": LANGFLOW_INGEST_FLOW_ID, "flow_id": LANGFLOW_INGEST_FLOW_ID,
"embedding_id": OPENAI_EMBEDDING_COMPONENT_ID, "embedding_id": OPENAI_EMBEDDING_COMPONENT_ID,
"llm_id": None, # Ingestion flow might not have LLM "llm_id": None, # Ingestion flow might not have LLM
"llm_text_id": None, # Ingestion flow might not have LLM Text
}, },
] ]
@ -181,7 +189,7 @@ class FlowsService:
for config in flow_configs: for config in flow_configs:
try: try:
result = await self._update_flow_components( result = await self._update_flow_components(
config, llm_template, embedding_template config, llm_template, embedding_template, llm_text_template
) )
results.append(result) results.append(result)
logger.info(f"Successfully updated {config['name']} flow") logger.info(f"Successfully updated {config['name']} flow")
@ -215,9 +223,11 @@ class FlowsService:
if provider == "watsonx": if provider == "watsonx":
llm_path = WATSONX_LLM_COMPONENT_PATH llm_path = WATSONX_LLM_COMPONENT_PATH
embedding_path = WATSONX_EMBEDDING_COMPONENT_PATH embedding_path = WATSONX_EMBEDDING_COMPONENT_PATH
llm_text_path = WATSONX_LLM_TEXT_COMPONENT_PATH
elif provider == "ollama": elif provider == "ollama":
llm_path = OLLAMA_LLM_COMPONENT_PATH llm_path = OLLAMA_LLM_COMPONENT_PATH
embedding_path = OLLAMA_EMBEDDING_COMPONENT_PATH embedding_path = OLLAMA_EMBEDDING_COMPONENT_PATH
llm_text_path = OLLAMA_LLM_TEXT_COMPONENT_PATH
else: else:
raise ValueError(f"Unsupported provider: {provider}") raise ValueError(f"Unsupported provider: {provider}")
@ -246,21 +256,31 @@ class FlowsService:
with open(embedding_full_path, "r") as f: with open(embedding_full_path, "r") as f:
embedding_template = json.load(f) embedding_template = json.load(f)
logger.info(f"Loaded component templates for {provider}") # Load LLM Text template
return llm_template, embedding_template llm_text_full_path = os.path.join(project_root, llm_text_path)
if not os.path.exists(llm_text_full_path):
raise FileNotFoundError(
f"LLM Text component template not found at: {llm_text_full_path}"
)
async def _update_flow_components(self, config, llm_template, embedding_template): with open(llm_text_full_path, "r") as f:
llm_text_template = json.load(f)
logger.info(f"Loaded component templates for {provider}")
return llm_template, embedding_template, llm_text_template
async def _update_flow_components(self, config, llm_template, embedding_template, llm_text_template):
"""Update components in a specific flow""" """Update components in a specific flow"""
flow_name = config["name"] flow_name = config["name"]
flow_file = config["file"] flow_file = config["file"]
flow_id = config["flow_id"] flow_id = config["flow_id"]
old_embedding_id = config["embedding_id"] old_embedding_id = config["embedding_id"]
old_llm_id = config["llm_id"] old_llm_id = config["llm_id"]
old_llm_text_id = config["llm_text_id"]
# Extract IDs from templates # Extract IDs from templates
new_llm_id = llm_template["data"]["id"] new_llm_id = llm_template["data"]["id"]
new_embedding_id = embedding_template["data"]["id"] new_embedding_id = embedding_template["data"]["id"]
new_llm_text_id = llm_text_template["data"]["id"]
# Get the project root directory # Get the project root directory
current_file_dir = os.path.dirname(os.path.abspath(__file__)) current_file_dir = os.path.dirname(os.path.abspath(__file__))
src_dir = os.path.dirname(current_file_dir) src_dir = os.path.dirname(current_file_dir)
@ -308,6 +328,21 @@ class FlowsService:
self._replace_node_in_flow(flow_data, old_llm_id, new_llm_node) self._replace_node_in_flow(flow_data, old_llm_id, new_llm_node)
components_updated.append(f"llm: {old_llm_id} -> {new_llm_id}") components_updated.append(f"llm: {old_llm_id} -> {new_llm_id}")
# Replace LLM Text component (if exists in this flow)
if old_llm_text_id:
llm_text_node = self._find_node_by_id(flow_data, old_llm_text_id)
if llm_text_node:
# Preserve position
original_position = llm_text_node.get("position", {})
# Replace with new template
new_llm_text_node = llm_text_template.copy()
new_llm_text_node["position"] = original_position
# Replace in flow
self._replace_node_in_flow(flow_data, old_llm_text_id, new_llm_text_node)
components_updated.append(f"llm_text: {old_llm_text_id} -> {new_llm_text_id}")
# Update all edge references using regex replacement # Update all edge references using regex replacement
flow_json_str = json.dumps(flow_data) flow_json_str = json.dumps(flow_data)
@ -326,6 +361,11 @@ class FlowsService:
flow_json_str = re.sub( flow_json_str = re.sub(
re.escape(old_llm_id), new_llm_id, flow_json_str re.escape(old_llm_id), new_llm_id, flow_json_str
) )
if old_llm_text_id:
flow_json_str = re.sub(
re.escape(old_llm_text_id), new_llm_text_id, flow_json_str
)
flow_json_str = re.sub( flow_json_str = re.sub(
re.escape(old_llm_id.split("-")[0]), re.escape(old_llm_id.split("-")[0]),
new_llm_id.split("-")[0], new_llm_id.split("-")[0],
@ -415,7 +455,7 @@ class FlowsService:
] ]
# Determine target component IDs based on provider # Determine target component IDs based on provider
target_embedding_id, target_llm_id = self._get_provider_component_ids( target_embedding_id, target_llm_id, target_llm_text_id = self._get_provider_component_ids(
provider provider
) )
@ -429,6 +469,7 @@ class FlowsService:
provider, provider,
target_embedding_id, target_embedding_id,
target_llm_id, target_llm_id,
target_llm_text_id,
embedding_model, embedding_model,
llm_model, llm_model,
endpoint, endpoint,
@ -471,12 +512,12 @@ class FlowsService:
def _get_provider_component_ids(self, provider: str): def _get_provider_component_ids(self, provider: str):
"""Get the component IDs for a specific provider""" """Get the component IDs for a specific provider"""
if provider == "watsonx": if provider == "watsonx":
return WATSONX_EMBEDDING_COMPONENT_ID, WATSONX_LLM_COMPONENT_ID return WATSONX_EMBEDDING_COMPONENT_ID, WATSONX_LLM_COMPONENT_ID, WATSONX_LLM_TEXT_COMPONENT_ID
elif provider == "ollama": elif provider == "ollama":
return OLLAMA_EMBEDDING_COMPONENT_ID, OLLAMA_LLM_COMPONENT_ID return OLLAMA_EMBEDDING_COMPONENT_ID, OLLAMA_LLM_COMPONENT_ID, OLLAMA_LLM_TEXT_COMPONENT_ID
elif provider == "openai": elif provider == "openai":
# OpenAI components are the default ones # OpenAI components are the default ones
return OPENAI_EMBEDDING_COMPONENT_ID, OPENAI_LLM_COMPONENT_ID return OPENAI_EMBEDDING_COMPONENT_ID, OPENAI_LLM_COMPONENT_ID, OPENAI_LLM_TEXT_COMPONENT_ID
else: else:
raise ValueError(f"Unsupported provider: {provider}") raise ValueError(f"Unsupported provider: {provider}")
@ -486,6 +527,7 @@ class FlowsService:
provider: str, provider: str,
target_embedding_id: str, target_embedding_id: str,
target_llm_id: str, target_llm_id: str,
target_llm_text_id: str,
embedding_model: str, embedding_model: str,
llm_model: str, llm_model: str,
endpoint: str = None, endpoint: str = None,
@ -512,7 +554,7 @@ class FlowsService:
embedding_node = self._find_node_by_id(flow_data, target_embedding_id) embedding_node = self._find_node_by_id(flow_data, target_embedding_id)
if embedding_node: if embedding_node:
if self._update_component_fields( if self._update_component_fields(
embedding_node, provider, "embedding", embedding_model, endpoint embedding_node, provider, embedding_model, endpoint
): ):
updates_made.append(f"embedding model: {embedding_model}") updates_made.append(f"embedding model: {embedding_model}")
@ -521,7 +563,15 @@ class FlowsService:
llm_node = self._find_node_by_id(flow_data, target_llm_id) llm_node = self._find_node_by_id(flow_data, target_llm_id)
if llm_node: if llm_node:
if self._update_component_fields( if self._update_component_fields(
llm_node, provider, "llm", llm_model, endpoint llm_node, provider, llm_model, endpoint
):
updates_made.append(f"llm model: {llm_model}")
if target_llm_text_id:
llm_text_node = self._find_node_by_id(flow_data, target_llm_text_id)
if llm_text_node:
if self._update_component_fields(
llm_text_node, provider, llm_model, endpoint
): ):
updates_made.append(f"llm model: {llm_model}") updates_made.append(f"llm model: {llm_model}")
@ -569,7 +619,11 @@ class FlowsService:
updated = False updated = False
# Update model_name field (common to all providers) # Update model_name field (common to all providers)
if "model_name" in template: if provider == "openai" and "model" in template:
template["model"]["value"] = model_value
template["model"]["options"] = [model_value]
updated = True
elif "model_name" in template:
template["model_name"]["value"] = model_value template["model_name"]["value"] = model_value
template["model_name"]["options"] = [model_value] template["model_name"]["options"] = [model_value]
updated = True updated = True

View file

@ -124,10 +124,8 @@ class ModelsService:
for model in models: for model in models:
model_name = model.get(JSON_NAME_KEY, "") model_name = model.get(JSON_NAME_KEY, "")
# Remove tag if present (e.g., "llama3:latest" -> "llama3")
clean_model_name = model_name.split(":")[0] if model_name else ""
if not clean_model_name: if not model_name:
continue continue
logger.debug(f"Checking model: {model_name}") logger.debug(f"Checking model: {model_name}")
@ -152,7 +150,7 @@ class ModelsService:
# Check if it's an embedding model # Check if it's an embedding model
is_embedding = any( is_embedding = any(
embed_model in clean_model_name.lower() embed_model in model_name.lower()
for embed_model in self.OLLAMA_EMBEDDING_MODELS for embed_model in self.OLLAMA_EMBEDDING_MODELS
) )
@ -160,8 +158,8 @@ class ModelsService:
# Embedding models only need completion capability # Embedding models only need completion capability
embedding_models.append( embedding_models.append(
{ {
"value": clean_model_name, "value": model_name,
"label": clean_model_name, "label": model_name,
"default": False, "default": False,
} }
) )
@ -169,9 +167,9 @@ class ModelsService:
# Language models need both completion and tool calling # Language models need both completion and tool calling
language_models.append( language_models.append(
{ {
"value": clean_model_name, "value": model_name,
"label": clean_model_name, "label": model_name,
"default": "llama3" in clean_model_name.lower(), "default": "llama3" in model_name.lower(),
} }
) )
except Exception as e: except Exception as e: