From c6794adca934f0243c49c497f2b5fd192d2a2410 Mon Sep 17 00:00:00 2001 From: Lucas Oliveira <62335616+lucaseduoli@users.noreply.github.com> Date: Mon, 22 Sep 2025 18:02:56 -0300 Subject: [PATCH] fix: make different providers have their edges connected, redirect to main page from onboarding (#58) * Changed flows and components to support different models * Changed onboarding to redirect automatically * Added new components and ids to settings * Changed flows service to change llm text components as well * changed models service to not remove : on ollama * fix edge not connecting on nudges flow --- flows/components/ollama_embedding.json | 40 +- flows/components/ollama_llm.json | 75 +- flows/components/ollama_llm_text.json | 687 ++++++++++++++++++ flows/components/watsonx_embedding.json | 37 +- flows/components/watsonx_llm.json | 69 +- flows/components/watsonx_llm_text.json | 551 +++++++++++++++ flows/openrag_nudges.json | 887 ++++++++++++------------ frontend/src/app/onboarding/page.tsx | 341 ++++----- src/config/settings.py | 16 + src/services/flows_service.py | 82 ++- src/services/models_service.py | 16 +- 11 files changed, 2110 insertions(+), 691 deletions(-) create mode 100644 flows/components/ollama_llm_text.json create mode 100644 flows/components/watsonx_llm_text.json diff --git a/flows/components/ollama_embedding.json b/flows/components/ollama_embedding.json index 707179a8..01b83c44 100644 --- a/flows/components/ollama_embedding.json +++ b/flows/components/ollama_embedding.json @@ -2,7 +2,9 @@ "data": { "id": "OllamaEmbeddings-4ah5Q", "node": { - "base_classes": ["Embeddings"], + "base_classes": [ + "Embeddings" + ], "beta": false, "conditional_paths": [], "custom_fields": {}, @@ -10,10 +12,13 @@ "display_name": "Ollama Embeddings", "documentation": "https://python.langchain.com/docs/integrations/text_embedding/ollama", "edited": false, - "field_order": ["model_name", "base_url"], + "field_order": [ + "model_name", + "base_url" + ], "frozen": false, "icon": 
"Ollama", - "last_updated": "2025-09-17T20:01:59.954Z", + "last_updated": "2025-09-22T20:18:27.128Z", "legacy": false, "metadata": { "code_hash": "0db0f99e91e9", @@ -29,12 +34,17 @@ }, { "name": "langflow", - "version": "1.5.0.post2" + "version": null } ], "total_dependencies": 3 }, - "keywords": ["model", "llm", "language model", "large language model"], + "keywords": [ + "model", + "llm", + "language model", + "large language model" + ], "module": "langflow.components.ollama.ollama_embeddings.OllamaEmbeddingsComponent" }, "minimized": false, @@ -51,7 +61,9 @@ "required_inputs": null, "selected": "Embeddings", "tool_mode": true, - "types": ["Embeddings"], + "types": [ + "Embeddings" + ], "value": "__UNDEFINED__" } ], @@ -64,7 +76,9 @@ "display_name": "Ollama Base URL", "dynamic": false, "info": "", - "input_types": ["Message"], + "input_types": [ + "Message" + ], "list": false, "list_add_label": "Add More", "load_from_db": true, @@ -106,7 +120,9 @@ "dynamic": false, "info": "", "name": "model_name", - "options": [], + "options": [ + "all-minilm:latest" + ], "options_metadata": [], "placeholder": "", "real_time_refresh": true, @@ -118,7 +134,7 @@ "tool_mode": false, "trace_as_metadata": true, "type": "str", - "value": "" + "value": "all-minilm:latest" } }, "tool_mode": false @@ -133,9 +149,9 @@ "width": 320 }, "position": { - "x": 964, - "y": 248 + "x": 282.29416840859585, + "y": 279.4218065717267 }, "selected": false, "type": "genericNode" -} +} \ No newline at end of file diff --git a/flows/components/ollama_llm.json b/flows/components/ollama_llm.json index cbe5e10d..0edf7f13 100644 --- a/flows/components/ollama_llm.json +++ b/flows/components/ollama_llm.json @@ -2,7 +2,10 @@ "data": { "id": "OllamaModel-eCsJx", "node": { - "base_classes": ["LanguageModel", "Message"], + "base_classes": [ + "LanguageModel", + "Message" + ], "beta": false, "conditional_paths": [], "custom_fields": {}, @@ -40,7 +43,7 @@ ], "frozen": false, "icon": "Ollama", - "last_updated": 
"2025-09-17T20:01:59.191Z", + "last_updated": "2025-09-22T20:14:45.057Z", "legacy": false, "metadata": { "code_hash": "af399d429d23", @@ -56,12 +59,17 @@ }, { "name": "langflow", - "version": "1.5.0.post2" + "version": null } ], "total_dependencies": 3 }, - "keywords": ["model", "llm", "language model", "large language model"], + "keywords": [ + "model", + "llm", + "language model", + "large language model" + ], "module": "langflow.components.ollama.ollama.ChatOllamaComponent" }, "minimized": false, @@ -77,7 +85,9 @@ "options": null, "required_inputs": null, "tool_mode": true, - "types": ["Message"], + "types": [ + "Message" + ], "value": "__UNDEFINED__" }, { @@ -91,7 +101,9 @@ "required_inputs": null, "selected": "LanguageModel", "tool_mode": true, - "types": ["LanguageModel"], + "types": [ + "LanguageModel" + ], "value": "__UNDEFINED__" } ], @@ -104,7 +116,9 @@ "display_name": "Base URL", "dynamic": false, "info": "Endpoint of the Ollama API.", - "input_types": ["Message"], + "input_types": [ + "Message" + ], "list": false, "list_add_label": "Add More", "load_from_db": true, @@ -144,7 +158,9 @@ "display_name": "Format", "dynamic": false, "info": "Specify the format of the output (e.g., json).", - "input_types": ["Message"], + "input_types": [ + "Message" + ], "list": false, "list_add_label": "Add More", "load_from_db": false, @@ -165,7 +181,9 @@ "display_name": "Input", "dynamic": false, "info": "", - "input_types": ["Message"], + "input_types": [ + "Message" + ], "list": false, "list_add_label": "Add More", "load_from_db": false, @@ -207,7 +225,11 @@ "dynamic": false, "info": "Enable/disable Mirostat sampling for controlling perplexity.", "name": "mirostat", - "options": ["Disabled", "Mirostat", "Mirostat 2.0"], + "options": [ + "Disabled", + "Mirostat", + "Mirostat 2.0" + ], "options_metadata": [], "placeholder": "", "real_time_refresh": true, @@ -265,7 +287,9 @@ "dynamic": false, "info": "Refer to https://ollama.com/library for more models.", "name": 
"model_name", - "options": [], + "options": [ + "qwen3:4b" + ], "options_metadata": [], "placeholder": "", "real_time_refresh": true, @@ -277,7 +301,7 @@ "tool_mode": false, "trace_as_metadata": true, "type": "str", - "value": "" + "value": "qwen3:4b" }, "num_ctx": { "_input_type": "IntInput", @@ -375,7 +399,9 @@ "display_name": "Stop Tokens", "dynamic": false, "info": "Comma-separated list of tokens to signal the model to stop generating text.", - "input_types": ["Message"], + "input_types": [ + "Message" + ], "list": false, "list_add_label": "Add More", "load_from_db": false, @@ -414,7 +440,9 @@ "display_name": "System", "dynamic": false, "info": "System to use for generating text.", - "input_types": ["Message"], + "input_types": [ + "Message" + ], "list": false, "list_add_label": "Add More", "load_from_db": false, @@ -436,7 +464,9 @@ "display_name": "System Message", "dynamic": false, "info": "System message to pass to the model.", - "input_types": ["Message"], + "input_types": [ + "Message" + ], "list": false, "list_add_label": "Add More", "load_from_db": false, @@ -458,7 +488,9 @@ "display_name": "Tags", "dynamic": false, "info": "Comma-separated list of tags to add to the run trace.", - "input_types": ["Message"], + "input_types": [ + "Message" + ], "list": false, "list_add_label": "Add More", "load_from_db": false, @@ -507,7 +539,9 @@ "display_name": "Template", "dynamic": false, "info": "Template to use for generating text.", - "input_types": ["Message"], + "input_types": [ + "Message" + ], "list": false, "list_add_label": "Add More", "load_from_db": false, @@ -638,15 +672,16 @@ "showNode": true, "type": "OllamaModel" }, + "dragging": false, "id": "OllamaModel-eCsJx", "measured": { "height": 494, "width": 320 }, "position": { - "x": 554, - "y": 225 + "x": 248.08287272472313, + "y": 216.98088326271431 }, "selected": false, "type": "genericNode" -} +} \ No newline at end of file diff --git a/flows/components/ollama_llm_text.json 
b/flows/components/ollama_llm_text.json new file mode 100644 index 00000000..846e8313 --- /dev/null +++ b/flows/components/ollama_llm_text.json @@ -0,0 +1,687 @@ +{ + "data": { + "id": "OllamaModel-XDGqZ", + "node": { + "base_classes": [ + "LanguageModel", + "Message" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Generate text using Ollama Local LLMs.", + "display_name": "Ollama", + "documentation": "", + "edited": false, + "field_order": [ + "base_url", + "model_name", + "temperature", + "format", + "metadata", + "mirostat", + "mirostat_eta", + "mirostat_tau", + "num_ctx", + "num_gpu", + "num_thread", + "repeat_last_n", + "repeat_penalty", + "tfs_z", + "timeout", + "top_k", + "top_p", + "verbose", + "tags", + "stop_tokens", + "system", + "tool_model_enabled", + "template", + "input_value", + "system_message", + "stream" + ], + "frozen": false, + "icon": "Ollama", + "last_updated": "2025-09-22T20:14:45.057Z", + "legacy": false, + "metadata": { + "code_hash": "af399d429d23", + "dependencies": { + "dependencies": [ + { + "name": "httpx", + "version": "0.28.1" + }, + { + "name": "langchain_ollama", + "version": "0.2.1" + }, + { + "name": "langflow", + "version": null + } + ], + "total_dependencies": 3 + }, + "keywords": [ + "model", + "llm", + "language model", + "large language model" + ], + "module": "langflow.components.ollama.ollama.ChatOllamaComponent" + }, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Model Response", + "group_outputs": false, + "method": "text_response", + "name": "text_output", + "options": null, + "required_inputs": null, + "selected": "Message", + "tool_mode": true, + "types": [ + "Message" + ], + "value": "__UNDEFINED__" + }, + { + "allows_loop": false, + "cache": true, + "display_name": "Language Model", + "group_outputs": false, + "method": "build_model", + "name": "model_output", + "options": null, + "required_inputs": 
null, + "tool_mode": true, + "types": [ + "LanguageModel" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_type": "Component", + "base_url": { + "_input_type": "MessageTextInput", + "advanced": false, + "display_name": "Base URL", + "dynamic": false, + "info": "Endpoint of the Ollama API.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": true, + "name": "base_url", + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "type": "str", + "value": "OLLAMA_BASE_URL" + }, + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "import asyncio\nfrom typing import Any\nfrom urllib.parse import urljoin\n\nimport httpx\nfrom langchain_ollama import ChatOllama\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.ollama_constants import URL_LIST\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.io import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, MessageTextInput, SliderInput\nfrom langflow.logging import logger\n\nHTTP_STATUS_OK = 200\n\n\nclass ChatOllamaComponent(LCModelComponent):\n display_name = \"Ollama\"\n description = \"Generate text using Ollama Local LLMs.\"\n icon = \"Ollama\"\n name = \"OllamaModel\"\n\n # Define constants for JSON keys\n JSON_MODELS_KEY = \"models\"\n JSON_NAME_KEY = \"name\"\n JSON_CAPABILITIES_KEY = \"capabilities\"\n DESIRED_CAPABILITY = \"completion\"\n TOOL_CALLING_CAPABILITY = \"tools\"\n\n inputs = [\n MessageTextInput(\n name=\"base_url\",\n 
display_name=\"Base URL\",\n info=\"Endpoint of the Ollama API.\",\n value=\"\",\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=[],\n info=\"Refer to https://ollama.com/library for more models.\",\n refresh_button=True,\n real_time_refresh=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n MessageTextInput(\n name=\"format\", display_name=\"Format\", info=\"Specify the format of the output (e.g., json).\", advanced=True\n ),\n DictInput(name=\"metadata\", display_name=\"Metadata\", info=\"Metadata to add to the run trace.\", advanced=True),\n DropdownInput(\n name=\"mirostat\",\n display_name=\"Mirostat\",\n options=[\"Disabled\", \"Mirostat\", \"Mirostat 2.0\"],\n info=\"Enable/disable Mirostat sampling for controlling perplexity.\",\n value=\"Disabled\",\n advanced=True,\n real_time_refresh=True,\n ),\n FloatInput(\n name=\"mirostat_eta\",\n display_name=\"Mirostat Eta\",\n info=\"Learning rate for Mirostat algorithm. (Default: 0.1)\",\n advanced=True,\n ),\n FloatInput(\n name=\"mirostat_tau\",\n display_name=\"Mirostat Tau\",\n info=\"Controls the balance between coherence and diversity of the output. (Default: 5.0)\",\n advanced=True,\n ),\n IntInput(\n name=\"num_ctx\",\n display_name=\"Context Window Size\",\n info=\"Size of the context window for generating tokens. (Default: 2048)\",\n advanced=True,\n ),\n IntInput(\n name=\"num_gpu\",\n display_name=\"Number of GPUs\",\n info=\"Number of GPUs to use for computation. (Default: 1 on macOS, 0 to disable)\",\n advanced=True,\n ),\n IntInput(\n name=\"num_thread\",\n display_name=\"Number of Threads\",\n info=\"Number of threads to use during computation. 
(Default: detected for optimal performance)\",\n advanced=True,\n ),\n IntInput(\n name=\"repeat_last_n\",\n display_name=\"Repeat Last N\",\n info=\"How far back the model looks to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)\",\n advanced=True,\n ),\n FloatInput(\n name=\"repeat_penalty\",\n display_name=\"Repeat Penalty\",\n info=\"Penalty for repetitions in generated text. (Default: 1.1)\",\n advanced=True,\n ),\n FloatInput(name=\"tfs_z\", display_name=\"TFS Z\", info=\"Tail free sampling value. (Default: 1)\", advanced=True),\n IntInput(name=\"timeout\", display_name=\"Timeout\", info=\"Timeout for the request stream.\", advanced=True),\n IntInput(\n name=\"top_k\", display_name=\"Top K\", info=\"Limits token selection to top K. (Default: 40)\", advanced=True\n ),\n FloatInput(name=\"top_p\", display_name=\"Top P\", info=\"Works together with top-k. (Default: 0.9)\", advanced=True),\n BoolInput(name=\"verbose\", display_name=\"Verbose\", info=\"Whether to print out response text.\", advanced=True),\n MessageTextInput(\n name=\"tags\",\n display_name=\"Tags\",\n info=\"Comma-separated list of tags to add to the run trace.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"stop_tokens\",\n display_name=\"Stop Tokens\",\n info=\"Comma-separated list of tokens to signal the model to stop generating text.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"system\", display_name=\"System\", info=\"System to use for generating text.\", advanced=True\n ),\n BoolInput(\n name=\"tool_model_enabled\",\n display_name=\"Tool Model Enabled\",\n info=\"Whether to enable tool calling in the model.\",\n value=True,\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"template\", display_name=\"Template\", info=\"Template to use for generating text.\", advanced=True\n ),\n *LCModelComponent._base_inputs,\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # Mapping mirostat settings to their corresponding values\n 
mirostat_options = {\"Mirostat\": 1, \"Mirostat 2.0\": 2}\n\n # Default to 0 for 'Disabled'\n mirostat_value = mirostat_options.get(self.mirostat, 0)\n\n # Set mirostat_eta and mirostat_tau to None if mirostat is disabled\n if mirostat_value == 0:\n mirostat_eta = None\n mirostat_tau = None\n else:\n mirostat_eta = self.mirostat_eta\n mirostat_tau = self.mirostat_tau\n\n # Mapping system settings to their corresponding values\n llm_params = {\n \"base_url\": self.base_url,\n \"model\": self.model_name,\n \"mirostat\": mirostat_value,\n \"format\": self.format,\n \"metadata\": self.metadata,\n \"tags\": self.tags.split(\",\") if self.tags else None,\n \"mirostat_eta\": mirostat_eta,\n \"mirostat_tau\": mirostat_tau,\n \"num_ctx\": self.num_ctx or None,\n \"num_gpu\": self.num_gpu or None,\n \"num_thread\": self.num_thread or None,\n \"repeat_last_n\": self.repeat_last_n or None,\n \"repeat_penalty\": self.repeat_penalty or None,\n \"temperature\": self.temperature or None,\n \"stop\": self.stop_tokens.split(\",\") if self.stop_tokens else None,\n \"system\": self.system,\n \"tfs_z\": self.tfs_z or None,\n \"timeout\": self.timeout or None,\n \"top_k\": self.top_k or None,\n \"top_p\": self.top_p or None,\n \"verbose\": self.verbose,\n \"template\": self.template,\n }\n\n # Remove parameters with None values\n llm_params = {k: v for k, v in llm_params.items() if v is not None}\n\n try:\n output = ChatOllama(**llm_params)\n except Exception as e:\n msg = (\n \"Unable to connect to the Ollama API. 
\",\n \"Please verify the base URL, ensure the relevant Ollama model is pulled, and try again.\",\n )\n raise ValueError(msg) from e\n\n return output\n\n async def is_valid_ollama_url(self, url: str) -> bool:\n try:\n async with httpx.AsyncClient() as client:\n return (await client.get(urljoin(url, \"api/tags\"))).status_code == HTTP_STATUS_OK\n except httpx.RequestError:\n return False\n\n async def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None):\n if field_name == \"mirostat\":\n if field_value == \"Disabled\":\n build_config[\"mirostat_eta\"][\"advanced\"] = True\n build_config[\"mirostat_tau\"][\"advanced\"] = True\n build_config[\"mirostat_eta\"][\"value\"] = None\n build_config[\"mirostat_tau\"][\"value\"] = None\n\n else:\n build_config[\"mirostat_eta\"][\"advanced\"] = False\n build_config[\"mirostat_tau\"][\"advanced\"] = False\n\n if field_value == \"Mirostat 2.0\":\n build_config[\"mirostat_eta\"][\"value\"] = 0.2\n build_config[\"mirostat_tau\"][\"value\"] = 10\n else:\n build_config[\"mirostat_eta\"][\"value\"] = 0.1\n build_config[\"mirostat_tau\"][\"value\"] = 5\n\n if field_name in {\"base_url\", \"model_name\"}:\n if build_config[\"base_url\"].get(\"load_from_db\", False):\n base_url_value = await self.get_variables(build_config[\"base_url\"].get(\"value\", \"\"), \"base_url\")\n else:\n base_url_value = build_config[\"base_url\"].get(\"value\", \"\")\n\n if not await self.is_valid_ollama_url(base_url_value):\n # Check if any URL in the list is valid\n valid_url = \"\"\n check_urls = URL_LIST\n if self.base_url:\n check_urls = [self.base_url, *URL_LIST]\n for url in check_urls:\n if await self.is_valid_ollama_url(url):\n valid_url = url\n break\n if valid_url != \"\":\n build_config[\"base_url\"][\"value\"] = valid_url\n else:\n msg = \"No valid Ollama URL found.\"\n raise ValueError(msg)\n if field_name in {\"model_name\", \"base_url\", \"tool_model_enabled\"}:\n if await 
self.is_valid_ollama_url(self.base_url):\n tool_model_enabled = build_config[\"tool_model_enabled\"].get(\"value\", False) or self.tool_model_enabled\n build_config[\"model_name\"][\"options\"] = await self.get_models(\n self.base_url, tool_model_enabled=tool_model_enabled\n )\n elif await self.is_valid_ollama_url(build_config[\"base_url\"].get(\"value\", \"\")):\n tool_model_enabled = build_config[\"tool_model_enabled\"].get(\"value\", False) or self.tool_model_enabled\n build_config[\"model_name\"][\"options\"] = await self.get_models(\n build_config[\"base_url\"].get(\"value\", \"\"), tool_model_enabled=tool_model_enabled\n )\n else:\n build_config[\"model_name\"][\"options\"] = []\n if field_name == \"keep_alive_flag\":\n if field_value == \"Keep\":\n build_config[\"keep_alive\"][\"value\"] = \"-1\"\n build_config[\"keep_alive\"][\"advanced\"] = True\n elif field_value == \"Immediately\":\n build_config[\"keep_alive\"][\"value\"] = \"0\"\n build_config[\"keep_alive\"][\"advanced\"] = True\n else:\n build_config[\"keep_alive\"][\"advanced\"] = False\n\n return build_config\n\n async def get_models(self, base_url_value: str, *, tool_model_enabled: bool | None = None) -> list[str]:\n \"\"\"Fetches a list of models from the Ollama API that do not have the \"embedding\" capability.\n\n Args:\n base_url_value (str): The base URL of the Ollama API.\n tool_model_enabled (bool | None, optional): If True, filters the models further to include\n only those that support tool calling. Defaults to None.\n\n Returns:\n list[str]: A list of model names that do not have the \"embedding\" capability. 
If\n `tool_model_enabled` is True, only models supporting tool calling are included.\n\n Raises:\n ValueError: If there is an issue with the API request or response, or if the model\n names cannot be retrieved.\n \"\"\"\n try:\n # Normalize the base URL to avoid the repeated \"/\" at the end\n base_url = base_url_value.rstrip(\"/\") + \"/\"\n\n # Ollama REST API to return models\n tags_url = urljoin(base_url, \"api/tags\")\n\n # Ollama REST API to return model capabilities\n show_url = urljoin(base_url, \"api/show\")\n\n async with httpx.AsyncClient() as client:\n # Fetch available models\n tags_response = await client.get(tags_url)\n tags_response.raise_for_status()\n models = tags_response.json()\n if asyncio.iscoroutine(models):\n models = await models\n await logger.adebug(f\"Available models: {models}\")\n\n # Filter models that are NOT embedding models\n model_ids = []\n for model in models[self.JSON_MODELS_KEY]:\n model_name = model[self.JSON_NAME_KEY]\n await logger.adebug(f\"Checking model: {model_name}\")\n\n payload = {\"model\": model_name}\n show_response = await client.post(show_url, json=payload)\n show_response.raise_for_status()\n json_data = show_response.json()\n if asyncio.iscoroutine(json_data):\n json_data = await json_data\n capabilities = json_data.get(self.JSON_CAPABILITIES_KEY, [])\n await logger.adebug(f\"Model: {model_name}, Capabilities: {capabilities}\")\n\n if self.DESIRED_CAPABILITY in capabilities and (\n not tool_model_enabled or self.TOOL_CALLING_CAPABILITY in capabilities\n ):\n model_ids.append(model_name)\n\n except (httpx.RequestError, ValueError) as e:\n msg = \"Could not get model names from Ollama.\"\n raise ValueError(msg) from e\n\n return model_ids\n" + }, + "format": { + "_input_type": "MessageTextInput", + "advanced": true, + "display_name": "Format", + "dynamic": false, + "info": "Specify the format of the output (e.g., json).", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + 
"load_from_db": false, + "name": "format", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "type": "str", + "value": "" + }, + "input_value": { + "_input_type": "MessageInput", + "advanced": false, + "display_name": "Input", + "dynamic": false, + "info": "", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "input_value", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "type": "str", + "value": "" + }, + "metadata": { + "_input_type": "DictInput", + "advanced": true, + "display_name": "Metadata", + "dynamic": false, + "info": "Metadata to add to the run trace.", + "list": false, + "list_add_label": "Add More", + "name": "metadata", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "type": "dict", + "value": {} + }, + "mirostat": { + "_input_type": "DropdownInput", + "advanced": true, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Mirostat", + "dynamic": false, + "info": "Enable/disable Mirostat sampling for controlling perplexity.", + "name": "mirostat", + "options": [ + "Disabled", + "Mirostat", + "Mirostat 2.0" + ], + "options_metadata": [], + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "str", + "value": "Disabled" + }, + "mirostat_eta": { + "_input_type": "FloatInput", + "advanced": true, + "display_name": "Mirostat Eta", + "dynamic": false, + "info": "Learning rate for Mirostat algorithm. 
(Default: 0.1)", + "list": false, + "list_add_label": "Add More", + "name": "mirostat_eta", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "float", + "value": "" + }, + "mirostat_tau": { + "_input_type": "FloatInput", + "advanced": true, + "display_name": "Mirostat Tau", + "dynamic": false, + "info": "Controls the balance between coherence and diversity of the output. (Default: 5.0)", + "list": false, + "list_add_label": "Add More", + "name": "mirostat_tau", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "float", + "value": "" + }, + "model_name": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Model Name", + "dynamic": false, + "info": "Refer to https://ollama.com/library for more models.", + "name": "model_name", + "options": [ + "qwen3:4b" + ], + "options_metadata": [], + "placeholder": "", + "real_time_refresh": true, + "refresh_button": true, + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "str", + "value": "qwen3:4b" + }, + "num_ctx": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Context Window Size", + "dynamic": false, + "info": "Size of the context window for generating tokens. (Default: 2048)", + "list": false, + "list_add_label": "Add More", + "name": "num_ctx", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "int", + "value": "" + }, + "num_gpu": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Number of GPUs", + "dynamic": false, + "info": "Number of GPUs to use for computation. 
(Default: 1 on macOS, 0 to disable)", + "list": false, + "list_add_label": "Add More", + "name": "num_gpu", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "int", + "value": "" + }, + "num_thread": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Number of Threads", + "dynamic": false, + "info": "Number of threads to use during computation. (Default: detected for optimal performance)", + "list": false, + "list_add_label": "Add More", + "name": "num_thread", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "int", + "value": "" + }, + "repeat_last_n": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Repeat Last N", + "dynamic": false, + "info": "How far back the model looks to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)", + "list": false, + "list_add_label": "Add More", + "name": "repeat_last_n", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "int", + "value": "" + }, + "repeat_penalty": { + "_input_type": "FloatInput", + "advanced": true, + "display_name": "Repeat Penalty", + "dynamic": false, + "info": "Penalty for repetitions in generated text. 
(Default: 1.1)", + "list": false, + "list_add_label": "Add More", + "name": "repeat_penalty", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "float", + "value": "" + }, + "stop_tokens": { + "_input_type": "MessageTextInput", + "advanced": true, + "display_name": "Stop Tokens", + "dynamic": false, + "info": "Comma-separated list of tokens to signal the model to stop generating text.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "stop_tokens", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "type": "str", + "value": "" + }, + "stream": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Stream", + "dynamic": false, + "info": "Stream the response from the model. Streaming works only in Chat.", + "list": false, + "list_add_label": "Add More", + "name": "stream", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "bool", + "value": false + }, + "system": { + "_input_type": "MessageTextInput", + "advanced": true, + "display_name": "System", + "dynamic": false, + "info": "System to use for generating text.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "system", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "type": "str", + "value": "" + }, + "system_message": { + "_input_type": "MultilineInput", + "advanced": false, + "copy_field": false, + "display_name": "System Message", + "dynamic": false, + "info": "System message to pass to the model.", + "input_types": [ + "Message" + ], + "list": false, + 
"list_add_label": "Add More", + "load_from_db": false, + "multiline": true, + "name": "system_message", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "type": "str", + "value": "" + }, + "tags": { + "_input_type": "MessageTextInput", + "advanced": true, + "display_name": "Tags", + "dynamic": false, + "info": "Comma-separated list of tags to add to the run trace.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "tags", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "type": "str", + "value": "" + }, + "temperature": { + "_input_type": "SliderInput", + "advanced": true, + "display_name": "Temperature", + "dynamic": false, + "info": "", + "max_label": "", + "max_label_icon": "", + "min_label": "", + "min_label_icon": "", + "name": "temperature", + "placeholder": "", + "range_spec": { + "max": 1, + "min": 0, + "step": 0.01, + "step_type": "float" + }, + "required": false, + "show": true, + "slider_buttons": false, + "slider_buttons_options": [], + "slider_input": false, + "title_case": false, + "tool_mode": false, + "type": "slider", + "value": 0.1 + }, + "template": { + "_input_type": "MessageTextInput", + "advanced": true, + "display_name": "Template", + "dynamic": false, + "info": "Template to use for generating text.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "template", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "type": "str", + "value": "" + }, + "tfs_z": { + "_input_type": "FloatInput", + "advanced": true, + "display_name": "TFS Z", + "dynamic": false, + "info": "Tail free 
sampling value. (Default: 1)", + "list": false, + "list_add_label": "Add More", + "name": "tfs_z", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "float", + "value": "" + }, + "timeout": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Timeout", + "dynamic": false, + "info": "Timeout for the request stream.", + "list": false, + "list_add_label": "Add More", + "name": "timeout", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "int", + "value": "" + }, + "tool_model_enabled": { + "_input_type": "BoolInput", + "advanced": false, + "display_name": "Tool Model Enabled", + "dynamic": false, + "info": "Whether to enable tool calling in the model.", + "list": false, + "list_add_label": "Add More", + "name": "tool_model_enabled", + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "bool", + "value": true + }, + "top_k": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Top K", + "dynamic": false, + "info": "Limits token selection to top K. (Default: 40)", + "list": false, + "list_add_label": "Add More", + "name": "top_k", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "int", + "value": "" + }, + "top_p": { + "_input_type": "FloatInput", + "advanced": true, + "display_name": "Top P", + "dynamic": false, + "info": "Works together with top-k. 
(Default: 0.9)", + "list": false, + "list_add_label": "Add More", + "name": "top_p", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "float", + "value": "" + }, + "verbose": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Verbose", + "dynamic": false, + "info": "Whether to print out response text.", + "list": false, + "list_add_label": "Add More", + "name": "verbose", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "bool", + "value": false + } + }, + "tool_mode": false + }, + "selected_output": "text_output", + "showNode": true, + "type": "OllamaModel" + }, + "dragging": false, + "id": "OllamaModel-XDGqZ", + "measured": { + "height": 494, + "width": 320 + }, + "position": { + "x": 248.08287272472313, + "y": 216.98088326271431 + }, + "selected": false, + "type": "genericNode" + } \ No newline at end of file diff --git a/flows/components/watsonx_embedding.json b/flows/components/watsonx_embedding.json index 447b37f5..850cfb07 100644 --- a/flows/components/watsonx_embedding.json +++ b/flows/components/watsonx_embedding.json @@ -2,7 +2,9 @@ "data": { "id": "WatsonxEmbeddingsComponent-pJfXI", "node": { - "base_classes": ["Embeddings"], + "base_classes": [ + "Embeddings" + ], "beta": false, "conditional_paths": [], "custom_fields": {}, @@ -20,6 +22,7 @@ ], "frozen": false, "icon": "WatsonxAI", + "last_updated": "2025-09-22T20:11:38.181Z", "legacy": false, "metadata": { "code_hash": "b6c6d50cc7ed", @@ -43,7 +46,7 @@ }, { "name": "langflow", - "version": "1.5.0.post2" + "version": null } ], "total_dependencies": 5 @@ -60,9 +63,13 @@ "group_outputs": false, "method": "build_embeddings", "name": "embeddings", + "options": null, + "required_inputs": null, "selected": "Embeddings", "tool_mode": true, - "types": ["Embeddings"], + "types": [ + "Embeddings" + ], "value": 
"__UNDEFINED__" } ], @@ -131,7 +138,16 @@ "dynamic": true, "info": "", "name": "model_name", - "options": [], + "options": [ + "ibm/granite-embedding-107m-multilingual", + "ibm/granite-embedding-278m-multilingual", + "ibm/slate-125m-english-rtrvr", + "ibm/slate-125m-english-rtrvr-v2", + "ibm/slate-30m-english-rtrvr", + "ibm/slate-30m-english-rtrvr-v2", + "intfloat/multilingual-e5-large", + "sentence-transformers/all-minilm-l6-v2" + ], "options_metadata": [], "placeholder": "", "required": true, @@ -140,7 +156,8 @@ "toggle": false, "tool_mode": false, "trace_as_metadata": true, - "type": "str" + "type": "str", + "value": "ibm/granite-embedding-107m-multilingual" }, "project_id": { "_input_type": "StrInput", @@ -205,7 +222,8 @@ "toggle": false, "tool_mode": false, "trace_as_metadata": true, - "type": "str" + "type": "str", + "value": "https://us-south.ml.cloud.ibm.com" } }, "tool_mode": false @@ -213,15 +231,16 @@ "showNode": true, "type": "WatsonxEmbeddingsComponent" }, + "dragging": false, "id": "WatsonxEmbeddingsComponent-pJfXI", "measured": { "height": 467, "width": 320 }, "position": { - "x": 999.129592360849, - "y": 753.2332292351236 + "x": 364.4406919374723, + "y": 282.29319267029086 }, "selected": false, "type": "genericNode" -} +} \ No newline at end of file diff --git a/flows/components/watsonx_llm.json b/flows/components/watsonx_llm.json index 555c9d59..99a4a936 100644 --- a/flows/components/watsonx_llm.json +++ b/flows/components/watsonx_llm.json @@ -2,7 +2,10 @@ "data": { "id": "IBMwatsonxModel-jA4Nw", "node": { - "base_classes": ["LanguageModel", "Message"], + "base_classes": [ + "LanguageModel", + "Message" + ], "beta": false, "conditional_paths": [], "custom_fields": {}, @@ -31,6 +34,7 @@ ], "frozen": false, "icon": "WatsonxAI", + "last_updated": "2025-09-22T20:03:31.248Z", "legacy": false, "metadata": { "code_hash": "7767fd69a954", @@ -50,12 +54,17 @@ }, { "name": "langflow", - "version": "1.5.0.post2" + "version": null } ], "total_dependencies": 4 
}, - "keywords": ["model", "llm", "language model", "large language model"], + "keywords": [ + "model", + "llm", + "language model", + "large language model" + ], "module": "langflow.components.ibm.watsonx.WatsonxAIComponent" }, "minimized": false, @@ -68,8 +77,12 @@ "group_outputs": false, "method": "text_response", "name": "text_output", + "options": null, + "required_inputs": null, "tool_mode": true, - "types": ["Message"], + "types": [ + "Message" + ], "value": "__UNDEFINED__" }, { @@ -79,9 +92,13 @@ "group_outputs": false, "method": "build_model", "name": "model_output", + "options": null, + "required_inputs": null, "selected": "LanguageModel", "tool_mode": true, - "types": ["LanguageModel"], + "types": [ + "LanguageModel" + ], "value": "__UNDEFINED__" } ], @@ -157,7 +174,9 @@ "display_name": "Input", "dynamic": false, "info": "", - "input_types": ["Message"], + "input_types": [ + "Message" + ], "list": false, "list_add_label": "Add More", "load_from_db": false, @@ -242,7 +261,26 @@ "dynamic": true, "info": "", "name": "model_name", - "options": [], + "options": [ + "ibm/granite-3-2-8b-instruct", + "ibm/granite-3-2b-instruct", + "ibm/granite-3-3-8b-instruct", + "ibm/granite-3-8b-instruct", + "ibm/granite-guardian-3-2b", + "ibm/granite-guardian-3-8b", + "ibm/granite-vision-3-2-2b", + "meta-llama/llama-3-2-11b-vision-instruct", + "meta-llama/llama-3-2-90b-vision-instruct", + "meta-llama/llama-3-3-70b-instruct", + "meta-llama/llama-3-405b-instruct", + "meta-llama/llama-4-maverick-17b-128e-instruct-fp8", + "meta-llama/llama-guard-3-11b-vision", + "mistralai/mistral-large", + "mistralai/mistral-medium-2505", + "mistralai/mistral-small-3-1-24b-instruct-2503", + "mistralai/pixtral-12b", + "openai/gpt-oss-120b" + ], "options_metadata": [], "placeholder": "", "required": true, @@ -251,7 +289,8 @@ "toggle": false, "tool_mode": false, "trace_as_metadata": true, - "type": "str" + "type": "str", + "value": "ibm/granite-3-2-8b-instruct" }, "presence_penalty": { 
"_input_type": "SliderInput", @@ -362,7 +401,9 @@ "display_name": "System Message", "dynamic": false, "info": "System message to pass to the model.", - "input_types": ["Message"], + "input_types": [ + "Message" + ], "list": false, "list_add_label": "Add More", "load_from_db": false, @@ -484,7 +525,8 @@ "toggle": false, "tool_mode": false, "trace_as_metadata": true, - "type": "str" + "type": "str", + "value": "https://us-south.ml.cloud.ibm.com" } }, "tool_mode": false @@ -493,15 +535,16 @@ "showNode": true, "type": "IBMwatsonxModel" }, + "dragging": false, "id": "IBMwatsonxModel-jA4Nw", "measured": { "height": 632, "width": 320 }, "position": { - "x": 562.2658900512183, - "y": 895.3455179382565 + "x": 371.93566807042805, + "y": 197.47711431325635 }, "selected": false, "type": "genericNode" -} +} \ No newline at end of file diff --git a/flows/components/watsonx_llm_text.json b/flows/components/watsonx_llm_text.json new file mode 100644 index 00000000..a2966a48 --- /dev/null +++ b/flows/components/watsonx_llm_text.json @@ -0,0 +1,551 @@ +{ + "data": { + "id": "IBMwatsonxModel-18kmA", + "node": { + "base_classes": [ + "LanguageModel", + "Message" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Generate text using IBM watsonx.ai foundation models.", + "display_name": "IBM watsonx.ai", + "documentation": "", + "edited": false, + "field_order": [ + "input_value", + "system_message", + "stream", + "url", + "project_id", + "api_key", + "model_name", + "max_tokens", + "stop_sequence", + "temperature", + "top_p", + "frequency_penalty", + "presence_penalty", + "seed", + "logprobs", + "top_logprobs", + "logit_bias" + ], + "frozen": false, + "icon": "WatsonxAI", + "last_updated": "2025-09-22T20:03:31.248Z", + "legacy": false, + "metadata": { + "code_hash": "7767fd69a954", + "dependencies": { + "dependencies": [ + { + "name": "requests", + "version": "2.32.5" + }, + { + "name": "langchain_ibm", + "version": "0.3.16" + }, + { + "name": 
"pydantic", + "version": "2.10.6" + }, + { + "name": "langflow", + "version": null + } + ], + "total_dependencies": 4 + }, + "keywords": [ + "model", + "llm", + "language model", + "large language model" + ], + "module": "langflow.components.ibm.watsonx.WatsonxAIComponent" + }, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Model Response", + "group_outputs": false, + "method": "text_response", + "name": "text_output", + "options": null, + "required_inputs": null, + "selected": "Message", + "tool_mode": true, + "types": [ + "Message" + ], + "value": "__UNDEFINED__" + }, + { + "allows_loop": false, + "cache": true, + "display_name": "Language Model", + "group_outputs": false, + "method": "build_model", + "name": "model_output", + "options": null, + "required_inputs": null, + "selected": "LanguageModel", + "tool_mode": true, + "types": [ + "LanguageModel" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_type": "Component", + "api_key": { + "_input_type": "SecretStrInput", + "advanced": false, + "display_name": "API Key", + "dynamic": false, + "info": "The API Key to use for the model.", + "input_types": [], + "load_from_db": true, + "name": "api_key", + "password": true, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "str", + "value": "WATSONX_API_KEY" + }, + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "import json\nfrom typing import Any\n\nimport requests\nfrom langchain_ibm import ChatWatsonx\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.field_typing import LanguageModel\nfrom 
langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging.logger import logger\nfrom langflow.schema.dotdict import dotdict\n\n\nclass WatsonxAIComponent(LCModelComponent):\n display_name = \"IBM watsonx.ai\"\n description = \"Generate text using IBM watsonx.ai foundation models.\"\n icon = \"WatsonxAI\"\n name = \"IBMwatsonxModel\"\n beta = False\n\n _default_models = [\"ibm/granite-3-2b-instruct\", \"ibm/granite-3-8b-instruct\", \"ibm/granite-13b-instruct-v2\"]\n\n inputs = [\n *LCModelComponent._base_inputs,\n DropdownInput(\n name=\"url\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API.\",\n value=None,\n options=[\n \"https://us-south.ml.cloud.ibm.com\",\n \"https://eu-de.ml.cloud.ibm.com\",\n \"https://eu-gb.ml.cloud.ibm.com\",\n \"https://au-syd.ml.cloud.ibm.com\",\n \"https://jp-tok.ml.cloud.ibm.com\",\n \"https://ca-tor.ml.cloud.ibm.com\",\n ],\n real_time_refresh=True,\n ),\n StrInput(\n name=\"project_id\",\n display_name=\"watsonx Project ID\",\n required=True,\n info=\"The project ID or deployment space ID that is associated with the foundation model.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"API Key\",\n info=\"The API Key to use for the model.\",\n required=True,\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=[],\n value=None,\n dynamic=True,\n required=True,\n ),\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate.\",\n range_spec=RangeSpec(min=1, max=4096),\n value=1000,\n ),\n StrInput(\n name=\"stop_sequence\",\n display_name=\"Stop Sequence\",\n advanced=True,\n info=\"Sequence where generation should stop.\",\n field_type=\"str\",\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n info=\"Controls randomness, higher values increase 
diversity.\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=2, step=0.01),\n advanced=True,\n ),\n SliderInput(\n name=\"top_p\",\n display_name=\"Top P\",\n info=\"The cumulative probability cutoff for token selection. \"\n \"Lower values mean sampling from a smaller, more top-weighted nucleus.\",\n value=0.9,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n SliderInput(\n name=\"frequency_penalty\",\n display_name=\"Frequency Penalty\",\n info=\"Penalty for frequency of token usage.\",\n value=0.5,\n range_spec=RangeSpec(min=-2.0, max=2.0, step=0.01),\n advanced=True,\n ),\n SliderInput(\n name=\"presence_penalty\",\n display_name=\"Presence Penalty\",\n info=\"Penalty for token presence in prior text.\",\n value=0.3,\n range_spec=RangeSpec(min=-2.0, max=2.0, step=0.01),\n advanced=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Random Seed\",\n advanced=True,\n info=\"The random seed for the model.\",\n value=8,\n ),\n BoolInput(\n name=\"logprobs\",\n display_name=\"Log Probabilities\",\n advanced=True,\n info=\"Whether to return log probabilities of the output tokens.\",\n value=True,\n ),\n IntInput(\n name=\"top_logprobs\",\n display_name=\"Top Log Probabilities\",\n advanced=True,\n info=\"Number of most likely tokens to return at each position.\",\n value=3,\n range_spec=RangeSpec(min=1, max=20),\n ),\n StrInput(\n name=\"logit_bias\",\n display_name=\"Logit Bias\",\n advanced=True,\n info='JSON string of token IDs to bias or suppress (e.g., {\"1003\": -100, \"1004\": 100}).',\n field_type=\"str\",\n ),\n ]\n\n @staticmethod\n def fetch_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\"version\": \"2024-09-16\", \"filters\": \"function_text_chat,!lifecycle_withdrawn\"}\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = 
[model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models. Using default models.\")\n return WatsonxAIComponent._default_models\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None):\n \"\"\"Update model options when URL or API key changes.\"\"\"\n logger.info(\"Updating build config. Field name: %s, Field value: %s\", field_name, field_value)\n\n if field_name == \"url\" and field_value:\n try:\n models = self.fetch_models(base_url=build_config.url.value)\n build_config.model_name.options = models\n if build_config.model_name.value:\n build_config.model_name.value = models[0]\n info_message = f\"Updated model options: {len(models)} models found in {build_config.url.value}\"\n logger.info(info_message)\n except Exception: # noqa: BLE001\n logger.exception(\"Error updating model options.\")\n\n def build_model(self) -> LanguageModel:\n # Parse logit_bias from JSON string if provided\n logit_bias = None\n if hasattr(self, \"logit_bias\") and self.logit_bias:\n try:\n logit_bias = json.loads(self.logit_bias)\n except json.JSONDecodeError:\n logger.warning(\"Invalid logit_bias JSON format. 
Using default instead.\")\n logit_bias = {\"1003\": -100, \"1004\": -100}\n\n chat_params = {\n \"max_tokens\": getattr(self, \"max_tokens\", None),\n \"temperature\": getattr(self, \"temperature\", None),\n \"top_p\": getattr(self, \"top_p\", None),\n \"frequency_penalty\": getattr(self, \"frequency_penalty\", None),\n \"presence_penalty\": getattr(self, \"presence_penalty\", None),\n \"seed\": getattr(self, \"seed\", None),\n \"stop\": [self.stop_sequence] if self.stop_sequence else [],\n \"n\": 1,\n \"logprobs\": getattr(self, \"logprobs\", True),\n \"top_logprobs\": getattr(self, \"top_logprobs\", None),\n \"time_limit\": 600000,\n \"logit_bias\": logit_bias,\n }\n\n return ChatWatsonx(\n apikey=SecretStr(self.api_key).get_secret_value(),\n url=self.url,\n project_id=self.project_id,\n model_id=self.model_name,\n params=chat_params,\n streaming=self.stream,\n )\n" + }, + "frequency_penalty": { + "_input_type": "SliderInput", + "advanced": true, + "display_name": "Frequency Penalty", + "dynamic": false, + "info": "Penalty for frequency of token usage.", + "max_label": "", + "max_label_icon": "", + "min_label": "", + "min_label_icon": "", + "name": "frequency_penalty", + "placeholder": "", + "range_spec": { + "max": 2, + "min": -2, + "step": 0.01, + "step_type": "float" + }, + "required": false, + "show": true, + "slider_buttons": false, + "slider_buttons_options": [], + "slider_input": false, + "title_case": false, + "tool_mode": false, + "type": "slider", + "value": 0.5 + }, + "input_value": { + "_input_type": "MessageInput", + "advanced": false, + "display_name": "Input", + "dynamic": false, + "info": "", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "input_value", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "type": "str", + "value": "" + }, + "logit_bias": { + 
"_input_type": "StrInput", + "advanced": true, + "display_name": "Logit Bias", + "dynamic": false, + "info": "JSON string of token IDs to bias or suppress (e.g., {\"1003\": -100, \"1004\": 100}).", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "logit_bias", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "str", + "value": "" + }, + "logprobs": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Log Probabilities", + "dynamic": false, + "info": "Whether to return log probabilities of the output tokens.", + "list": false, + "list_add_label": "Add More", + "name": "logprobs", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "bool", + "value": true + }, + "max_tokens": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Max Tokens", + "dynamic": false, + "info": "The maximum number of tokens to generate.", + "list": false, + "list_add_label": "Add More", + "name": "max_tokens", + "placeholder": "", + "range_spec": { + "max": 4096, + "min": 1, + "step": 0.1, + "step_type": "float" + }, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "int", + "value": 1000 + }, + "model_name": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Model Name", + "dynamic": true, + "info": "", + "name": "model_name", + "options": [ + "ibm/granite-3-2-8b-instruct", + "ibm/granite-3-2b-instruct", + "ibm/granite-3-3-8b-instruct", + "ibm/granite-3-8b-instruct", + "ibm/granite-guardian-3-2b", + "ibm/granite-guardian-3-8b", + "ibm/granite-vision-3-2-2b", + "meta-llama/llama-3-2-11b-vision-instruct", + "meta-llama/llama-3-2-90b-vision-instruct", + "meta-llama/llama-3-3-70b-instruct", + 
"meta-llama/llama-3-405b-instruct", + "meta-llama/llama-4-maverick-17b-128e-instruct-fp8", + "meta-llama/llama-guard-3-11b-vision", + "mistralai/mistral-large", + "mistralai/mistral-medium-2505", + "mistralai/mistral-small-3-1-24b-instruct-2503", + "mistralai/pixtral-12b", + "openai/gpt-oss-120b" + ], + "options_metadata": [], + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "str", + "value": "ibm/granite-3-2-8b-instruct" + }, + "presence_penalty": { + "_input_type": "SliderInput", + "advanced": true, + "display_name": "Presence Penalty", + "dynamic": false, + "info": "Penalty for token presence in prior text.", + "max_label": "", + "max_label_icon": "", + "min_label": "", + "min_label_icon": "", + "name": "presence_penalty", + "placeholder": "", + "range_spec": { + "max": 2, + "min": -2, + "step": 0.01, + "step_type": "float" + }, + "required": false, + "show": true, + "slider_buttons": false, + "slider_buttons_options": [], + "slider_input": false, + "title_case": false, + "tool_mode": false, + "type": "slider", + "value": 0.3 + }, + "project_id": { + "_input_type": "StrInput", + "advanced": false, + "display_name": "watsonx Project ID", + "dynamic": false, + "info": "The project ID or deployment space ID that is associated with the foundation model.", + "list": false, + "list_add_label": "Add More", + "load_from_db": true, + "name": "project_id", + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "str", + "value": "WATSONX_PROJECT_ID" + }, + "seed": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Random Seed", + "dynamic": false, + "info": "The random seed for the model.", + "list": false, + "list_add_label": "Add More", + "name": "seed", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + 
"trace_as_metadata": true, + "type": "int", + "value": 8 + }, + "stop_sequence": { + "_input_type": "StrInput", + "advanced": true, + "display_name": "Stop Sequence", + "dynamic": false, + "info": "Sequence where generation should stop.", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "stop_sequence", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "str", + "value": "" + }, + "stream": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Stream", + "dynamic": false, + "info": "Stream the response from the model. Streaming works only in Chat.", + "list": false, + "list_add_label": "Add More", + "name": "stream", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "bool", + "value": false + }, + "system_message": { + "_input_type": "MultilineInput", + "advanced": false, + "copy_field": false, + "display_name": "System Message", + "dynamic": false, + "info": "System message to pass to the model.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "multiline": true, + "name": "system_message", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "type": "str", + "value": "" + }, + "temperature": { + "_input_type": "SliderInput", + "advanced": true, + "display_name": "Temperature", + "dynamic": false, + "info": "Controls randomness, higher values increase diversity.", + "max_label": "", + "max_label_icon": "", + "min_label": "", + "min_label_icon": "", + "name": "temperature", + "placeholder": "", + "range_spec": { + "max": 2, + "min": 0, + "step": 0.01, + "step_type": "float" + }, + "required": false, + "show": true, + "slider_buttons": false, + 
"slider_buttons_options": [], + "slider_input": false, + "title_case": false, + "tool_mode": false, + "type": "slider", + "value": 0.1 + }, + "top_logprobs": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Top Log Probabilities", + "dynamic": false, + "info": "Number of most likely tokens to return at each position.", + "list": false, + "list_add_label": "Add More", + "name": "top_logprobs", + "placeholder": "", + "range_spec": { + "max": 20, + "min": 1, + "step": 0.1, + "step_type": "float" + }, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "int", + "value": 3 + }, + "top_p": { + "_input_type": "SliderInput", + "advanced": true, + "display_name": "Top P", + "dynamic": false, + "info": "The cumulative probability cutoff for token selection. Lower values mean sampling from a smaller, more top-weighted nucleus.", + "max_label": "", + "max_label_icon": "", + "min_label": "", + "min_label_icon": "", + "name": "top_p", + "placeholder": "", + "range_spec": { + "max": 1, + "min": 0, + "step": 0.01, + "step_type": "float" + }, + "required": false, + "show": true, + "slider_buttons": false, + "slider_buttons_options": [], + "slider_input": false, + "title_case": false, + "tool_mode": false, + "type": "slider", + "value": 0.9 + }, + "url": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "watsonx API Endpoint", + "dynamic": false, + "info": "The base URL of the API.", + "name": "url", + "options": [ + "https://us-south.ml.cloud.ibm.com", + "https://eu-de.ml.cloud.ibm.com", + "https://eu-gb.ml.cloud.ibm.com", + "https://au-syd.ml.cloud.ibm.com", + "https://jp-tok.ml.cloud.ibm.com", + "https://ca-tor.ml.cloud.ibm.com" + ], + "options_metadata": [], + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + 
"trace_as_metadata": true, + "type": "str", + "value": "https://us-south.ml.cloud.ibm.com" + } + }, + "tool_mode": false + }, + "selected_output": "text_output", + "showNode": true, + "type": "IBMwatsonxModel" + }, + "dragging": false, + "id": "IBMwatsonxModel-18kmA", + "measured": { + "height": 632, + "width": 320 + }, + "position": { + "x": 370.8989669694083, + "y": 184 + }, + "selected": true, + "type": "genericNode" + } \ No newline at end of file diff --git a/flows/openrag_nudges.json b/flows/openrag_nudges.json index 4f5e775a..77839d0b 100644 --- a/flows/openrag_nudges.json +++ b/flows/openrag_nudges.json @@ -29,36 +29,6 @@ "target": "OpenSearch-iYfjf", "targetHandle": "{œfieldNameœ:œembeddingœ,œidœ:œOpenSearch-iYfjfœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}" }, - { - "animated": false, - "className": "", - "data": { - "sourceHandle": { - "dataType": "LanguageModelComponent", - "id": "LanguageModelComponent-0YME7", - "name": "text_output", - "output_types": [ - "Message" - ] - }, - "targetHandle": { - "fieldName": "input_value", - "id": "ChatOutput-BMVN5", - "inputTypes": [ - "Data", - "DataFrame", - "Message" - ], - "type": "other" - } - }, - "id": "xy-edge__LanguageModelComponent-0YME7{œdataTypeœ:œLanguageModelComponentœ,œidœ:œLanguageModelComponent-0YME7œ,œnameœ:œtext_outputœ,œoutput_typesœ:[œMessageœ]}-ChatOutput-BMVN5{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-BMVN5œ,œinputTypesœ:[œDataœ,œDataFrameœ,œMessageœ],œtypeœ:œotherœ}", - "selected": false, - "source": "LanguageModelComponent-0YME7", - "sourceHandle": "{œdataTypeœ:œLanguageModelComponentœ,œidœ:œLanguageModelComponent-0YME7œ,œnameœ:œtext_outputœ,œoutput_typesœ:[œMessageœ]}", - "target": "ChatOutput-BMVN5", - "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-BMVN5œ,œinputTypesœ:[œDataœ,œDataFrameœ,œMessageœ],œtypeœ:œotherœ}" - }, { "animated": false, "className": "", @@ -87,34 +57,6 @@ "target": "Prompt Template-Wo6kR", "targetHandle": "{œfieldNameœ:œdocsœ,œidœ:œPrompt 
Template-Wo6kRœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}" }, - { - "animated": false, - "className": "", - "data": { - "sourceHandle": { - "dataType": "Prompt Template", - "id": "Prompt Template-Wo6kR", - "name": "prompt", - "output_types": [ - "Message" - ] - }, - "targetHandle": { - "fieldName": "input_value", - "id": "LanguageModelComponent-0YME7", - "inputTypes": [ - "Message" - ], - "type": "str" - } - }, - "id": "xy-edge__Prompt Template-Wo6kR{œdataTypeœ:œPrompt Templateœ,œidœ:œPrompt Template-Wo6kRœ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}-LanguageModelComponent-0YME7{œfieldNameœ:œinput_valueœ,œidœ:œLanguageModelComponent-0YME7œ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", - "selected": false, - "source": "Prompt Template-Wo6kR", - "sourceHandle": "{œdataTypeœ:œPrompt Templateœ,œidœ:œPrompt Template-Wo6kRœ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}", - "target": "LanguageModelComponent-0YME7", - "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œLanguageModelComponent-0YME7œ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}" - }, { "animated": false, "className": "", @@ -146,6 +88,7 @@ }, { "animated": false, + "className": "", "data": { "sourceHandle": { "dataType": "ChatInput", @@ -170,6 +113,62 @@ "sourceHandle": "{œdataTypeœ:œChatInputœ,œidœ:œChatInput-bqH7Hœ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}", "target": "Prompt Template-Wo6kR", "targetHandle": "{œfieldNameœ:œpromptœ,œidœ:œPrompt Template-Wo6kRœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}" + }, + { + "animated": false, + "data": { + "sourceHandle": { + "dataType": "LanguageModelComponent", + "id": "LanguageModelComponent-NSTA6", + "name": "text_output", + "output_types": [ + "Message" + ] + }, + "targetHandle": { + "fieldName": "input_value", + "id": "ChatOutput-BMVN5", + "inputTypes": [ + "Data", + "DataFrame", + "Message" + ], + "type": "other" + } + }, + "id": 
"xy-edge__LanguageModelComponent-NSTA6{œdataTypeœ:œLanguageModelComponentœ,œidœ:œLanguageModelComponent-NSTA6œ,œnameœ:œtext_outputœ,œoutput_typesœ:[œMessageœ]}-ChatOutput-BMVN5{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-BMVN5œ,œinputTypesœ:[œDataœ,œDataFrameœ,œMessageœ],œtypeœ:œotherœ}", + "selected": false, + "source": "LanguageModelComponent-NSTA6", + "sourceHandle": "{œdataTypeœ:œLanguageModelComponentœ,œidœ:œLanguageModelComponent-NSTA6œ,œnameœ:œtext_outputœ,œoutput_typesœ:[œMessageœ]}", + "target": "ChatOutput-BMVN5", + "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-BMVN5œ,œinputTypesœ:[œDataœ,œDataFrameœ,œMessageœ],œtypeœ:œotherœ}" + }, + { + "animated": false, + "data": { + "sourceHandle": { + "dataType": "Prompt Template", + "id": "Prompt Template-Wo6kR", + "name": "prompt", + "output_types": [ + "Message" + ] + }, + "targetHandle": { + "fieldName": "input_value", + "id": "LanguageModelComponent-NSTA6", + "inputTypes": [ + "Message" + ], + "type": "str" + } + }, + "id": "xy-edge__Prompt Template-Wo6kR{œdataTypeœ:œPrompt Templateœ,œidœ:œPrompt Template-Wo6kRœ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}-LanguageModelComponent-NSTA6{œfieldNameœ:œinput_valueœ,œidœ:œLanguageModelComponent-NSTA6œ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "selected": false, + "source": "Prompt Template-Wo6kR", + "sourceHandle": "{œdataTypeœ:œPrompt Templateœ,œidœ:œPrompt Template-Wo6kRœ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}", + "target": "LanguageModelComponent-NSTA6", + "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œLanguageModelComponent-NSTA6œ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}" } ], "nodes": [ @@ -1299,7 +1298,7 @@ "frozen": false, "icon": "binary", "key": "EmbeddingModel", - "last_updated": "2025-09-04T20:03:12.867Z", + "last_updated": "2025-09-22T20:02:23.156Z", "legacy": false, "lf_version": "1.5.0.post2", "metadata": {}, @@ -1568,7 +1567,383 @@ }, { "data": { - "id": "LanguageModelComponent-0YME7", + "id": "Prompt Template-Wo6kR", + "node": { + 
"base_classes": [ + "Message" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": { + "template": [ + "prompt", + "docs" + ] + }, + "description": "Create a prompt template with dynamic variables.", + "display_name": "Prompt Template", + "documentation": "https://docs.langflow.org/components-prompts", + "edited": false, + "error": null, + "field_order": [ + "template", + "tool_placeholder" + ], + "frozen": false, + "full_path": null, + "icon": "braces", + "is_composition": null, + "is_input": null, + "is_output": null, + "legacy": false, + "metadata": { + "code_hash": "c478a572ccd3", + "dependencies": { + "dependencies": [ + { + "name": "langflow", + "version": "0.5.0.post2" + } + ], + "total_dependencies": 1 + }, + "module": "langflow.components.processing.prompt.PromptComponent" + }, + "minimized": false, + "name": "", + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Prompt", + "group_outputs": false, + "hidden": null, + "method": "build_prompt", + "name": "prompt", + "options": null, + "required_inputs": null, + "selected": "Message", + "tool_mode": true, + "types": [ + "Message" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "priority": 0, + "template": { + "_type": "Component", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n 
display_name: str = \"Prompt Template\"\n description: str = \"Create a prompt template with dynamic variables.\"\n documentation: str = \"https://docs.langflow.org/components-prompts\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt Template\"\n priority = 0 # Set priority to 0 to make it appear first\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n 
update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n" + }, + "docs": { + "advanced": false, + "display_name": "docs", + "dynamic": false, + "field_type": "str", + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Message" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "docs", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + }, + "prompt": { + "advanced": false, + "display_name": "prompt", + "dynamic": false, + "field_type": "str", + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Message" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "prompt", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + }, + "template": { + "_input_type": "PromptInput", + "advanced": false, + "display_name": "Template", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "template", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "type": "prompt", + "value": "You are generating prompt nudges to help a user explore a corpus.\n\nTask:\n1) Skim the documents to infer common themes, entities, or tasks.\n2) Propose exactly three concise, distinct prompt nudges that encourage useful next queries.\n3) If the chat history is provided, use it to generate new questions that the user might have, based on the llm's response to his previous query. 
DO NOT repeat user questions.\n4) Make the nudges concise, close to 40 characters.\n5) The nudges are questions or commands that the user can make to the chatbot, which will respond looking at the corpus.\n6) Return strings only, separating the nudges by a newline. Don't include quotation marks.\n7) If any error occurred, return blank. This will be used in production, so don't ask for more info or confirm any info like you're talking to me still. If, for some reason, you can't provide the nudges, your job failed and you just return blank.\nRules: Be brief. No duplicates. No explanations outside the strings of the nudges. English only.\n\nExamples:\n    Show me this quarter's top 10 deals\n    Summarize recent client interactions\n    Search OpenSearch for mentions of our competitors\n\nChat history:\n{prompt}\n\nDocuments:\n{docs}\n\n"
+      },
+      "tool_placeholder": {
+        "_input_type": "MessageTextInput",
+        "advanced": true,
+        "display_name": "Tool Placeholder",
+        "dynamic": false,
+        "info": "A placeholder input for tool mode.",
+        "input_types": [
+          "Message"
+        ],
+        "list": false,
+        "list_add_label": "Add More",
+        "load_from_db": false,
+        "name": "tool_placeholder",
+        "placeholder": "",
+        "required": false,
+        "show": true,
+        "title_case": false,
+        "tool_mode": true,
+        "trace_as_input": true,
+        "trace_as_metadata": true,
+        "type": "str",
+        "value": ""
+      }
+    },
+    "tool_mode": false
+  },
+  "showNode": true,
+  "type": "Prompt Template"
+},
+"dragging": false,
+"id": "Prompt Template-Wo6kR",
+"measured": {
+  "height": 449,
+  "width": 320
+},
+"position": {
+  "x": 1669.0365272581178,
+  "y": 712.1086273287026
+},
+"selected": false,
+"type": "genericNode"
+},
+{
+"data": {
+"id": "ParserComponent-tZs7s",
+"node": {
+"base_classes": [
+"Message"
+],
+"beta": false,
+"conditional_paths": [],
+"custom_fields": {},
+"description": "Extracts text using a template.",
+"display_name": "Parser",
+"documentation": "https://docs.langflow.org/components-processing#parser",
+      
"edited": false, + "field_order": [ + "input_data", + "mode", + "pattern", + "sep" + ], + "frozen": false, + "icon": "braces", + "legacy": false, + "metadata": { + "code_hash": "556209520650", + "dependencies": { + "dependencies": [ + { + "name": "langflow", + "version": "0.5.0.post2" + } + ], + "total_dependencies": 1 + }, + "module": "langflow.components.processing.parser.ParserComponent" + }, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Parsed Text", + "group_outputs": false, + "method": "parse_combined_text", + "name": "parsed_text", + "selected": "Message", + "tool_mode": true, + "types": [ + "Message" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_type": "Component", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from langflow.custom.custom_component.component import Component\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, HandleInput, MessageTextInput, MultilineInput, TabInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.template.field.base import Output\n\n\nclass ParserComponent(Component):\n display_name = \"Parser\"\n description = \"Extracts text using a template.\"\n documentation: str = \"https://docs.langflow.org/components-processing#parser\"\n icon = \"braces\"\n\n inputs = [\n HandleInput(\n name=\"input_data\",\n display_name=\"Data or DataFrame\",\n input_types=[\"DataFrame\", \"Data\"],\n info=\"Accepts either a DataFrame or a Data object.\",\n required=True,\n ),\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n 
options=[\"Parser\", \"Stringify\"],\n value=\"Parser\",\n info=\"Convert into raw string instead of using a template.\",\n real_time_refresh=True,\n ),\n MultilineInput(\n name=\"pattern\",\n display_name=\"Template\",\n info=(\n \"Use variables within curly brackets to extract column values for DataFrames \"\n \"or key values for Data.\"\n \"For example: `Name: {Name}, Age: {Age}, Country: {Country}`\"\n ),\n value=\"Text: {text}\", # Example default\n dynamic=True,\n show=True,\n required=True,\n ),\n MessageTextInput(\n name=\"sep\",\n display_name=\"Separator\",\n advanced=True,\n value=\"\\n\",\n info=\"String used to separate rows/items.\",\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"Parsed Text\",\n name=\"parsed_text\",\n info=\"Formatted text output.\",\n method=\"parse_combined_text\",\n ),\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n \"\"\"Dynamically hide/show `template` and enforce requirement based on `stringify`.\"\"\"\n if field_name == \"mode\":\n build_config[\"pattern\"][\"show\"] = self.mode == \"Parser\"\n build_config[\"pattern\"][\"required\"] = self.mode == \"Parser\"\n if field_value:\n clean_data = BoolInput(\n name=\"clean_data\",\n display_name=\"Clean Data\",\n info=(\n \"Enable to clean the data by removing empty rows and lines \"\n \"in each cell of the DataFrame/ Data object.\"\n ),\n value=True,\n advanced=True,\n required=False,\n )\n build_config[\"clean_data\"] = clean_data.to_dict()\n else:\n build_config.pop(\"clean_data\", None)\n\n return build_config\n\n def _clean_args(self):\n \"\"\"Prepare arguments based on input type.\"\"\"\n input_data = self.input_data\n\n match input_data:\n case list() if all(isinstance(item, Data) for item in input_data):\n msg = \"List of Data objects is not supported.\"\n raise ValueError(msg)\n case DataFrame():\n return input_data, None\n case Data():\n return None, input_data\n case dict() if \"data\" in input_data:\n try:\n if \"columns\" in 
input_data: # Likely a DataFrame\n return DataFrame.from_dict(input_data), None\n # Likely a Data object\n return None, Data(**input_data)\n except (TypeError, ValueError, KeyError) as e:\n msg = f\"Invalid structured input provided: {e!s}\"\n raise ValueError(msg) from e\n case _:\n msg = f\"Unsupported input type: {type(input_data)}. Expected DataFrame or Data.\"\n raise ValueError(msg)\n\n def parse_combined_text(self) -> Message:\n \"\"\"Parse all rows/items into a single text or convert input to string if `stringify` is enabled.\"\"\"\n # Early return for stringify option\n if self.mode == \"Stringify\":\n return self.convert_to_string()\n\n df, data = self._clean_args()\n\n lines = []\n if df is not None:\n for _, row in df.iterrows():\n formatted_text = self.pattern.format(**row.to_dict())\n lines.append(formatted_text)\n elif data is not None:\n formatted_text = self.pattern.format(**data.data)\n lines.append(formatted_text)\n\n combined_text = self.sep.join(lines)\n self.status = combined_text\n return Message(text=combined_text)\n\n def convert_to_string(self) -> Message:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n result = \"\"\n if isinstance(self.input_data, list):\n result = \"\\n\".join([safe_convert(item, clean_data=self.clean_data or False) for item in self.input_data])\n else:\n result = safe_convert(self.input_data or False)\n self.log(f\"Converted to string with length: {len(result)}\")\n\n message = Message(text=result)\n self.status = message\n return message\n" + }, + "input_data": { + "_input_type": "HandleInput", + "advanced": false, + "display_name": "Data or DataFrame", + "dynamic": false, + "info": "Accepts either a DataFrame or a Data object.", + "input_types": [ + "DataFrame", + "Data" + ], + "list": false, + "list_add_label": "Add More", + "name": "input_data", + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "trace_as_metadata": true, + "type": "other", + "value": "" + }, 
+ "mode": { + "_input_type": "TabInput", + "advanced": false, + "display_name": "Mode", + "dynamic": false, + "info": "Convert into raw string instead of using a template.", + "name": "mode", + "options": [ + "Parser", + "Stringify" + ], + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "tab", + "value": "Parser" + }, + "pattern": { + "_input_type": "MultilineInput", + "advanced": false, + "copy_field": false, + "display_name": "Template", + "dynamic": true, + "info": "Use variables within curly brackets to extract column values for DataFrames or key values for Data.For example: `Name: {Name}, Age: {Age}, Country: {Country}`", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "multiline": true, + "name": "pattern", + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "type": "str", + "value": "{text}" + }, + "sep": { + "_input_type": "MessageTextInput", + "advanced": true, + "display_name": "Separator", + "dynamic": false, + "info": "String used to separate rows/items.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "sep", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "type": "str", + "value": "\n" + } + }, + "tool_mode": false + }, + "showNode": true, + "type": "ParserComponent" + }, + "dragging": false, + "id": "ParserComponent-tZs7s", + "measured": { + "height": 329, + "width": 320 + }, + "position": { + "x": 1282.0613788430787, + "y": 564.2200355777322 + }, + "selected": false, + "type": "genericNode" + }, + { + "data": { + "id": "LanguageModelComponent-NSTA6", "node": { "base_classes": [ 
"LanguageModel", @@ -1592,7 +1967,7 @@ ], "frozen": false, "icon": "brain-circuit", - "last_updated": "2025-09-04T20:03:12.869Z", + "last_updated": "2025-09-22T20:02:40.428Z", "legacy": false, "metadata": { "code_hash": "6ac42a7167a4", @@ -1612,7 +1987,7 @@ }, { "name": "langflow", - "version": "0.5.0.post2" + "version": null } ], "total_dependencies": 4 @@ -1881,399 +2256,23 @@ "type": "LanguageModelComponent" }, "dragging": false, - "id": "LanguageModelComponent-0YME7", + "id": "LanguageModelComponent-NSTA6", "measured": { "height": 534, "width": 320 }, "position": { - "x": 2190.7561945382186, - "y": 449.90261812853623 - }, - "selected": false, - "type": "genericNode" - }, - { - "data": { - "id": "Prompt Template-Wo6kR", - "node": { - "base_classes": [ - "Message" - ], - "beta": false, - "conditional_paths": [], - "custom_fields": { - "template": [ - "prompt", - "docs" - ] - }, - "description": "Create a prompt template with dynamic variables.", - "display_name": "Prompt Template", - "documentation": "https://docs.langflow.org/components-prompts", - "edited": false, - "error": null, - "field_order": [ - "template", - "tool_placeholder" - ], - "frozen": false, - "full_path": null, - "icon": "braces", - "is_composition": null, - "is_input": null, - "is_output": null, - "legacy": false, - "metadata": { - "code_hash": "c478a572ccd3", - "dependencies": { - "dependencies": [ - { - "name": "langflow", - "version": "0.5.0.post2" - } - ], - "total_dependencies": 1 - }, - "module": "langflow.components.processing.prompt.PromptComponent" - }, - "minimized": false, - "name": "", - "output_types": [], - "outputs": [ - { - "allows_loop": false, - "cache": true, - "display_name": "Prompt", - "group_outputs": false, - "hidden": null, - "method": "build_prompt", - "name": "prompt", - "options": null, - "required_inputs": null, - "selected": "Message", - "tool_mode": true, - "types": [ - "Message" - ], - "value": "__UNDEFINED__" - } - ], - "pinned": false, - "priority": 0, - 
"template": { - "_type": "Component", - "code": { - "advanced": true, - "dynamic": true, - "fileTypes": [], - "file_path": "", - "info": "", - "list": false, - "load_from_db": false, - "multiline": true, - "name": "code", - "password": false, - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "type": "code", - "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt Template\"\n description: str = \"Create a prompt template with dynamic variables.\"\n documentation: str = \"https://docs.langflow.org/components-prompts\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt Template\"\n priority = 0 # Set priority to 0 to make it appear first\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def 
update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n" - }, - "docs": { - "advanced": false, - "display_name": "docs", - "dynamic": false, - "field_type": "str", - "fileTypes": [], - "file_path": "", - "info": "", - "input_types": [ - "Message" - ], - "list": false, - "load_from_db": false, - "multiline": true, - "name": "docs", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "str", - "value": "" - }, - "prompt": { - "advanced": false, - "display_name": "prompt", - "dynamic": false, - "field_type": "str", - "fileTypes": [], - "file_path": "", - "info": "", - "input_types": [ - "Message" - ], - "list": false, - "load_from_db": false, - "multiline": true, - "name": "prompt", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "str", - "value": "" - }, - "template": { - "_input_type": "PromptInput", - "advanced": false, - "display_name": "Template", - "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "name": "template", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - 
"tool_mode": false, - "trace_as_input": true, - "type": "prompt", - "value": "You are generating prompt nudges to help a user explore a corpus.\n\nTask:\n1) Skim the documents to infer common themes, entities, or tasks.\n2) Propose exactly three concise, distinct prompt nudges that encourage useful next queries.\n3) If the chat history is provided, use it to generate new questions that the user might have, based on the llm's response to his previous query. DO NOT repeat user questions.\n4) Make the nudges concise, close to 40 characters.\n5) The nudges are questions or commands that the user can make to the chatbot, which will respond looking at the corpus.\n4) Return strings only, separating the nudges by a newline. Don't include quotation marks.\n5) If any error occured, return blank. This will be used in production, so don't ask for more info or confirm a info like you're talking to me still. If, for some reason, you can't provide the nudges, your job failed and you just return blank.\nRules: Be brief. No duplicates. No explanations outside the strings of the nudges. 
English only.\n\nExamples:\n Show me this quarter's top 10 deals\n Summarize recent client interactions\n Search OpenSearch for mentions of our competitors\n\nChat history:\n{prompt}\n\nDocuments:\n{docs}\n\n" - }, - "tool_placeholder": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Tool Placeholder", - "dynamic": false, - "info": "A placeholder input for tool mode.", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "tool_placeholder", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": true, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" - } - }, - "tool_mode": false - }, - "showNode": true, - "type": "Prompt Template" - }, - "dragging": false, - "id": "Prompt Template-Wo6kR", - "measured": { - "height": 449, - "width": 320 - }, - "position": { - "x": 1669.0365272581178, - "y": 712.1086273287026 - }, - "selected": true, - "type": "genericNode" - }, - { - "data": { - "id": "ParserComponent-tZs7s", - "node": { - "base_classes": [ - "Message" - ], - "beta": false, - "conditional_paths": [], - "custom_fields": {}, - "description": "Extracts text using a template.", - "display_name": "Parser", - "documentation": "https://docs.langflow.org/components-processing#parser", - "edited": false, - "field_order": [ - "input_data", - "mode", - "pattern", - "sep" - ], - "frozen": false, - "icon": "braces", - "legacy": false, - "metadata": { - "code_hash": "556209520650", - "dependencies": { - "dependencies": [ - { - "name": "langflow", - "version": "0.5.0.post2" - } - ], - "total_dependencies": 1 - }, - "module": "langflow.components.processing.parser.ParserComponent" - }, - "minimized": false, - "output_types": [], - "outputs": [ - { - "allows_loop": false, - "cache": true, - "display_name": "Parsed Text", - "group_outputs": false, - "method": "parse_combined_text", - "name": "parsed_text", - 
"selected": "Message", - "tool_mode": true, - "types": [ - "Message" - ], - "value": "__UNDEFINED__" - } - ], - "pinned": false, - "template": { - "_type": "Component", - "code": { - "advanced": true, - "dynamic": true, - "fileTypes": [], - "file_path": "", - "info": "", - "list": false, - "load_from_db": false, - "multiline": true, - "name": "code", - "password": false, - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "type": "code", - "value": "from langflow.custom.custom_component.component import Component\nfrom langflow.helpers.data import safe_convert\nfrom langflow.inputs.inputs import BoolInput, HandleInput, MessageTextInput, MultilineInput, TabInput\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.template.field.base import Output\n\n\nclass ParserComponent(Component):\n display_name = \"Parser\"\n description = \"Extracts text using a template.\"\n documentation: str = \"https://docs.langflow.org/components-processing#parser\"\n icon = \"braces\"\n\n inputs = [\n HandleInput(\n name=\"input_data\",\n display_name=\"Data or DataFrame\",\n input_types=[\"DataFrame\", \"Data\"],\n info=\"Accepts either a DataFrame or a Data object.\",\n required=True,\n ),\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Parser\", \"Stringify\"],\n value=\"Parser\",\n info=\"Convert into raw string instead of using a template.\",\n real_time_refresh=True,\n ),\n MultilineInput(\n name=\"pattern\",\n display_name=\"Template\",\n info=(\n \"Use variables within curly brackets to extract column values for DataFrames \"\n \"or key values for Data.\"\n \"For example: `Name: {Name}, Age: {Age}, Country: {Country}`\"\n ),\n value=\"Text: {text}\", # Example default\n dynamic=True,\n show=True,\n required=True,\n ),\n MessageTextInput(\n name=\"sep\",\n display_name=\"Separator\",\n advanced=True,\n value=\"\\n\",\n info=\"String used 
to separate rows/items.\",\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"Parsed Text\",\n name=\"parsed_text\",\n info=\"Formatted text output.\",\n method=\"parse_combined_text\",\n ),\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n \"\"\"Dynamically hide/show `template` and enforce requirement based on `stringify`.\"\"\"\n if field_name == \"mode\":\n build_config[\"pattern\"][\"show\"] = self.mode == \"Parser\"\n build_config[\"pattern\"][\"required\"] = self.mode == \"Parser\"\n if field_value:\n clean_data = BoolInput(\n name=\"clean_data\",\n display_name=\"Clean Data\",\n info=(\n \"Enable to clean the data by removing empty rows and lines \"\n \"in each cell of the DataFrame/ Data object.\"\n ),\n value=True,\n advanced=True,\n required=False,\n )\n build_config[\"clean_data\"] = clean_data.to_dict()\n else:\n build_config.pop(\"clean_data\", None)\n\n return build_config\n\n def _clean_args(self):\n \"\"\"Prepare arguments based on input type.\"\"\"\n input_data = self.input_data\n\n match input_data:\n case list() if all(isinstance(item, Data) for item in input_data):\n msg = \"List of Data objects is not supported.\"\n raise ValueError(msg)\n case DataFrame():\n return input_data, None\n case Data():\n return None, input_data\n case dict() if \"data\" in input_data:\n try:\n if \"columns\" in input_data: # Likely a DataFrame\n return DataFrame.from_dict(input_data), None\n # Likely a Data object\n return None, Data(**input_data)\n except (TypeError, ValueError, KeyError) as e:\n msg = f\"Invalid structured input provided: {e!s}\"\n raise ValueError(msg) from e\n case _:\n msg = f\"Unsupported input type: {type(input_data)}. 
Expected DataFrame or Data.\"\n raise ValueError(msg)\n\n def parse_combined_text(self) -> Message:\n \"\"\"Parse all rows/items into a single text or convert input to string if `stringify` is enabled.\"\"\"\n # Early return for stringify option\n if self.mode == \"Stringify\":\n return self.convert_to_string()\n\n df, data = self._clean_args()\n\n lines = []\n if df is not None:\n for _, row in df.iterrows():\n formatted_text = self.pattern.format(**row.to_dict())\n lines.append(formatted_text)\n elif data is not None:\n formatted_text = self.pattern.format(**data.data)\n lines.append(formatted_text)\n\n combined_text = self.sep.join(lines)\n self.status = combined_text\n return Message(text=combined_text)\n\n def convert_to_string(self) -> Message:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n result = \"\"\n if isinstance(self.input_data, list):\n result = \"\\n\".join([safe_convert(item, clean_data=self.clean_data or False) for item in self.input_data])\n else:\n result = safe_convert(self.input_data or False)\n self.log(f\"Converted to string with length: {len(result)}\")\n\n message = Message(text=result)\n self.status = message\n return message\n" - }, - "input_data": { - "_input_type": "HandleInput", - "advanced": false, - "display_name": "Data or DataFrame", - "dynamic": false, - "info": "Accepts either a DataFrame or a Data object.", - "input_types": [ - "DataFrame", - "Data" - ], - "list": false, - "list_add_label": "Add More", - "name": "input_data", - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "trace_as_metadata": true, - "type": "other", - "value": "" - }, - "mode": { - "_input_type": "TabInput", - "advanced": false, - "display_name": "Mode", - "dynamic": false, - "info": "Convert into raw string instead of using a template.", - "name": "mode", - "options": [ - "Parser", - "Stringify" - ], - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": true, - 
"title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "tab", - "value": "Parser" - }, - "pattern": { - "_input_type": "MultilineInput", - "advanced": false, - "copy_field": false, - "display_name": "Template", - "dynamic": true, - "info": "Use variables within curly brackets to extract column values for DataFrames or key values for Data.For example: `Name: {Name}, Age: {Age}, Country: {Country}`", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "multiline": true, - "name": "pattern", - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "{text}" - }, - "sep": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Separator", - "dynamic": false, - "info": "String used to separate rows/items.", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "sep", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "\n" - } - }, - "tool_mode": false - }, - "showNode": true, - "type": "ParserComponent" - }, - "dragging": false, - "id": "ParserComponent-tZs7s", - "measured": { - "height": 329, - "width": 320 - }, - "position": { - "x": 1282.0613788430787, - "y": 564.2200355777322 + "x": 2172.8054035815467, + "y": 413.44064987070726 }, "selected": false, "type": "genericNode" } ], "viewport": { - "x": -187.81970119617426, - "y": 80.66031410799627, - "zoom": 0.5607296737841687 + "x": -194.9599233786812, + "y": 65.54639038496208, + "zoom": 0.5547428131761893 } }, "description": "OpenRAG Open Search Nudges generator, based on the Open Search documents and the chat history.", diff --git a/frontend/src/app/onboarding/page.tsx 
b/frontend/src/app/onboarding/page.tsx index bed6a389..c58abfea 100644 --- a/frontend/src/app/onboarding/page.tsx +++ b/frontend/src/app/onboarding/page.tsx @@ -1,10 +1,11 @@ "use client"; +import { useRouter } from "next/navigation"; import { Suspense, useEffect, useState } from "react"; import { toast } from "sonner"; import { - useOnboardingMutation, - type OnboardingVariables, + type OnboardingVariables, + useOnboardingMutation, } from "@/app/api/mutations/useOnboardingMutation"; import IBMLogo from "@/components/logo/ibm-logo"; import OllamaLogo from "@/components/logo/ollama-logo"; @@ -12,198 +13,198 @@ import OpenAILogo from "@/components/logo/openai-logo"; import { ProtectedRoute } from "@/components/protected-route"; import { Button } from "@/components/ui/button"; import { - Card, - CardContent, - CardFooter, - CardHeader, + Card, + CardContent, + CardFooter, + CardHeader, } from "@/components/ui/card"; import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs"; +import { + Tooltip, + TooltipContent, + TooltipTrigger, +} from "@/components/ui/tooltip"; +import { useGetSettingsQuery } from "../api/queries/useGetSettingsQuery"; import { IBMOnboarding } from "./components/ibm-onboarding"; import { OllamaOnboarding } from "./components/ollama-onboarding"; import { OpenAIOnboarding } from "./components/openai-onboarding"; -import { - Tooltip, - TooltipContent, - TooltipTrigger, -} from "@/components/ui/tooltip"; -import { useGetSettingsQuery } from "../api/queries/useGetSettingsQuery"; -import { useRouter } from "next/navigation"; function OnboardingPage() { - const { data: settingsDb, isLoading: isSettingsLoading } = - useGetSettingsQuery(); + const { data: settingsDb, isLoading: isSettingsLoading } = + useGetSettingsQuery(); - const redirect = "/"; + const redirect = "/"; - const router = useRouter(); + const router = useRouter(); - // Redirect if already authenticated or in no-auth mode - useEffect(() => { - if (!isSettingsLoading && 
settingsDb && settingsDb.edited) { - router.push(redirect); - } - }, [isSettingsLoading, redirect]); + // Redirect if already authenticated or in no-auth mode + useEffect(() => { + if (!isSettingsLoading && settingsDb && settingsDb.edited) { + router.push(redirect); + } + }, [isSettingsLoading, settingsDb, router]); - const [modelProvider, setModelProvider] = useState("openai"); + const [modelProvider, setModelProvider] = useState("openai"); - const [sampleDataset, setSampleDataset] = useState(true); + const [sampleDataset, setSampleDataset] = useState(true); - const handleSetModelProvider = (provider: string) => { - setModelProvider(provider); - setSettings({ - model_provider: provider, - embedding_model: "", - llm_model: "", - }); - }; + const handleSetModelProvider = (provider: string) => { + setModelProvider(provider); + setSettings({ + model_provider: provider, + embedding_model: "", + llm_model: "", + }); + }; - const [settings, setSettings] = useState({ - model_provider: modelProvider, - embedding_model: "", - llm_model: "", - }); + const [settings, setSettings] = useState({ + model_provider: modelProvider, + embedding_model: "", + llm_model: "", + }); - // Mutations - const onboardingMutation = useOnboardingMutation({ - onSuccess: (data) => { - toast.success("Onboarding completed successfully!"); - console.log("Onboarding completed successfully", data); - }, - onError: (error) => { - toast.error("Failed to complete onboarding", { - description: error.message, - }); - }, - }); + // Mutations + const onboardingMutation = useOnboardingMutation({ + onSuccess: (data) => { + toast.success("Onboarding completed successfully!"); + console.log("Onboarding completed successfully", data); + router.push(redirect); + }, + onError: (error) => { + toast.error("Failed to complete onboarding", { + description: error.message, + }); + }, + }); - const handleComplete = () => { - if ( - !settings.model_provider || - !settings.llm_model || - !settings.embedding_model - ) { - 
toast.error("Please complete all required fields"); - return; - } + const handleComplete = () => { + if ( + !settings.model_provider || + !settings.llm_model || + !settings.embedding_model + ) { + toast.error("Please complete all required fields"); + return; + } - // Prepare onboarding data - const onboardingData: OnboardingVariables = { - model_provider: settings.model_provider, - llm_model: settings.llm_model, - embedding_model: settings.embedding_model, - sample_data: sampleDataset, - }; + // Prepare onboarding data + const onboardingData: OnboardingVariables = { + model_provider: settings.model_provider, + llm_model: settings.llm_model, + embedding_model: settings.embedding_model, + sample_data: sampleDataset, + }; - // Add API key if available - if (settings.api_key) { - onboardingData.api_key = settings.api_key; - } + // Add API key if available + if (settings.api_key) { + onboardingData.api_key = settings.api_key; + } - // Add endpoint if available - if (settings.endpoint) { - onboardingData.endpoint = settings.endpoint; - } + // Add endpoint if available + if (settings.endpoint) { + onboardingData.endpoint = settings.endpoint; + } - // Add project_id if available - if (settings.project_id) { - onboardingData.project_id = settings.project_id; - } + // Add project_id if available + if (settings.project_id) { + onboardingData.project_id = settings.project_id; + } - onboardingMutation.mutate(onboardingData); - }; + onboardingMutation.mutate(onboardingData); + }; - const isComplete = !!settings.llm_model && !!settings.embedding_model; + const isComplete = !!settings.llm_model && !!settings.embedding_model; - return ( -
-
-
-

- Configure your models -

-

[description of task]

-
- - - - - - - OpenAI - - - - IBM - - - - Ollama - - - - - - - - - - - - - - - - - - - - - - {!isComplete ? "Please fill in all required fields" : ""} - - - - -
-
- ); + return ( +
+
+
+

+ Configure your models +

+

[description of task]

+
+ + + + + + + OpenAI + + + + IBM + + + + Ollama + + + + + + + + + + + + + + + + + + + + + + {!isComplete ? "Please fill in all required fields" : ""} + + + + +
+
+ ); } export default function ProtectedOnboardingPage() { - return ( - - Loading onboarding...}> - - - - ); + return ( + + Loading onboarding...}> + + + + ); } diff --git a/src/config/settings.py b/src/config/settings.py index 66f78ce5..6a4afc05 100644 --- a/src/config/settings.py +++ b/src/config/settings.py @@ -496,12 +496,18 @@ class AppClients: WATSONX_LLM_COMPONENT_PATH = os.getenv( "WATSONX_LLM_COMPONENT_PATH", "flows/components/watsonx_llm.json" ) +WATSONX_LLM_TEXT_COMPONENT_PATH = os.getenv( + "WATSONX_LLM_TEXT_COMPONENT_PATH", "flows/components/watsonx_llm_text.json" +) WATSONX_EMBEDDING_COMPONENT_PATH = os.getenv( "WATSONX_EMBEDDING_COMPONENT_PATH", "flows/components/watsonx_embedding.json" ) OLLAMA_LLM_COMPONENT_PATH = os.getenv( "OLLAMA_LLM_COMPONENT_PATH", "flows/components/ollama_llm.json" ) +OLLAMA_LLM_TEXT_COMPONENT_PATH = os.getenv( + "OLLAMA_LLM_TEXT_COMPONENT_PATH", "flows/components/ollama_llm_text.json" +) OLLAMA_EMBEDDING_COMPONENT_PATH = os.getenv( "OLLAMA_EMBEDDING_COMPONENT_PATH", "flows/components/ollama_embedding.json" ) @@ -514,6 +520,9 @@ OPENAI_EMBEDDING_COMPONENT_ID = os.getenv( OPENAI_LLM_COMPONENT_ID = os.getenv( "OPENAI_LLM_COMPONENT_ID", "LanguageModelComponent-0YME7" ) +OPENAI_LLM_TEXT_COMPONENT_ID = os.getenv( + "OPENAI_LLM_TEXT_COMPONENT_ID", "LanguageModelComponent-NSTA6" +) # Provider-specific component IDs WATSONX_EMBEDDING_COMPONENT_ID = os.getenv( @@ -522,11 +531,18 @@ WATSONX_EMBEDDING_COMPONENT_ID = os.getenv( WATSONX_LLM_COMPONENT_ID = os.getenv( "WATSONX_LLM_COMPONENT_ID", "IBMwatsonxModel-jA4Nw" ) +WATSONX_LLM_TEXT_COMPONENT_ID = os.getenv( + "WATSONX_LLM_TEXT_COMPONENT_ID", "IBMwatsonxModel-18kmA" +) + OLLAMA_EMBEDDING_COMPONENT_ID = os.getenv( "OLLAMA_EMBEDDING_COMPONENT_ID", "OllamaEmbeddings-4ah5Q" ) OLLAMA_LLM_COMPONENT_ID = os.getenv("OLLAMA_LLM_COMPONENT_ID", "OllamaModel-eCsJx") +OLLAMA_LLM_TEXT_COMPONENT_ID = os.getenv( + "OLLAMA_LLM_TEXT_COMPONENT_ID", "OllamaModel-XDGqZ" +) # Global clients instance 
clients = AppClients() diff --git a/src/services/flows_service.py b/src/services/flows_service.py index 13a1e931..4c3872ca 100644 --- a/src/services/flows_service.py +++ b/src/services/flows_service.py @@ -3,8 +3,13 @@ from config.settings import ( LANGFLOW_URL, LANGFLOW_CHAT_FLOW_ID, LANGFLOW_INGEST_FLOW_ID, + OLLAMA_LLM_TEXT_COMPONENT_ID, + OLLAMA_LLM_TEXT_COMPONENT_PATH, OPENAI_EMBEDDING_COMPONENT_ID, OPENAI_LLM_COMPONENT_ID, + OPENAI_LLM_TEXT_COMPONENT_ID, + WATSONX_LLM_TEXT_COMPONENT_ID, + WATSONX_LLM_TEXT_COMPONENT_PATH, clients, WATSONX_LLM_COMPONENT_PATH, WATSONX_EMBEDDING_COMPONENT_PATH, @@ -146,7 +151,7 @@ class FlowsService: try: # Load component templates based on provider - llm_template, embedding_template = self._load_component_templates(provider) + llm_template, embedding_template, llm_text_template = self._load_component_templates(provider) logger.info(f"Assigning {provider} components") @@ -158,6 +163,7 @@ class FlowsService: "flow_id": NUDGES_FLOW_ID, "embedding_id": OPENAI_EMBEDDING_COMPONENT_ID, "llm_id": OPENAI_LLM_COMPONENT_ID, + "llm_text_id": OPENAI_LLM_TEXT_COMPONENT_ID, }, { "name": "retrieval", @@ -165,6 +171,7 @@ class FlowsService: "flow_id": LANGFLOW_CHAT_FLOW_ID, "embedding_id": OPENAI_EMBEDDING_COMPONENT_ID, "llm_id": OPENAI_LLM_COMPONENT_ID, + "llm_text_id": None, }, { "name": "ingest", @@ -172,6 +179,7 @@ class FlowsService: "flow_id": LANGFLOW_INGEST_FLOW_ID, "embedding_id": OPENAI_EMBEDDING_COMPONENT_ID, "llm_id": None, # Ingestion flow might not have LLM + "llm_text_id": None, # Ingestion flow might not have LLM Text }, ] @@ -181,7 +189,7 @@ class FlowsService: for config in flow_configs: try: result = await self._update_flow_components( - config, llm_template, embedding_template + config, llm_template, embedding_template, llm_text_template ) results.append(result) logger.info(f"Successfully updated {config['name']} flow") @@ -215,9 +223,11 @@ class FlowsService: if provider == "watsonx": llm_path = WATSONX_LLM_COMPONENT_PATH 
embedding_path = WATSONX_EMBEDDING_COMPONENT_PATH + llm_text_path = WATSONX_LLM_TEXT_COMPONENT_PATH elif provider == "ollama": llm_path = OLLAMA_LLM_COMPONENT_PATH embedding_path = OLLAMA_EMBEDDING_COMPONENT_PATH + llm_text_path = OLLAMA_LLM_TEXT_COMPONENT_PATH else: raise ValueError(f"Unsupported provider: {provider}") @@ -246,21 +256,31 @@ class FlowsService: with open(embedding_full_path, "r") as f: embedding_template = json.load(f) - logger.info(f"Loaded component templates for {provider}") - return llm_template, embedding_template + # Load LLM Text template + llm_text_full_path = os.path.join(project_root, llm_text_path) + if not os.path.exists(llm_text_full_path): + raise FileNotFoundError( + f"LLM Text component template not found at: {llm_text_full_path}" + ) - async def _update_flow_components(self, config, llm_template, embedding_template): + with open(llm_text_full_path, "r") as f: + llm_text_template = json.load(f) + + logger.info(f"Loaded component templates for {provider}") + return llm_template, embedding_template, llm_text_template + + async def _update_flow_components(self, config, llm_template, embedding_template, llm_text_template): """Update components in a specific flow""" flow_name = config["name"] flow_file = config["file"] flow_id = config["flow_id"] old_embedding_id = config["embedding_id"] old_llm_id = config["llm_id"] - + old_llm_text_id = config["llm_text_id"] # Extract IDs from templates new_llm_id = llm_template["data"]["id"] new_embedding_id = embedding_template["data"]["id"] - + new_llm_text_id = llm_text_template["data"]["id"] # Get the project root directory current_file_dir = os.path.dirname(os.path.abspath(__file__)) src_dir = os.path.dirname(current_file_dir) @@ -308,6 +328,21 @@ class FlowsService: self._replace_node_in_flow(flow_data, old_llm_id, new_llm_node) components_updated.append(f"llm: {old_llm_id} -> {new_llm_id}") + # Replace LLM component (if exists in this flow) + if old_llm_text_id: + llm_text_node = 
self._find_node_by_id(flow_data, old_llm_text_id) + if llm_text_node: + # Preserve position + original_position = llm_text_node.get("position", {}) + + # Replace with new template + new_llm_text_node = llm_text_template.copy() + new_llm_text_node["position"] = original_position + + # Replace in flow + self._replace_node_in_flow(flow_data, old_llm_text_id, new_llm_text_node) + components_updated.append(f"llm: {old_llm_text_id} -> {new_llm_text_id}") + # Update all edge references using regex replacement flow_json_str = json.dumps(flow_data) @@ -326,6 +361,11 @@ class FlowsService: flow_json_str = re.sub( re.escape(old_llm_id), new_llm_id, flow_json_str ) + if old_llm_text_id: + flow_json_str = re.sub( + re.escape(old_llm_text_id), new_llm_text_id, flow_json_str + ) + flow_json_str = re.sub( re.escape(old_llm_id.split("-")[0]), new_llm_id.split("-")[0], @@ -415,7 +455,7 @@ class FlowsService: ] # Determine target component IDs based on provider - target_embedding_id, target_llm_id = self._get_provider_component_ids( + target_embedding_id, target_llm_id, target_llm_text_id = self._get_provider_component_ids( provider ) @@ -429,6 +469,7 @@ class FlowsService: provider, target_embedding_id, target_llm_id, + target_llm_text_id, embedding_model, llm_model, endpoint, @@ -471,12 +512,12 @@ class FlowsService: def _get_provider_component_ids(self, provider: str): """Get the component IDs for a specific provider""" if provider == "watsonx": - return WATSONX_EMBEDDING_COMPONENT_ID, WATSONX_LLM_COMPONENT_ID + return WATSONX_EMBEDDING_COMPONENT_ID, WATSONX_LLM_COMPONENT_ID, WATSONX_LLM_TEXT_COMPONENT_ID elif provider == "ollama": - return OLLAMA_EMBEDDING_COMPONENT_ID, OLLAMA_LLM_COMPONENT_ID + return OLLAMA_EMBEDDING_COMPONENT_ID, OLLAMA_LLM_COMPONENT_ID, OLLAMA_LLM_TEXT_COMPONENT_ID elif provider == "openai": # OpenAI components are the default ones - return OPENAI_EMBEDDING_COMPONENT_ID, OPENAI_LLM_COMPONENT_ID + return OPENAI_EMBEDDING_COMPONENT_ID, OPENAI_LLM_COMPONENT_ID, 
OPENAI_LLM_TEXT_COMPONENT_ID else: raise ValueError(f"Unsupported provider: {provider}") @@ -486,6 +527,7 @@ class FlowsService: provider: str, target_embedding_id: str, target_llm_id: str, + target_llm_text_id: str, embedding_model: str, llm_model: str, endpoint: str = None, @@ -512,7 +554,7 @@ class FlowsService: embedding_node = self._find_node_by_id(flow_data, target_embedding_id) if embedding_node: if self._update_component_fields( - embedding_node, provider, "embedding", embedding_model, endpoint + embedding_node, provider, embedding_model, endpoint ): updates_made.append(f"embedding model: {embedding_model}") @@ -521,7 +563,15 @@ class FlowsService: llm_node = self._find_node_by_id(flow_data, target_llm_id) if llm_node: if self._update_component_fields( - llm_node, provider, "llm", llm_model, endpoint + llm_node, provider, llm_model, endpoint + ): + updates_made.append(f"llm model: {llm_model}") + + if target_llm_text_id: + llm_text_node = self._find_node_by_id(flow_data, target_llm_text_id) + if llm_text_node: + if self._update_component_fields( + llm_text_node, provider, llm_model, endpoint ): updates_made.append(f"llm model: {llm_model}") @@ -569,7 +619,11 @@ class FlowsService: updated = False # Update model_name field (common to all providers) - if "model_name" in template: + if provider == "openai" and "model" in template: + template["model"]["value"] = model_value + template["model"]["options"] = [model_value] + updated = True + elif "model_name" in template: template["model_name"]["value"] = model_value template["model_name"]["options"] = [model_value] updated = True diff --git a/src/services/models_service.py b/src/services/models_service.py index e38c4e8b..35a54895 100644 --- a/src/services/models_service.py +++ b/src/services/models_service.py @@ -124,10 +124,8 @@ class ModelsService: for model in models: model_name = model.get(JSON_NAME_KEY, "") - # Remove tag if present (e.g., "llama3:latest" -> "llama3") - clean_model_name = 
model_name.split(":")[0] if model_name else "" - if not clean_model_name: + if not model_name: continue logger.debug(f"Checking model: {model_name}") @@ -152,7 +150,7 @@ class ModelsService: # Check if it's an embedding model is_embedding = any( - embed_model in clean_model_name.lower() + embed_model in model_name.lower() for embed_model in self.OLLAMA_EMBEDDING_MODELS ) @@ -160,8 +158,8 @@ class ModelsService: # Embedding models only need completion capability embedding_models.append( { - "value": clean_model_name, - "label": clean_model_name, + "value": model_name, + "label": model_name, "default": False, } ) @@ -169,9 +167,9 @@ class ModelsService: # Language models need both completion and tool calling language_models.append( { - "value": clean_model_name, - "label": clean_model_name, - "default": "llama3" in clean_model_name.lower(), + "value": model_name, + "label": model_name, + "default": "llama3" in model_name.lower(), } ) except Exception as e: