diff --git a/flows/components/ollama_embedding.json b/flows/components/ollama_embedding.json index efb12882..24974b46 100644 --- a/flows/components/ollama_embedding.json +++ b/flows/components/ollama_embedding.json @@ -1,171 +1,142 @@ { "data": { - "edges": [], - "nodes": [ - { - "data": { - "node": { - "template": { - "_type": "Component", - "base_url": { - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "load_from_db": false, - "list": false, - "list_add_label": "Add More", - "required": true, - "placeholder": "", - "show": true, - "name": "base_url", - "value": "http://host.docker.internal:11434", - "display_name": "Ollama Base URL", - "advanced": false, - "input_types": [ - "Message" - ], - "dynamic": false, - "info": "", - "title_case": false, - "type": "str", - "_input_type": "MessageTextInput" - }, - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "from typing import Any\nfrom urllib.parse import urljoin\n\nimport httpx\nfrom langchain_ollama import OllamaEmbeddings\n\nfrom lfx.base.models.model import LCModelComponent\nfrom lfx.base.models.ollama_constants import OLLAMA_EMBEDDING_MODELS, URL_LIST\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import DropdownInput, MessageTextInput, Output\n\nHTTP_STATUS_OK = 200\n\n\nclass OllamaEmbeddingsComponent(LCModelComponent):\n display_name: str = \"Ollama Embeddings\"\n description: str = \"Generate embeddings using Ollama models.\"\n documentation = \"https://python.langchain.com/docs/integrations/text_embedding/ollama\"\n icon = \"Ollama\"\n name = \"OllamaEmbeddings\"\n\n inputs = [\n DropdownInput(\n name=\"model_name\",\n display_name=\"Ollama Model\",\n value=\"\",\n options=[],\n real_time_refresh=True,\n refresh_button=True,\n combobox=True,\n required=True,\n ),\n MessageTextInput(\n name=\"base_url\",\n display_name=\"Ollama Base URL\",\n value=\"\",\n required=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Embeddings\", name=\"embeddings\", method=\"build_embeddings\"),\n ]\n\n def build_embeddings(self) -> Embeddings:\n try:\n output = OllamaEmbeddings(model=self.model_name, base_url=self.base_url)\n except Exception as e:\n msg = (\n \"Unable to connect to the Ollama API. 
\",\n \"Please verify the base URL, ensure the relevant Ollama model is pulled, and try again.\",\n )\n raise ValueError(msg) from e\n return output\n\n async def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None):\n if field_name in {\"base_url\", \"model_name\"} and not await self.is_valid_ollama_url(field_value):\n # Check if any URL in the list is valid\n valid_url = \"\"\n for url in URL_LIST:\n if await self.is_valid_ollama_url(url):\n valid_url = url\n break\n build_config[\"base_url\"][\"value\"] = valid_url\n if field_name in {\"model_name\", \"base_url\", \"tool_model_enabled\"}:\n if await self.is_valid_ollama_url(self.base_url):\n build_config[\"model_name\"][\"options\"] = await self.get_model(self.base_url)\n elif await self.is_valid_ollama_url(build_config[\"base_url\"].get(\"value\", \"\")):\n build_config[\"model_name\"][\"options\"] = await self.get_model(build_config[\"base_url\"].get(\"value\", \"\"))\n else:\n build_config[\"model_name\"][\"options\"] = []\n\n return build_config\n\n async def get_model(self, base_url_value: str) -> list[str]:\n \"\"\"Get the model names from Ollama.\"\"\"\n model_ids = []\n try:\n url = urljoin(base_url_value, \"/api/tags\")\n async with httpx.AsyncClient() as client:\n response = await client.get(url)\n response.raise_for_status()\n data = response.json()\n\n model_ids = [model[\"name\"] for model in data.get(\"models\", [])]\n # this to ensure that not embedding models are included.\n # not even the base models since models can have 1b 2b etc\n # handles cases when embeddings models have tags like :latest - etc.\n model_ids = [\n model\n for model in model_ids\n if any(model.startswith(f\"{embedding_model}\") for embedding_model in OLLAMA_EMBEDDING_MODELS)\n ]\n\n except (ImportError, ValueError, httpx.RequestError) as e:\n msg = \"Could not get model names from Ollama.\"\n raise ValueError(msg) from e\n\n return model_ids\n\n async def is_valid_ollama_url(self, url: str) -> bool:\n try:\n async with httpx.AsyncClient() as client:\n return (await client.get(f\"{url}/api/tags\")).status_code == HTTP_STATUS_OK\n except httpx.RequestError:\n return False\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "model_name": { - "tool_mode": false, - "trace_as_metadata": true, - "options": [ - "nomic-embed-text:latest", - "all-minilm:latest" - ], - "options_metadata": [], - "combobox": true, - "dialog_inputs": {}, - "toggle": false, - "required": true, - "placeholder": "", - "show": true, - "name": "model_name", - "value": "", - "display_name": "Ollama Model", - "advanced": false, - "dynamic": false, - "info": "", - "real_time_refresh": true, - "refresh_button": true, - "title_case": false, - "external_options": {}, - "type": "str", - "_input_type": "DropdownInput" - } - }, - "description": "Generate embeddings using Ollama models.", - "icon": "Ollama", - "base_classes": [ - "Embeddings" - ], - "display_name": "Ollama Embeddings", - "documentation": "https://python.langchain.com/docs/integrations/text_embedding/ollama", - "minimized": false, - "custom_fields": {}, - "output_types": [], - "pinned": false, - "conditional_paths": [], - "frozen": false, - "outputs": [ - { - "types": [ - "Embeddings" - ], - "selected": "Embeddings", - "name": "embeddings", - "display_name": "Embeddings", - "method": "build_embeddings", - "value": "__UNDEFINED__", - "cache": true, - 
"required_inputs": null, - "allows_loop": false, - "group_outputs": false, - "options": null, - "tool_mode": true - } - ], - "field_order": [ - "model_name", - "base_url" - ], - "beta": false, - "legacy": false, - "edited": false, - "metadata": { - "keywords": [ - "model", - "llm", - "language model", - "large language model" - ], - "module": "lfx.components.ollama.ollama_embeddings.OllamaEmbeddingsComponent", - "code_hash": "c41821735548", - "dependencies": { - "total_dependencies": 3, - "dependencies": [ - { - "name": "httpx", - "version": "0.28.1" - }, - { - "name": "langchain_ollama", - "version": "0.2.1" - }, - { - "name": "lfx", - "version": null - } - ] - } - }, - "tool_mode": false, - "last_updated": "2025-09-29T18:40:10.242Z", - "official": false - }, - "showNode": true, - "type": "OllamaEmbeddings", - "id": "OllamaEmbeddings-vnNn8" + "node": { + "template": { + "_type": "Component", + "base_url": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": true, + "list": false, + "list_add_label": "Add More", + "required": true, + "placeholder": "", + "show": true, + "name": "base_url", + "value": "OLLAMA_BASE_URL", + "display_name": "Ollama Base URL", + "advanced": false, + "input_types": ["Message"], + "dynamic": false, + "info": "", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" }, - "id": "OllamaEmbeddings-vnNn8", - "position": { - "x": 0, - "y": 0 + "code": { + "type": "code", + "required": true, + "placeholder": "", + "list": false, + "show": true, + "multiline": true, + "value": "from typing import Any\nfrom urllib.parse import urljoin\n\nimport httpx\nfrom langchain_ollama import OllamaEmbeddings\n\nfrom lfx.base.models.model import LCModelComponent\nfrom lfx.base.models.ollama_constants import OLLAMA_EMBEDDING_MODELS, URL_LIST\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import DropdownInput, MessageTextInput, Output\n\nHTTP_STATUS_OK = 200\n\n\nclass OllamaEmbeddingsComponent(LCModelComponent):\n display_name: str = \"Ollama Embeddings\"\n description: str = \"Generate embeddings using Ollama models.\"\n documentation = \"https://python.langchain.com/docs/integrations/text_embedding/ollama\"\n icon = \"Ollama\"\n name = \"OllamaEmbeddings\"\n\n inputs = [\n DropdownInput(\n name=\"model_name\",\n display_name=\"Ollama Model\",\n value=\"\",\n options=[],\n real_time_refresh=True,\n refresh_button=True,\n combobox=True,\n required=True,\n ),\n MessageTextInput(\n name=\"base_url\",\n display_name=\"Ollama Base URL\",\n value=\"\",\n required=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Embeddings\", name=\"embeddings\", method=\"build_embeddings\"),\n ]\n\n def build_embeddings(self) -> Embeddings:\n try:\n output = OllamaEmbeddings(model=self.model_name, base_url=self.base_url)\n except Exception as e:\n msg = (\n \"Unable to connect to the Ollama API. 
\",\n \"Please verify the base URL, ensure the relevant Ollama model is pulled, and try again.\",\n )\n raise ValueError(msg) from e\n return output\n\n async def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None):\n if field_name in {\"base_url\", \"model_name\"} and not await self.is_valid_ollama_url(field_value):\n # Check if any URL in the list is valid\n valid_url = \"\"\n for url in URL_LIST:\n if await self.is_valid_ollama_url(url):\n valid_url = url\n break\n build_config[\"base_url\"][\"value\"] = valid_url\n if field_name in {\"model_name\", \"base_url\", \"tool_model_enabled\"}:\n if await self.is_valid_ollama_url(self.base_url):\n build_config[\"model_name\"][\"options\"] = await self.get_model(self.base_url)\n elif await self.is_valid_ollama_url(build_config[\"base_url\"].get(\"value\", \"\")):\n build_config[\"model_name\"][\"options\"] = await self.get_model(build_config[\"base_url\"].get(\"value\", \"\"))\n else:\n build_config[\"model_name\"][\"options\"] = []\n\n return build_config\n\n async def get_model(self, base_url_value: str) -> list[str]:\n \"\"\"Get the model names from Ollama.\"\"\"\n model_ids = []\n try:\n url = urljoin(base_url_value, \"/api/tags\")\n async with httpx.AsyncClient() as client:\n response = await client.get(url)\n response.raise_for_status()\n data = response.json()\n\n model_ids = [model[\"name\"] for model in data.get(\"models\", [])]\n # this to ensure that not embedding models are included.\n # not even the base models since models can have 1b 2b etc\n # handles cases when embeddings models have tags like :latest - etc.\n model_ids = [\n model\n for model in model_ids\n if any(model.startswith(f\"{embedding_model}\") for embedding_model in OLLAMA_EMBEDDING_MODELS)\n ]\n\n except (ImportError, ValueError, httpx.RequestError) as e:\n msg = \"Could not get model names from Ollama.\"\n raise ValueError(msg) from e\n\n return model_ids\n\n async def is_valid_ollama_url(self, url: str) -> bool:\n try:\n async with httpx.AsyncClient() as client:\n return (await client.get(f\"{url}/api/tags\")).status_code == HTTP_STATUS_OK\n except httpx.RequestError:\n return False\n", + "fileTypes": [], + "file_path": "", + "password": false, + "name": "code", + "advanced": true, + "dynamic": true, + "info": "", + "load_from_db": false, + "title_case": false }, - "type": "genericNode" - } - ], - "viewport": { - "x": 1, - "y": 1, - "zoom": 1 - } + "model_name": { + "tool_mode": false, + "trace_as_metadata": true, + "options": ["nomic-embed-text:latest", "all-minilm:latest"], + "options_metadata": [], + "combobox": true, + "dialog_inputs": {}, + "toggle": false, + "required": true, + "placeholder": "", + "show": true, + "name": "model_name", + "value": "", + "display_name": "Ollama Model", + "advanced": false, + "dynamic": false, + "info": "", + "real_time_refresh": true, + "refresh_button": true, + "title_case": false, + "external_options": {}, + "type": "str", + "_input_type": "DropdownInput" + } + }, + "description": "Generate embeddings using Ollama models.", + "icon": "Ollama", + "base_classes": ["Embeddings"], + "display_name": "Ollama Embeddings", + "documentation": "https://python.langchain.com/docs/integrations/text_embedding/ollama", + "minimized": false, + "custom_fields": {}, + "output_types": [], + "pinned": false, + "conditional_paths": [], + "frozen": false, + "outputs": [ + { + "types": ["Embeddings"], + "selected": "Embeddings", + "name": "embeddings", + "display_name": "Embeddings", + "method": 
"build_embeddings", + "value": "__UNDEFINED__", + "cache": true, + "required_inputs": null, + "allows_loop": false, + "group_outputs": false, + "options": null, + "tool_mode": true + } + ], + "field_order": ["model_name", "base_url"], + "beta": false, + "legacy": false, + "edited": false, + "metadata": { + "keywords": [ + "model", + "llm", + "language model", + "large language model" + ], + "module": "lfx.components.ollama.ollama_embeddings.OllamaEmbeddingsComponent", + "code_hash": "c41821735548", + "dependencies": { + "total_dependencies": 3, + "dependencies": [ + { + "name": "httpx", + "version": "0.28.1" + }, + { + "name": "langchain_ollama", + "version": "0.2.1" + }, + { + "name": "lfx", + "version": null + } + ] + } + }, + "tool_mode": false, + "last_updated": "2025-09-29T18:40:10.242Z", + "official": false + }, + "showNode": true, + "type": "OllamaEmbeddings", + "id": "OllamaEmbeddings-vnNn8" }, - "description": "Generate embeddings using Ollama models.", - "name": "Ollama Embeddings", "id": "OllamaEmbeddings-vnNn8", - "is_component": true, - "last_tested_version": "1.6.0" + "position": { + "x": 0, + "y": 0 + }, + "type": "genericNode" } \ No newline at end of file diff --git a/flows/components/ollama_llm.json b/flows/components/ollama_llm.json index 44e9d347..4aa6ee62 100644 --- a/flows/components/ollama_llm.json +++ b/flows/components/ollama_llm.json @@ -1,8 +1,4 @@ { - "data": { - "edges": [], - "nodes": [ - { "data": { "node": { "template": { @@ -11,14 +7,14 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, - "load_from_db": false, + "load_from_db": true, "list": false, "list_add_label": "Add More", "required": false, "placeholder": "", "show": true, "name": "base_url", - "value": "", + "value": "OLLAMA_BASE_URL", "display_name": "Base URL", "advanced": false, "input_types": [ @@ -605,95 +601,82 @@ "cache": true, "required_inputs": null, "allows_loop": false, - "group_outputs": false, - "options": null, - "tool_mode": true - } - ], - "field_order": [ - "base_url", - "model_name", - "temperature", - "format", - "metadata", - "mirostat", - "mirostat_eta", - "mirostat_tau", - "num_ctx", - "num_gpu", - "num_thread", - "repeat_last_n", - "repeat_penalty", - "tfs_z", - "timeout", - "top_k", - "top_p", - "verbose", - "tags", - "stop_tokens", - "system", - "tool_model_enabled", - "template", - "input_value", - "system_message", - "stream" - ], - "beta": false, - "legacy": false, - "edited": false, - "metadata": { - "keywords": [ - "model", - "llm", - "language model", - "large language model" - ], - "module": "lfx.components.ollama.ollama.ChatOllamaComponent", - "code_hash": "54de3b5da388", - "dependencies": { - "total_dependencies": 3, - "dependencies": [ - { - "name": "httpx", - "version": "0.28.1" - }, - { - "name": "langchain_ollama", - "version": "0.2.1" - }, - { - "name": "lfx", - "version": null - } - ] - } + "group_outputs": false, + "options": null, + "tool_mode": true + } + ], + "field_order": [ + "base_url", + "model_name", + "temperature", + "format", + "metadata", + "mirostat", + "mirostat_eta", + "mirostat_tau", + "num_ctx", + "num_gpu", + "num_thread", + "repeat_last_n", + "repeat_penalty", + "tfs_z", + "timeout", + "top_k", + "top_p", + "verbose", + "tags", + "stop_tokens", + "system", + "tool_model_enabled", + "template", + "input_value", + "system_message", + "stream" + ], + "beta": false, + "legacy": false, + "edited": false, + "metadata": { + "keywords": [ + "model", + "llm", + "language model", + "large language model" + ], + "module": 
"lfx.components.ollama.ollama.ChatOllamaComponent", + "code_hash": "54de3b5da388", + "dependencies": { + "total_dependencies": 3, + "dependencies": [ + { + "name": "httpx", + "version": "0.28.1" }, - "tool_mode": false, - "last_updated": "2025-09-29T18:39:30.798Z", - "official": false - }, - "showNode": true, - "type": "OllamaModel", - "id": "OllamaModel-8Re0J", - "selected_output": "model_output" - }, - "id": "OllamaModel-8Re0J", - "position": { - "x": 0, - "y": 0 - }, - "type": "genericNode" - } - ], - "viewport": { - "x": 1, - "y": 1, - "zoom": 1 - } + { + "name": "langchain_ollama", + "version": "0.2.1" + }, + { + "name": "lfx", + "version": null + } + ] + } + }, + "tool_mode": false, + "last_updated": "2025-09-29T18:39:30.798Z", + "official": false + }, + "showNode": true, + "type": "OllamaModel", + "id": "OllamaModel-8Re0J", + "selected_output": "model_output" }, - "description": "Generate text using Ollama Local LLMs.", - "name": "Ollama", "id": "OllamaModel-8Re0J", - "is_component": true, - "last_tested_version": "1.6.0" + "position": { + "x": 0, + "y": 0 + }, + "type": "genericNode" } \ No newline at end of file diff --git a/flows/components/ollama_llm_text.json b/flows/components/ollama_llm_text.json index 5d8076ed..9b2b5482 100644 --- a/flows/components/ollama_llm_text.json +++ b/flows/components/ollama_llm_text.json @@ -1,700 +1,683 @@ { "data": { - "edges": [], - "nodes": [ - { - "data": { - "node": { - "template": { - "_type": "Component", - "base_url": { - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "load_from_db": false, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "base_url", - "value": "", - "display_name": "Base URL", - "advanced": false, - "input_types": [ - "Message" - ], - "dynamic": false, - "info": "Endpoint of the Ollama API.", - "real_time_refresh": true, - "title_case": false, - "type": "str", - "_input_type": "MessageTextInput" - }, - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "import asyncio\nfrom typing import Any\nfrom urllib.parse import urljoin\n\nimport httpx\nfrom langchain_ollama import ChatOllama\n\nfrom lfx.base.models.model import LCModelComponent\nfrom lfx.base.models.ollama_constants import URL_LIST\nfrom lfx.field_typing import LanguageModel\nfrom lfx.field_typing.range_spec import RangeSpec\nfrom lfx.io import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, MessageTextInput, SliderInput\nfrom lfx.log.logger import logger\n\nHTTP_STATUS_OK = 200\n\n\nclass ChatOllamaComponent(LCModelComponent):\n display_name = \"Ollama\"\n description = \"Generate text using Ollama Local LLMs.\"\n icon = \"Ollama\"\n name = \"OllamaModel\"\n\n # Define constants for JSON keys\n JSON_MODELS_KEY = \"models\"\n JSON_NAME_KEY = \"name\"\n JSON_CAPABILITIES_KEY = \"capabilities\"\n DESIRED_CAPABILITY = \"completion\"\n TOOL_CALLING_CAPABILITY = \"tools\"\n\n inputs = [\n MessageTextInput(\n name=\"base_url\",\n display_name=\"Base URL\",\n info=\"Endpoint of the Ollama API.\",\n value=\"\",\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=[],\n info=\"Refer to https://ollama.com/library for more models.\",\n refresh_button=True,\n real_time_refresh=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n 
advanced=True,\n ),\n MessageTextInput(\n name=\"format\", display_name=\"Format\", info=\"Specify the format of the output (e.g., json).\", advanced=True\n ),\n DictInput(name=\"metadata\", display_name=\"Metadata\", info=\"Metadata to add to the run trace.\", advanced=True),\n DropdownInput(\n name=\"mirostat\",\n display_name=\"Mirostat\",\n options=[\"Disabled\", \"Mirostat\", \"Mirostat 2.0\"],\n info=\"Enable/disable Mirostat sampling for controlling perplexity.\",\n value=\"Disabled\",\n advanced=True,\n real_time_refresh=True,\n ),\n FloatInput(\n name=\"mirostat_eta\",\n display_name=\"Mirostat Eta\",\n info=\"Learning rate for Mirostat algorithm. (Default: 0.1)\",\n advanced=True,\n ),\n FloatInput(\n name=\"mirostat_tau\",\n display_name=\"Mirostat Tau\",\n info=\"Controls the balance between coherence and diversity of the output. (Default: 5.0)\",\n advanced=True,\n ),\n IntInput(\n name=\"num_ctx\",\n display_name=\"Context Window Size\",\n info=\"Size of the context window for generating tokens. (Default: 2048)\",\n advanced=True,\n ),\n IntInput(\n name=\"num_gpu\",\n display_name=\"Number of GPUs\",\n info=\"Number of GPUs to use for computation. (Default: 1 on macOS, 0 to disable)\",\n advanced=True,\n ),\n IntInput(\n name=\"num_thread\",\n display_name=\"Number of Threads\",\n info=\"Number of threads to use during computation. (Default: detected for optimal performance)\",\n advanced=True,\n ),\n IntInput(\n name=\"repeat_last_n\",\n display_name=\"Repeat Last N\",\n info=\"How far back the model looks to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)\",\n advanced=True,\n ),\n FloatInput(\n name=\"repeat_penalty\",\n display_name=\"Repeat Penalty\",\n info=\"Penalty for repetitions in generated text. (Default: 1.1)\",\n advanced=True,\n ),\n FloatInput(name=\"tfs_z\", display_name=\"TFS Z\", info=\"Tail free sampling value. (Default: 1)\", advanced=True),\n IntInput(name=\"timeout\", display_name=\"Timeout\", info=\"Timeout for the request stream.\", advanced=True),\n IntInput(\n name=\"top_k\", display_name=\"Top K\", info=\"Limits token selection to top K. (Default: 40)\", advanced=True\n ),\n FloatInput(name=\"top_p\", display_name=\"Top P\", info=\"Works together with top-k. 
(Default: 0.9)\", advanced=True),\n BoolInput(name=\"verbose\", display_name=\"Verbose\", info=\"Whether to print out response text.\", advanced=True),\n MessageTextInput(\n name=\"tags\",\n display_name=\"Tags\",\n info=\"Comma-separated list of tags to add to the run trace.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"stop_tokens\",\n display_name=\"Stop Tokens\",\n info=\"Comma-separated list of tokens to signal the model to stop generating text.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"system\", display_name=\"System\", info=\"System to use for generating text.\", advanced=True\n ),\n BoolInput(\n name=\"tool_model_enabled\",\n display_name=\"Tool Model Enabled\",\n info=\"Whether to enable tool calling in the model.\",\n value=True,\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"template\", display_name=\"Template\", info=\"Template to use for generating text.\", advanced=True\n ),\n *LCModelComponent.get_base_inputs(),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # Mapping mirostat settings to their corresponding values\n mirostat_options = {\"Mirostat\": 1, \"Mirostat 2.0\": 2}\n\n # Default to 0 for 'Disabled'\n mirostat_value = mirostat_options.get(self.mirostat, 0)\n\n # Set mirostat_eta and mirostat_tau to None if mirostat is disabled\n if mirostat_value == 0:\n mirostat_eta = None\n mirostat_tau = None\n else:\n mirostat_eta = self.mirostat_eta\n mirostat_tau = self.mirostat_tau\n\n # Mapping system settings to their corresponding values\n llm_params = {\n \"base_url\": self.base_url,\n \"model\": self.model_name,\n \"mirostat\": mirostat_value,\n \"format\": self.format,\n \"metadata\": self.metadata,\n \"tags\": self.tags.split(\",\") if self.tags else None,\n \"mirostat_eta\": mirostat_eta,\n \"mirostat_tau\": mirostat_tau,\n \"num_ctx\": self.num_ctx or None,\n \"num_gpu\": self.num_gpu or None,\n \"num_thread\": self.num_thread or None,\n \"repeat_last_n\": self.repeat_last_n or None,\n \"repeat_penalty\": self.repeat_penalty or None,\n \"temperature\": self.temperature or None,\n \"stop\": self.stop_tokens.split(\",\") if self.stop_tokens else None,\n \"system\": self.system,\n \"tfs_z\": self.tfs_z or None,\n \"timeout\": self.timeout or None,\n \"top_k\": self.top_k or None,\n \"top_p\": self.top_p or None,\n \"verbose\": self.verbose,\n \"template\": self.template,\n }\n\n # Remove parameters with None values\n llm_params = {k: v for k, v in llm_params.items() if v is not None}\n\n try:\n output = ChatOllama(**llm_params)\n except Exception as e:\n msg = (\n \"Unable to connect to the Ollama API. 
\",\n \"Please verify the base URL, ensure the relevant Ollama model is pulled, and try again.\",\n )\n raise ValueError(msg) from e\n\n return output\n\n async def is_valid_ollama_url(self, url: str) -> bool:\n try:\n async with httpx.AsyncClient() as client:\n return (await client.get(urljoin(url, \"api/tags\"))).status_code == HTTP_STATUS_OK\n except httpx.RequestError:\n return False\n\n async def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None):\n if field_name == \"mirostat\":\n if field_value == \"Disabled\":\n build_config[\"mirostat_eta\"][\"advanced\"] = True\n build_config[\"mirostat_tau\"][\"advanced\"] = True\n build_config[\"mirostat_eta\"][\"value\"] = None\n build_config[\"mirostat_tau\"][\"value\"] = None\n\n else:\n build_config[\"mirostat_eta\"][\"advanced\"] = False\n build_config[\"mirostat_tau\"][\"advanced\"] = False\n\n if field_value == \"Mirostat 2.0\":\n build_config[\"mirostat_eta\"][\"value\"] = 0.2\n build_config[\"mirostat_tau\"][\"value\"] = 10\n else:\n build_config[\"mirostat_eta\"][\"value\"] = 0.1\n build_config[\"mirostat_tau\"][\"value\"] = 5\n\n if field_name in {\"base_url\", \"model_name\"}:\n if build_config[\"base_url\"].get(\"load_from_db\", False):\n base_url_value = await self.get_variables(build_config[\"base_url\"].get(\"value\", \"\"), \"base_url\")\n else:\n base_url_value = build_config[\"base_url\"].get(\"value\", \"\")\n\n if not await self.is_valid_ollama_url(base_url_value):\n # Check if any URL in the list is valid\n valid_url = \"\"\n check_urls = URL_LIST\n if self.base_url:\n check_urls = [self.base_url, *URL_LIST]\n for url in check_urls:\n if await self.is_valid_ollama_url(url):\n valid_url = url\n break\n if valid_url != \"\":\n build_config[\"base_url\"][\"value\"] = valid_url\n else:\n msg = \"No valid Ollama URL found.\"\n raise ValueError(msg)\n if field_name in {\"model_name\", \"base_url\", \"tool_model_enabled\"}:\n if await self.is_valid_ollama_url(self.base_url):\n tool_model_enabled = build_config[\"tool_model_enabled\"].get(\"value\", False) or self.tool_model_enabled\n build_config[\"model_name\"][\"options\"] = await self.get_models(\n self.base_url, tool_model_enabled=tool_model_enabled\n )\n elif await self.is_valid_ollama_url(build_config[\"base_url\"].get(\"value\", \"\")):\n tool_model_enabled = build_config[\"tool_model_enabled\"].get(\"value\", False) or self.tool_model_enabled\n build_config[\"model_name\"][\"options\"] = await self.get_models(\n build_config[\"base_url\"].get(\"value\", \"\"), tool_model_enabled=tool_model_enabled\n )\n else:\n build_config[\"model_name\"][\"options\"] = []\n if field_name == \"keep_alive_flag\":\n if field_value == \"Keep\":\n build_config[\"keep_alive\"][\"value\"] = \"-1\"\n build_config[\"keep_alive\"][\"advanced\"] = True\n elif field_value == \"Immediately\":\n build_config[\"keep_alive\"][\"value\"] = \"0\"\n build_config[\"keep_alive\"][\"advanced\"] = True\n else:\n build_config[\"keep_alive\"][\"advanced\"] = False\n\n return build_config\n\n async def get_models(self, base_url_value: str, *, tool_model_enabled: bool | None = None) -> list[str]:\n \"\"\"Fetches a list of models from the Ollama API that do not have the \"embedding\" capability.\n\n Args:\n base_url_value (str): The base URL of the Ollama API.\n tool_model_enabled (bool | None, optional): If True, filters the models further to include\n only those that support tool calling. 
Defaults to None.\n\n Returns:\n list[str]: A list of model names that do not have the \"embedding\" capability. If\n `tool_model_enabled` is True, only models supporting tool calling are included.\n\n Raises:\n ValueError: If there is an issue with the API request or response, or if the model\n names cannot be retrieved.\n \"\"\"\n try:\n # Normalize the base URL to avoid the repeated \"/\" at the end\n base_url = base_url_value.rstrip(\"/\") + \"/\"\n\n # Ollama REST API to return models\n tags_url = urljoin(base_url, \"api/tags\")\n\n # Ollama REST API to return model capabilities\n show_url = urljoin(base_url, \"api/show\")\n\n async with httpx.AsyncClient() as client:\n # Fetch available models\n tags_response = await client.get(tags_url)\n tags_response.raise_for_status()\n models = tags_response.json()\n if asyncio.iscoroutine(models):\n models = await models\n await logger.adebug(f\"Available models: {models}\")\n\n # Filter models that are NOT embedding models\n model_ids = []\n for model in models[self.JSON_MODELS_KEY]:\n model_name = model[self.JSON_NAME_KEY]\n await logger.adebug(f\"Checking model: {model_name}\")\n\n payload = {\"model\": model_name}\n show_response = await client.post(show_url, json=payload)\n show_response.raise_for_status()\n json_data = show_response.json()\n if asyncio.iscoroutine(json_data):\n json_data = await json_data\n capabilities = json_data.get(self.JSON_CAPABILITIES_KEY, [])\n await logger.adebug(f\"Model: {model_name}, Capabilities: {capabilities}\")\n\n if self.DESIRED_CAPABILITY in capabilities and (\n not tool_model_enabled or self.TOOL_CALLING_CAPABILITY in capabilities\n ):\n model_ids.append(model_name)\n\n except (httpx.RequestError, ValueError) as e:\n msg = \"Could not get model names from Ollama.\"\n raise ValueError(msg) from e\n\n return model_ids\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "format": { - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "load_from_db": false, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "format", - "value": "", - "display_name": "Format", - "advanced": true, - "input_types": [ - "Message" - ], - "dynamic": false, - "info": "Specify the format of the output (e.g., json).", - "title_case": false, - "type": "str", - "_input_type": "MessageTextInput" - }, - "input_value": { - "trace_as_input": true, - "tool_mode": false, - "trace_as_metadata": true, - "load_from_db": false, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "input_value", - "value": "", - "display_name": "Input", - "advanced": false, - "input_types": [ - "Message" - ], - "dynamic": false, - "info": "", - "title_case": false, - "type": "str", - "_input_type": "MessageInput" - }, - "metadata": { - "tool_mode": false, - "trace_as_input": true, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "metadata", - "value": {}, - "display_name": "Metadata", - "advanced": true, - "dynamic": false, - "info": "Metadata to add to the run trace.", - "title_case": false, - "type": "dict", - "_input_type": "DictInput" - }, - "mirostat": { - "tool_mode": false, - "trace_as_metadata": true, - "options": [ - "Disabled", - "Mirostat", - "Mirostat 2.0" - ], - "options_metadata": [], - 
"combobox": false, - "dialog_inputs": {}, - "toggle": false, - "required": false, - "placeholder": "", - "show": true, - "name": "mirostat", - "value": "Disabled", - "display_name": "Mirostat", - "advanced": true, - "dynamic": false, - "info": "Enable/disable Mirostat sampling for controlling perplexity.", - "real_time_refresh": true, - "title_case": false, - "external_options": {}, - "type": "str", - "_input_type": "DropdownInput" - }, - "mirostat_eta": { - "tool_mode": false, - "trace_as_metadata": true, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "mirostat_eta", - "value": "", - "display_name": "Mirostat Eta", - "advanced": true, - "dynamic": false, - "info": "Learning rate for Mirostat algorithm. (Default: 0.1)", - "title_case": false, - "type": "float", - "_input_type": "FloatInput" - }, - "mirostat_tau": { - "tool_mode": false, - "trace_as_metadata": true, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "mirostat_tau", - "value": "", - "display_name": "Mirostat Tau", - "advanced": true, - "dynamic": false, - "info": "Controls the balance between coherence and diversity of the output. (Default: 5.0)", - "title_case": false, - "type": "float", - "_input_type": "FloatInput" - }, - "model_name": { - "tool_mode": false, - "trace_as_metadata": true, - "options": [], - "options_metadata": [], - "combobox": false, - "dialog_inputs": {}, - "toggle": false, - "required": false, - "placeholder": "", - "show": true, - "name": "model_name", - "value": "", - "display_name": "Model Name", - "advanced": false, - "dynamic": false, - "info": "Refer to https://ollama.com/library for more models.", - "real_time_refresh": true, - "refresh_button": true, - "title_case": false, - "external_options": {}, - "type": "str", - "_input_type": "DropdownInput" - }, - "num_ctx": { - "tool_mode": false, - "trace_as_metadata": true, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "num_ctx", - "value": "", - "display_name": "Context Window Size", - "advanced": true, - "dynamic": false, - "info": "Size of the context window for generating tokens. (Default: 2048)", - "title_case": false, - "type": "int", - "_input_type": "IntInput" - }, - "num_gpu": { - "tool_mode": false, - "trace_as_metadata": true, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "num_gpu", - "value": "", - "display_name": "Number of GPUs", - "advanced": true, - "dynamic": false, - "info": "Number of GPUs to use for computation. (Default: 1 on macOS, 0 to disable)", - "title_case": false, - "type": "int", - "_input_type": "IntInput" - }, - "num_thread": { - "tool_mode": false, - "trace_as_metadata": true, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "num_thread", - "value": "", - "display_name": "Number of Threads", - "advanced": true, - "dynamic": false, - "info": "Number of threads to use during computation. 
(Default: detected for optimal performance)", - "title_case": false, - "type": "int", - "_input_type": "IntInput" - }, - "repeat_last_n": { - "tool_mode": false, - "trace_as_metadata": true, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "repeat_last_n", - "value": "", - "display_name": "Repeat Last N", - "advanced": true, - "dynamic": false, - "info": "How far back the model looks to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)", - "title_case": false, - "type": "int", - "_input_type": "IntInput" - }, - "repeat_penalty": { - "tool_mode": false, - "trace_as_metadata": true, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "repeat_penalty", - "value": "", - "display_name": "Repeat Penalty", - "advanced": true, - "dynamic": false, - "info": "Penalty for repetitions in generated text. (Default: 1.1)", - "title_case": false, - "type": "float", - "_input_type": "FloatInput" - }, - "stop_tokens": { - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "load_from_db": false, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "stop_tokens", - "value": "", - "display_name": "Stop Tokens", - "advanced": true, - "input_types": [ - "Message" - ], - "dynamic": false, - "info": "Comma-separated list of tokens to signal the model to stop generating text.", - "title_case": false, - "type": "str", - "_input_type": "MessageTextInput" - }, - "stream": { - "tool_mode": false, - "trace_as_metadata": true, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "stream", - "value": false, - "display_name": "Stream", - "advanced": true, - "dynamic": false, - "info": "Stream the response from the model. 
Streaming works only in Chat.", - "title_case": false, - "type": "bool", - "_input_type": "BoolInput" - }, - "system": { - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "load_from_db": false, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "system", - "value": "", - "display_name": "System", - "advanced": true, - "input_types": [ - "Message" - ], - "dynamic": false, - "info": "System to use for generating text.", - "title_case": false, - "type": "str", - "_input_type": "MessageTextInput" - }, - "system_message": { - "tool_mode": false, - "trace_as_input": true, - "multiline": true, - "trace_as_metadata": true, - "load_from_db": false, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "system_message", - "value": "", - "display_name": "System Message", - "advanced": false, - "input_types": [ - "Message" - ], - "dynamic": false, - "info": "System message to pass to the model.", - "title_case": false, - "copy_field": false, - "type": "str", - "_input_type": "MultilineInput" - }, - "tags": { - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "load_from_db": false, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "tags", - "value": "", - "display_name": "Tags", - "advanced": true, - "input_types": [ - "Message" - ], - "dynamic": false, - "info": "Comma-separated list of tags to add to the run trace.", - "title_case": false, - "type": "str", - "_input_type": "MessageTextInput" - }, - "temperature": { - "tool_mode": false, - "min_label": "", - "max_label": "", - "min_label_icon": "", - "max_label_icon": "", - "slider_buttons": false, - "slider_buttons_options": [], - "slider_input": false, - "range_spec": { - "step_type": "float", - "min": 0, - "max": 1, - "step": 0.01 - }, - "required": false, - "placeholder": "", - "show": true, - "name": "temperature", - "value": 0.1, - "display_name": "Temperature", - "advanced": true, - "dynamic": false, - "info": "", - "title_case": false, - "type": "slider", - "_input_type": "SliderInput" - }, - "template": { - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "load_from_db": false, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "template", - "value": "", - "display_name": "Template", - "advanced": true, - "input_types": [ - "Message" - ], - "dynamic": false, - "info": "Template to use for generating text.", - "title_case": false, - "type": "str", - "_input_type": "MessageTextInput" - }, - "tfs_z": { - "tool_mode": false, - "trace_as_metadata": true, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "tfs_z", - "value": "", - "display_name": "TFS Z", - "advanced": true, - "dynamic": false, - "info": "Tail free sampling value. 
(Default: 1)", - "title_case": false, - "type": "float", - "_input_type": "FloatInput" - }, - "timeout": { - "tool_mode": false, - "trace_as_metadata": true, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "timeout", - "value": "", - "display_name": "Timeout", - "advanced": true, - "dynamic": false, - "info": "Timeout for the request stream.", - "title_case": false, - "type": "int", - "_input_type": "IntInput" - }, - "tool_model_enabled": { - "tool_mode": false, - "trace_as_metadata": true, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "tool_model_enabled", - "value": true, - "display_name": "Tool Model Enabled", - "advanced": false, - "dynamic": false, - "info": "Whether to enable tool calling in the model.", - "real_time_refresh": true, - "title_case": false, - "type": "bool", - "_input_type": "BoolInput" - }, - "top_k": { - "tool_mode": false, - "trace_as_metadata": true, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "top_k", - "value": "", - "display_name": "Top K", - "advanced": true, - "dynamic": false, - "info": "Limits token selection to top K. (Default: 40)", - "title_case": false, - "type": "int", - "_input_type": "IntInput" - }, - "top_p": { - "tool_mode": false, - "trace_as_metadata": true, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "top_p", - "value": "", - "display_name": "Top P", - "advanced": true, - "dynamic": false, - "info": "Works together with top-k. (Default: 0.9)", - "title_case": false, - "type": "float", - "_input_type": "FloatInput" - }, - "verbose": { - "tool_mode": false, - "trace_as_metadata": true, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "verbose", - "value": false, - "display_name": "Verbose", - "advanced": true, - "dynamic": false, - "info": "Whether to print out response text.", - "title_case": false, - "type": "bool", - "_input_type": "BoolInput" - } - }, - "description": "Generate text using Ollama Local LLMs.", - "icon": "Ollama", - "base_classes": [ - "LanguageModel", - "Message" - ], - "display_name": "Ollama", - "documentation": "", - "minimized": false, - "custom_fields": {}, - "output_types": [], - "pinned": false, - "conditional_paths": [], - "frozen": false, - "outputs": [ - { - "types": [ - "Message" - ], - "selected": "Message", - "name": "text_output", - "display_name": "Model Response", - "method": "text_response", - "value": "__UNDEFINED__", - "cache": true, - "required_inputs": null, - "allows_loop": false, - "group_outputs": false, - "options": null, - "tool_mode": true - }, - { - "types": [ - "LanguageModel" - ], - "selected": "LanguageModel", - "name": "model_output", - "display_name": "Language Model", - "method": "build_model", - "value": "__UNDEFINED__", - "cache": true, - "required_inputs": null, - "allows_loop": false, - "group_outputs": false, - "options": null, - "tool_mode": true - } - ], - "field_order": [ - "base_url", - "model_name", - "temperature", - "format", - "metadata", - "mirostat", - "mirostat_eta", - "mirostat_tau", - "num_ctx", - "num_gpu", - "num_thread", - "repeat_last_n", - "repeat_penalty", - "tfs_z", - "timeout", - "top_k", - "top_p", - "verbose", - "tags", - "stop_tokens", - "system", - "tool_model_enabled", - "template", - "input_value", - 
"system_message", - "stream" - ], - "beta": false, - "legacy": false, - "edited": false, - "metadata": { - "keywords": [ - "model", - "llm", - "language model", - "large language model" - ], - "module": "lfx.components.ollama.ollama.ChatOllamaComponent", - "code_hash": "54de3b5da388", - "dependencies": { - "total_dependencies": 3, - "dependencies": [ - { - "name": "httpx", - "version": "0.28.1" - }, - { - "name": "langchain_ollama", - "version": "0.2.1" - }, - { - "name": "lfx", - "version": null - } - ] - } - }, - "tool_mode": false, - "last_updated": "2025-09-29T18:39:30.798Z", - "official": false + "node": { + "template": { + "_type": "Component", + "base_url": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "base_url", + "value": "OLLAMA_BASE_URL", + "display_name": "Base URL", + "advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "Endpoint of the Ollama API.", + "real_time_refresh": true, + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "code": { + "type": "code", + "required": true, + "placeholder": "", + "list": false, + "show": true, + "multiline": true, + "value": "import asyncio\nfrom typing import Any\nfrom urllib.parse import urljoin\n\nimport httpx\nfrom langchain_ollama import ChatOllama\n\nfrom lfx.base.models.model import LCModelComponent\nfrom lfx.base.models.ollama_constants import URL_LIST\nfrom lfx.field_typing import LanguageModel\nfrom lfx.field_typing.range_spec import RangeSpec\nfrom lfx.io import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, MessageTextInput, SliderInput\nfrom lfx.log.logger import logger\n\nHTTP_STATUS_OK = 200\n\n\nclass ChatOllamaComponent(LCModelComponent):\n display_name = \"Ollama\"\n description = \"Generate text using Ollama Local LLMs.\"\n icon = \"Ollama\"\n name = \"OllamaModel\"\n\n # Define constants for JSON keys\n JSON_MODELS_KEY = \"models\"\n JSON_NAME_KEY = \"name\"\n JSON_CAPABILITIES_KEY = \"capabilities\"\n DESIRED_CAPABILITY = \"completion\"\n TOOL_CALLING_CAPABILITY = \"tools\"\n\n inputs = [\n MessageTextInput(\n name=\"base_url\",\n display_name=\"Base URL\",\n info=\"Endpoint of the Ollama API.\",\n value=\"\",\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=[],\n info=\"Refer to https://ollama.com/library for more models.\",\n refresh_button=True,\n real_time_refresh=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n MessageTextInput(\n name=\"format\", display_name=\"Format\", info=\"Specify the format of the output (e.g., json).\", advanced=True\n ),\n DictInput(name=\"metadata\", display_name=\"Metadata\", info=\"Metadata to add to the run trace.\", advanced=True),\n DropdownInput(\n name=\"mirostat\",\n display_name=\"Mirostat\",\n options=[\"Disabled\", \"Mirostat\", \"Mirostat 2.0\"],\n info=\"Enable/disable Mirostat sampling for controlling perplexity.\",\n value=\"Disabled\",\n advanced=True,\n real_time_refresh=True,\n ),\n FloatInput(\n name=\"mirostat_eta\",\n display_name=\"Mirostat Eta\",\n info=\"Learning rate for Mirostat algorithm. 
(Default: 0.1)\",\n advanced=True,\n ),\n FloatInput(\n name=\"mirostat_tau\",\n display_name=\"Mirostat Tau\",\n info=\"Controls the balance between coherence and diversity of the output. (Default: 5.0)\",\n advanced=True,\n ),\n IntInput(\n name=\"num_ctx\",\n display_name=\"Context Window Size\",\n info=\"Size of the context window for generating tokens. (Default: 2048)\",\n advanced=True,\n ),\n IntInput(\n name=\"num_gpu\",\n display_name=\"Number of GPUs\",\n info=\"Number of GPUs to use for computation. (Default: 1 on macOS, 0 to disable)\",\n advanced=True,\n ),\n IntInput(\n name=\"num_thread\",\n display_name=\"Number of Threads\",\n info=\"Number of threads to use during computation. (Default: detected for optimal performance)\",\n advanced=True,\n ),\n IntInput(\n name=\"repeat_last_n\",\n display_name=\"Repeat Last N\",\n info=\"How far back the model looks to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)\",\n advanced=True,\n ),\n FloatInput(\n name=\"repeat_penalty\",\n display_name=\"Repeat Penalty\",\n info=\"Penalty for repetitions in generated text. (Default: 1.1)\",\n advanced=True,\n ),\n FloatInput(name=\"tfs_z\", display_name=\"TFS Z\", info=\"Tail free sampling value. (Default: 1)\", advanced=True),\n IntInput(name=\"timeout\", display_name=\"Timeout\", info=\"Timeout for the request stream.\", advanced=True),\n IntInput(\n name=\"top_k\", display_name=\"Top K\", info=\"Limits token selection to top K. (Default: 40)\", advanced=True\n ),\n FloatInput(name=\"top_p\", display_name=\"Top P\", info=\"Works together with top-k. (Default: 0.9)\", advanced=True),\n BoolInput(name=\"verbose\", display_name=\"Verbose\", info=\"Whether to print out response text.\", advanced=True),\n MessageTextInput(\n name=\"tags\",\n display_name=\"Tags\",\n info=\"Comma-separated list of tags to add to the run trace.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"stop_tokens\",\n display_name=\"Stop Tokens\",\n info=\"Comma-separated list of tokens to signal the model to stop generating text.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"system\", display_name=\"System\", info=\"System to use for generating text.\", advanced=True\n ),\n BoolInput(\n name=\"tool_model_enabled\",\n display_name=\"Tool Model Enabled\",\n info=\"Whether to enable tool calling in the model.\",\n value=True,\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"template\", display_name=\"Template\", info=\"Template to use for generating text.\", advanced=True\n ),\n *LCModelComponent.get_base_inputs(),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # Mapping mirostat settings to their corresponding values\n mirostat_options = {\"Mirostat\": 1, \"Mirostat 2.0\": 2}\n\n # Default to 0 for 'Disabled'\n mirostat_value = mirostat_options.get(self.mirostat, 0)\n\n # Set mirostat_eta and mirostat_tau to None if mirostat is disabled\n if mirostat_value == 0:\n mirostat_eta = None\n mirostat_tau = None\n else:\n mirostat_eta = self.mirostat_eta\n mirostat_tau = self.mirostat_tau\n\n # Mapping system settings to their corresponding values\n llm_params = {\n \"base_url\": self.base_url,\n \"model\": self.model_name,\n \"mirostat\": mirostat_value,\n \"format\": self.format,\n \"metadata\": self.metadata,\n \"tags\": self.tags.split(\",\") if self.tags else None,\n \"mirostat_eta\": mirostat_eta,\n \"mirostat_tau\": mirostat_tau,\n \"num_ctx\": self.num_ctx or None,\n \"num_gpu\": self.num_gpu or None,\n \"num_thread\": self.num_thread or None,\n 
\"repeat_last_n\": self.repeat_last_n or None,\n \"repeat_penalty\": self.repeat_penalty or None,\n \"temperature\": self.temperature or None,\n \"stop\": self.stop_tokens.split(\",\") if self.stop_tokens else None,\n \"system\": self.system,\n \"tfs_z\": self.tfs_z or None,\n \"timeout\": self.timeout or None,\n \"top_k\": self.top_k or None,\n \"top_p\": self.top_p or None,\n \"verbose\": self.verbose,\n \"template\": self.template,\n }\n\n # Remove parameters with None values\n llm_params = {k: v for k, v in llm_params.items() if v is not None}\n\n try:\n output = ChatOllama(**llm_params)\n except Exception as e:\n msg = (\n \"Unable to connect to the Ollama API. \",\n \"Please verify the base URL, ensure the relevant Ollama model is pulled, and try again.\",\n )\n raise ValueError(msg) from e\n\n return output\n\n async def is_valid_ollama_url(self, url: str) -> bool:\n try:\n async with httpx.AsyncClient() as client:\n return (await client.get(urljoin(url, \"api/tags\"))).status_code == HTTP_STATUS_OK\n except httpx.RequestError:\n return False\n\n async def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None):\n if field_name == \"mirostat\":\n if field_value == \"Disabled\":\n build_config[\"mirostat_eta\"][\"advanced\"] = True\n build_config[\"mirostat_tau\"][\"advanced\"] = True\n build_config[\"mirostat_eta\"][\"value\"] = None\n build_config[\"mirostat_tau\"][\"value\"] = None\n\n else:\n build_config[\"mirostat_eta\"][\"advanced\"] = False\n build_config[\"mirostat_tau\"][\"advanced\"] = False\n\n if field_value == \"Mirostat 2.0\":\n build_config[\"mirostat_eta\"][\"value\"] = 0.2\n build_config[\"mirostat_tau\"][\"value\"] = 10\n else:\n build_config[\"mirostat_eta\"][\"value\"] = 0.1\n build_config[\"mirostat_tau\"][\"value\"] = 5\n\n if field_name in {\"base_url\", \"model_name\"}:\n if build_config[\"base_url\"].get(\"load_from_db\", False):\n base_url_value = await self.get_variables(build_config[\"base_url\"].get(\"value\", \"\"), \"base_url\")\n else:\n base_url_value = build_config[\"base_url\"].get(\"value\", \"\")\n\n if not await self.is_valid_ollama_url(base_url_value):\n # Check if any URL in the list is valid\n valid_url = \"\"\n check_urls = URL_LIST\n if self.base_url:\n check_urls = [self.base_url, *URL_LIST]\n for url in check_urls:\n if await self.is_valid_ollama_url(url):\n valid_url = url\n break\n if valid_url != \"\":\n build_config[\"base_url\"][\"value\"] = valid_url\n else:\n msg = \"No valid Ollama URL found.\"\n raise ValueError(msg)\n if field_name in {\"model_name\", \"base_url\", \"tool_model_enabled\"}:\n if await self.is_valid_ollama_url(self.base_url):\n tool_model_enabled = build_config[\"tool_model_enabled\"].get(\"value\", False) or self.tool_model_enabled\n build_config[\"model_name\"][\"options\"] = await self.get_models(\n self.base_url, tool_model_enabled=tool_model_enabled\n )\n elif await self.is_valid_ollama_url(build_config[\"base_url\"].get(\"value\", \"\")):\n tool_model_enabled = build_config[\"tool_model_enabled\"].get(\"value\", False) or self.tool_model_enabled\n build_config[\"model_name\"][\"options\"] = await self.get_models(\n build_config[\"base_url\"].get(\"value\", \"\"), tool_model_enabled=tool_model_enabled\n )\n else:\n build_config[\"model_name\"][\"options\"] = []\n if field_name == \"keep_alive_flag\":\n if field_value == \"Keep\":\n build_config[\"keep_alive\"][\"value\"] = \"-1\"\n build_config[\"keep_alive\"][\"advanced\"] = True\n elif field_value == \"Immediately\":\n 
build_config[\"keep_alive\"][\"value\"] = \"0\"\n build_config[\"keep_alive\"][\"advanced\"] = True\n else:\n build_config[\"keep_alive\"][\"advanced\"] = False\n\n return build_config\n\n async def get_models(self, base_url_value: str, *, tool_model_enabled: bool | None = None) -> list[str]:\n \"\"\"Fetches a list of models from the Ollama API that do not have the \"embedding\" capability.\n\n Args:\n base_url_value (str): The base URL of the Ollama API.\n tool_model_enabled (bool | None, optional): If True, filters the models further to include\n only those that support tool calling. Defaults to None.\n\n Returns:\n list[str]: A list of model names that do not have the \"embedding\" capability. If\n `tool_model_enabled` is True, only models supporting tool calling are included.\n\n Raises:\n ValueError: If there is an issue with the API request or response, or if the model\n names cannot be retrieved.\n \"\"\"\n try:\n # Normalize the base URL to avoid the repeated \"/\" at the end\n base_url = base_url_value.rstrip(\"/\") + \"/\"\n\n # Ollama REST API to return models\n tags_url = urljoin(base_url, \"api/tags\")\n\n # Ollama REST API to return model capabilities\n show_url = urljoin(base_url, \"api/show\")\n\n async with httpx.AsyncClient() as client:\n # Fetch available models\n tags_response = await client.get(tags_url)\n tags_response.raise_for_status()\n models = tags_response.json()\n if asyncio.iscoroutine(models):\n models = await models\n await logger.adebug(f\"Available models: {models}\")\n\n # Filter models that are NOT embedding models\n model_ids = []\n for model in models[self.JSON_MODELS_KEY]:\n model_name = model[self.JSON_NAME_KEY]\n await logger.adebug(f\"Checking model: {model_name}\")\n\n payload = {\"model\": model_name}\n show_response = await client.post(show_url, json=payload)\n show_response.raise_for_status()\n json_data = show_response.json()\n if asyncio.iscoroutine(json_data):\n json_data = await json_data\n capabilities = json_data.get(self.JSON_CAPABILITIES_KEY, [])\n await logger.adebug(f\"Model: {model_name}, Capabilities: {capabilities}\")\n\n if self.DESIRED_CAPABILITY in capabilities and (\n not tool_model_enabled or self.TOOL_CALLING_CAPABILITY in capabilities\n ):\n model_ids.append(model_name)\n\n except (httpx.RequestError, ValueError) as e:\n msg = \"Could not get model names from Ollama.\"\n raise ValueError(msg) from e\n\n return model_ids\n", + "fileTypes": [], + "file_path": "", + "password": false, + "name": "code", + "advanced": true, + "dynamic": true, + "info": "", + "load_from_db": false, + "title_case": false + }, + "format": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "format", + "value": "", + "display_name": "Format", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "Specify the format of the output (e.g., json).", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "input_value": { + "trace_as_input": true, + "tool_mode": false, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "input_value", + "value": "", + "display_name": "Input", + "advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "", + "title_case": false, + "type": "str", + 
"_input_type": "MessageInput" + }, + "metadata": { + "tool_mode": false, + "trace_as_input": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "metadata", + "value": {}, + "display_name": "Metadata", + "advanced": true, + "dynamic": false, + "info": "Metadata to add to the run trace.", + "title_case": false, + "type": "dict", + "_input_type": "DictInput" + }, + "mirostat": { + "tool_mode": false, + "trace_as_metadata": true, + "options": [ + "Disabled", + "Mirostat", + "Mirostat 2.0" + ], + "options_metadata": [], + "combobox": false, + "dialog_inputs": {}, + "toggle": false, + "required": false, + "placeholder": "", + "show": true, + "name": "mirostat", + "value": "Disabled", + "display_name": "Mirostat", + "advanced": true, + "dynamic": false, + "info": "Enable/disable Mirostat sampling for controlling perplexity.", + "real_time_refresh": true, + "title_case": false, + "external_options": {}, + "type": "str", + "_input_type": "DropdownInput" + }, + "mirostat_eta": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "mirostat_eta", + "value": "", + "display_name": "Mirostat Eta", + "advanced": true, + "dynamic": false, + "info": "Learning rate for Mirostat algorithm. (Default: 0.1)", + "title_case": false, + "type": "float", + "_input_type": "FloatInput" + }, + "mirostat_tau": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "mirostat_tau", + "value": "", + "display_name": "Mirostat Tau", + "advanced": true, + "dynamic": false, + "info": "Controls the balance between coherence and diversity of the output. (Default: 5.0)", + "title_case": false, + "type": "float", + "_input_type": "FloatInput" + }, + "model_name": { + "tool_mode": false, + "trace_as_metadata": true, + "options": [], + "options_metadata": [], + "combobox": false, + "dialog_inputs": {}, + "toggle": false, + "required": false, + "placeholder": "", + "show": true, + "name": "model_name", + "value": "", + "display_name": "Model Name", + "advanced": false, + "dynamic": false, + "info": "Refer to https://ollama.com/library for more models.", + "real_time_refresh": true, + "refresh_button": true, + "title_case": false, + "external_options": {}, + "type": "str", + "_input_type": "DropdownInput" + }, + "num_ctx": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "num_ctx", + "value": "", + "display_name": "Context Window Size", + "advanced": true, + "dynamic": false, + "info": "Size of the context window for generating tokens. (Default: 2048)", + "title_case": false, + "type": "int", + "_input_type": "IntInput" + }, + "num_gpu": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "num_gpu", + "value": "", + "display_name": "Number of GPUs", + "advanced": true, + "dynamic": false, + "info": "Number of GPUs to use for computation. 
(Default: 1 on macOS, 0 to disable)", + "title_case": false, + "type": "int", + "_input_type": "IntInput" + }, + "num_thread": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "num_thread", + "value": "", + "display_name": "Number of Threads", + "advanced": true, + "dynamic": false, + "info": "Number of threads to use during computation. (Default: detected for optimal performance)", + "title_case": false, + "type": "int", + "_input_type": "IntInput" + }, + "repeat_last_n": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "repeat_last_n", + "value": "", + "display_name": "Repeat Last N", + "advanced": true, + "dynamic": false, + "info": "How far back the model looks to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)", + "title_case": false, + "type": "int", + "_input_type": "IntInput" + }, + "repeat_penalty": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "repeat_penalty", + "value": "", + "display_name": "Repeat Penalty", + "advanced": true, + "dynamic": false, + "info": "Penalty for repetitions in generated text. (Default: 1.1)", + "title_case": false, + "type": "float", + "_input_type": "FloatInput" + }, + "stop_tokens": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "stop_tokens", + "value": "", + "display_name": "Stop Tokens", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "Comma-separated list of tokens to signal the model to stop generating text.", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "stream": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "stream", + "value": false, + "display_name": "Stream", + "advanced": true, + "dynamic": false, + "info": "Stream the response from the model. 
Streaming works only in Chat.", + "title_case": false, + "type": "bool", + "_input_type": "BoolInput" + }, + "system": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "system", + "value": "", + "display_name": "System", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "System to use for generating text.", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "system_message": { + "tool_mode": false, + "trace_as_input": true, + "multiline": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "system_message", + "value": "", + "display_name": "System Message", + "advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "System message to pass to the model.", + "title_case": false, + "copy_field": false, + "type": "str", + "_input_type": "MultilineInput" + }, + "tags": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "tags", + "value": "", + "display_name": "Tags", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "Comma-separated list of tags to add to the run trace.", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "temperature": { + "tool_mode": false, + "min_label": "", + "max_label": "", + "min_label_icon": "", + "max_label_icon": "", + "slider_buttons": false, + "slider_buttons_options": [], + "slider_input": false, + "range_spec": { + "step_type": "float", + "min": 0, + "max": 1, + "step": 0.01 }, - "showNode": true, - "type": "OllamaModel", - "id": "OllamaModel-8Re0J", - "selected_output": "text_output" + "required": false, + "placeholder": "", + "show": true, + "name": "temperature", + "value": 0.1, + "display_name": "Temperature", + "advanced": true, + "dynamic": false, + "info": "", + "title_case": false, + "type": "slider", + "_input_type": "SliderInput" }, - "id": "OllamaModel-8Re0J", - "position": { - "x": 0, - "y": 0 + "template": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "template", + "value": "", + "display_name": "Template", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "Template to use for generating text.", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" }, - "type": "genericNode" - } - ], - "viewport": { - "x": 1, - "y": 1, - "zoom": 1 - } + "tfs_z": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "tfs_z", + "value": "", + "display_name": "TFS Z", + "advanced": true, + "dynamic": false, + "info": "Tail free sampling value. 
(Default: 1)", + "title_case": false, + "type": "float", + "_input_type": "FloatInput" + }, + "timeout": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "timeout", + "value": "", + "display_name": "Timeout", + "advanced": true, + "dynamic": false, + "info": "Timeout for the request stream.", + "title_case": false, + "type": "int", + "_input_type": "IntInput" + }, + "tool_model_enabled": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "tool_model_enabled", + "value": true, + "display_name": "Tool Model Enabled", + "advanced": false, + "dynamic": false, + "info": "Whether to enable tool calling in the model.", + "real_time_refresh": true, + "title_case": false, + "type": "bool", + "_input_type": "BoolInput" + }, + "top_k": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "top_k", + "value": "", + "display_name": "Top K", + "advanced": true, + "dynamic": false, + "info": "Limits token selection to top K. (Default: 40)", + "title_case": false, + "type": "int", + "_input_type": "IntInput" + }, + "top_p": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "top_p", + "value": "", + "display_name": "Top P", + "advanced": true, + "dynamic": false, + "info": "Works together with top-k. (Default: 0.9)", + "title_case": false, + "type": "float", + "_input_type": "FloatInput" + }, + "verbose": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "verbose", + "value": false, + "display_name": "Verbose", + "advanced": true, + "dynamic": false, + "info": "Whether to print out response text.", + "title_case": false, + "type": "bool", + "_input_type": "BoolInput" + } + }, + "description": "Generate text using Ollama Local LLMs.", + "icon": "Ollama", + "base_classes": [ + "LanguageModel", + "Message" + ], + "display_name": "Ollama", + "documentation": "", + "minimized": false, + "custom_fields": {}, + "output_types": [], + "pinned": false, + "conditional_paths": [], + "frozen": false, + "outputs": [ + { + "types": [ + "Message" + ], + "selected": "Message", + "name": "text_output", + "display_name": "Model Response", + "method": "text_response", + "value": "__UNDEFINED__", + "cache": true, + "required_inputs": null, + "allows_loop": false, + "group_outputs": false, + "options": null, + "tool_mode": true + }, + { + "types": [ + "LanguageModel" + ], + "selected": "LanguageModel", + "name": "model_output", + "display_name": "Language Model", + "method": "build_model", + "value": "__UNDEFINED__", + "cache": true, + "required_inputs": null, + "allows_loop": false, + "group_outputs": false, + "options": null, + "tool_mode": true + } + ], + "field_order": [ + "base_url", + "model_name", + "temperature", + "format", + "metadata", + "mirostat", + "mirostat_eta", + "mirostat_tau", + "num_ctx", + "num_gpu", + "num_thread", + "repeat_last_n", + "repeat_penalty", + "tfs_z", + "timeout", + "top_k", + "top_p", + "verbose", + "tags", + "stop_tokens", + "system", + "tool_model_enabled", + "template", + "input_value", + 
"system_message", + "stream" + ], + "beta": false, + "legacy": false, + "edited": false, + "metadata": { + "keywords": [ + "model", + "llm", + "language model", + "large language model" + ], + "module": "lfx.components.ollama.ollama.ChatOllamaComponent", + "code_hash": "54de3b5da388", + "dependencies": { + "total_dependencies": 3, + "dependencies": [ + { + "name": "httpx", + "version": "0.28.1" + }, + { + "name": "langchain_ollama", + "version": "0.2.1" + }, + { + "name": "lfx", + "version": null + } + ] + } + }, + "tool_mode": false, + "last_updated": "2025-09-29T18:39:30.798Z", + "official": false + }, + "showNode": true, + "type": "OllamaModel", + "id": "OllamaModel-8Re0J", + "selected_output": "text_output" }, - "description": "Generate text using Ollama Local LLMs.", - "name": "Ollama", "id": "OllamaModel-8Re0J", - "is_component": true, - "last_tested_version": "1.6.0" + "position": { + "x": 0, + "y": 0 + }, + "type": "genericNode" } \ No newline at end of file diff --git a/flows/components/watsonx_embedding.json b/flows/components/watsonx_embedding.json index 31376819..1d76c046 100644 --- a/flows/components/watsonx_embedding.json +++ b/flows/components/watsonx_embedding.json @@ -1 +1,207 @@ -{"data":{"edges":[],"nodes":[{"data":{"node":{"template":{"_type":"Component","api_key":{"load_from_db":false,"required":true,"placeholder":"","show":true,"name":"api_key","value":"","display_name":"Watsonx API Key","advanced":false,"input_types":[],"dynamic":false,"info":"The API Key to use for the model.","title_case":false,"password":true,"type":"str","_input_type":"SecretStrInput"},"code":{"type":"code","required":true,"placeholder":"","list":false,"show":true,"multiline":true,"value":"from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai import APIClient, Credentials\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_ibm import WatsonxEmbeddings\nfrom pydantic.v1 import SecretStr\n\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import BoolInput, DropdownInput, IntInput, SecretStrInput, StrInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\n\n\nclass WatsonxEmbeddingsComponent(LCEmbeddingsModel):\n display_name = \"IBM watsonx.ai Embeddings\"\n description = \"Generate embeddings using IBM watsonx.ai models.\"\n icon = \"WatsonxAI\"\n name = \"WatsonxEmbeddingsComponent\"\n\n # models present in all the regions\n _default_models = [\n \"sentence-transformers/all-minilm-l12-v2\",\n \"ibm/slate-125m-english-rtrvr-v2\",\n \"ibm/slate-30m-english-rtrvr-v2\",\n \"intfloat/multilingual-e5-large\",\n ]\n\n inputs = [\n DropdownInput(\n name=\"url\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API.\",\n value=None,\n options=[\n \"https://us-south.ml.cloud.ibm.com\",\n \"https://eu-de.ml.cloud.ibm.com\",\n \"https://eu-gb.ml.cloud.ibm.com\",\n \"https://au-syd.ml.cloud.ibm.com\",\n \"https://jp-tok.ml.cloud.ibm.com\",\n \"https://ca-tor.ml.cloud.ibm.com\",\n ],\n real_time_refresh=True,\n ),\n StrInput(\n name=\"project_id\",\n display_name=\"watsonx project id\",\n info=\"The project ID or deployment space ID that is associated with the foundation model.\",\n required=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Watsonx API Key\",\n info=\"The API Key to use for the model.\",\n required=True,\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=[],\n value=None,\n dynamic=True,\n 
required=True,\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n ),\n ]\n\n @staticmethod\n def fetch_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WatsonxEmbeddingsComponent._default_models\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None):\n \"\"\"Update model options when URL or API key changes.\"\"\"\n logger.debug(\n \"Updating build config. Field name: %s, Field value: %s\",\n field_name,\n field_value,\n )\n\n if field_name == \"url\" and field_value:\n try:\n models = self.fetch_models(base_url=build_config.url.value)\n build_config.model_name.options = models\n if build_config.model_name.value:\n build_config.model_name.value = models[0]\n info_message = f\"Updated model options: {len(models)} models found in {build_config.url.value}\"\n logger.info(info_message)\n except Exception: # noqa: BLE001\n logger.exception(\"Error updating model options.\")\n\n def build_embeddings(self) -> Embeddings:\n credentials = Credentials(\n api_key=SecretStr(self.api_key).get_secret_value(),\n url=self.url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n return WatsonxEmbeddings(\n model_id=self.model_name,\n params=params,\n watsonx_client=api_client,\n project_id=self.project_id,\n )\n","fileTypes":[],"file_path":"","password":false,"name":"code","advanced":true,"dynamic":true,"info":"","load_from_db":false,"title_case":false},"input_text":{"tool_mode":false,"trace_as_metadata":true,"list":false,"list_add_label":"Add More","required":false,"placeholder":"","show":true,"name":"input_text","value":true,"display_name":"Include the original text in the output","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"bool","_input_type":"BoolInput"},"model_name":{"tool_mode":false,"trace_as_metadata":true,"options":[],"options_metadata":[],"combobox":false,"dialog_inputs":{},"toggle":false,"required":true,"placeholder":"","show":true,"name":"model_name","display_name":"Model Name","advanced":false,"dynamic":true,"info":"","title_case":false,"external_options":{},"type":"str","_input_type":"DropdownInput"},"project_id":{"tool_mode":false,"trace_as_metadata":true,"load_from_db":false,"list":false,"list_add_label":"Add More","required":true,"placeholder":"","show":true,"name":"project_id","value":"","display_name":"watsonx project id","advanced":false,"dynamic":false,"info":"The project ID or deployment space ID that is associated with the foundation model.","title_case":false,"type":"str","_input_type":"StrInput"},"truncate_input_tokens":{"tool_mode":false,"trace_as_metadata":true,"list":false,"list_add_label":"Add 
More","required":false,"placeholder":"","show":true,"name":"truncate_input_tokens","value":200,"display_name":"Truncate Input Tokens","advanced":true,"dynamic":false,"info":"","title_case":false,"type":"int","_input_type":"IntInput"},"url":{"tool_mode":false,"trace_as_metadata":true,"options":["https://us-south.ml.cloud.ibm.com","https://eu-de.ml.cloud.ibm.com","https://eu-gb.ml.cloud.ibm.com","https://au-syd.ml.cloud.ibm.com","https://jp-tok.ml.cloud.ibm.com","https://ca-tor.ml.cloud.ibm.com"],"options_metadata":[],"combobox":false,"dialog_inputs":{},"toggle":false,"required":false,"placeholder":"","show":true,"name":"url","display_name":"watsonx API Endpoint","advanced":false,"dynamic":false,"info":"The base URL of the API.","real_time_refresh":true,"title_case":false,"external_options":{},"type":"str","_input_type":"DropdownInput"}},"description":"Generate embeddings using IBM watsonx.ai models.","icon":"WatsonxAI","base_classes":["Embeddings"],"display_name":"IBM watsonx.ai Embeddings","documentation":"","minimized":false,"custom_fields":{},"output_types":[],"pinned":false,"conditional_paths":[],"frozen":false,"outputs":[{"types":["Embeddings"],"selected":"Embeddings","name":"embeddings","display_name":"Embedding Model","method":"build_embeddings","value":"__UNDEFINED__","cache":true,"allows_loop":false,"group_outputs":false,"tool_mode":true}],"field_order":["url","project_id","api_key","model_name","truncate_input_tokens","input_text"],"beta":false,"legacy":false,"edited":false,"metadata":{"module":"lfx.components.ibm.watsonx_embeddings.WatsonxEmbeddingsComponent","code_hash":"ffded413ea90","dependencies":{"total_dependencies":5,"dependencies":[{"name":"requests","version":"2.32.5"},{"name":"ibm_watsonx_ai","version":"1.3.34"},{"name":"langchain_ibm","version":"0.3.16"},{"name":"pydantic","version":"2.10.6"},{"name":"lfx","version":null}]}},"tool_mode":false,"official":false},"showNode":true,"type":"WatsonxEmbeddingsComponent","id":"WatsonxEmbeddingsComponent-q67FN"},"id":"WatsonxEmbeddingsComponent-q67FN","position":{"x":0,"y":0},"type":"genericNode"}],"viewport":{"x":1,"y":1,"zoom":1}},"description":"Generate embeddings using IBM watsonx.ai models.","name":"IBM watsonx.ai Embeddings","id":"WatsonxEmbeddingsComponent-q67FN","is_component":true,"last_tested_version":"1.6.0"} \ No newline at end of file +{ +"data": { + "node": { + "template": { + "_type": "Component", + "api_key": { + "load_from_db": true, + "required": true, + "placeholder": "", + "show": true, + "name": "api_key", + "value": "WATSONX_API_KEY", + "display_name": "Watsonx API Key", + "advanced": false, + "input_types": [], + "dynamic": false, + "info": "The API Key to use for the model.", + "title_case": false, + "password": true, + "type": "str", + "_input_type": "SecretStrInput" + }, + "code": { + "type": "code", + "required": true, + "placeholder": "", + "list": false, + "show": true, + "multiline": true, + "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai import APIClient, Credentials\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_ibm import WatsonxEmbeddings\nfrom pydantic.v1 import SecretStr\n\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import BoolInput, DropdownInput, IntInput, SecretStrInput, StrInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\n\n\nclass WatsonxEmbeddingsComponent(LCEmbeddingsModel):\n display_name = \"IBM watsonx.ai Embeddings\"\n description 
= \"Generate embeddings using IBM watsonx.ai models.\"\n icon = \"WatsonxAI\"\n name = \"WatsonxEmbeddingsComponent\"\n\n # models present in all the regions\n _default_models = [\n \"sentence-transformers/all-minilm-l12-v2\",\n \"ibm/slate-125m-english-rtrvr-v2\",\n \"ibm/slate-30m-english-rtrvr-v2\",\n \"intfloat/multilingual-e5-large\",\n ]\n\n inputs = [\n DropdownInput(\n name=\"url\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API.\",\n value=None,\n options=[\n \"https://us-south.ml.cloud.ibm.com\",\n \"https://eu-de.ml.cloud.ibm.com\",\n \"https://eu-gb.ml.cloud.ibm.com\",\n \"https://au-syd.ml.cloud.ibm.com\",\n \"https://jp-tok.ml.cloud.ibm.com\",\n \"https://ca-tor.ml.cloud.ibm.com\",\n ],\n real_time_refresh=True,\n ),\n StrInput(\n name=\"project_id\",\n display_name=\"watsonx project id\",\n info=\"The project ID or deployment space ID that is associated with the foundation model.\",\n required=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Watsonx API Key\",\n info=\"The API Key to use for the model.\",\n required=True,\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=[],\n value=None,\n dynamic=True,\n required=True,\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n ),\n ]\n\n @staticmethod\n def fetch_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WatsonxEmbeddingsComponent._default_models\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None):\n \"\"\"Update model options when URL or API key changes.\"\"\"\n logger.debug(\n \"Updating build config. 
Field name: %s, Field value: %s\",\n field_name,\n field_value,\n )\n\n if field_name == \"url\" and field_value:\n try:\n models = self.fetch_models(base_url=build_config.url.value)\n build_config.model_name.options = models\n if build_config.model_name.value:\n build_config.model_name.value = models[0]\n info_message = f\"Updated model options: {len(models)} models found in {build_config.url.value}\"\n logger.info(info_message)\n except Exception: # noqa: BLE001\n logger.exception(\"Error updating model options.\")\n\n def build_embeddings(self) -> Embeddings:\n credentials = Credentials(\n api_key=SecretStr(self.api_key).get_secret_value(),\n url=self.url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n return WatsonxEmbeddings(\n model_id=self.model_name,\n params=params,\n watsonx_client=api_client,\n project_id=self.project_id,\n )\n", + "fileTypes": [], + "file_path": "", + "password": false, + "name": "code", + "advanced": true, + "dynamic": true, + "info": "", + "load_from_db": false, + "title_case": false + }, + "input_text": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "input_text", + "value": true, + "display_name": "Include the original text in the output", + "advanced": true, + "dynamic": false, + "info": "", + "title_case": false, + "type": "bool", + "_input_type": "BoolInput" + }, + "model_name": { + "tool_mode": false, + "trace_as_metadata": true, + "options": [], + "options_metadata": [], + "combobox": false, + "dialog_inputs": {}, + "toggle": false, + "required": true, + "placeholder": "", + "show": true, + "name": "model_name", + "display_name": "Model Name", + "advanced": false, + "dynamic": true, + "info": "", + "title_case": false, + "external_options": {}, + "type": "str", + "_input_type": "DropdownInput" + }, + "project_id": { + "tool_mode": false, + "trace_as_metadata": true, + "load_from_db": true, + "list": false, + "list_add_label": "Add More", + "required": true, + "placeholder": "", + "show": true, + "name": "project_id", + "value": "WATSONX_PROJECT_ID", + "display_name": "watsonx project id", + "advanced": false, + "dynamic": false, + "info": "The project ID or deployment space ID that is associated with the foundation model.", + "title_case": false, + "type": "str", + "_input_type": "StrInput" + }, + "truncate_input_tokens": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "truncate_input_tokens", + "value": 200, + "display_name": "Truncate Input Tokens", + "advanced": true, + "dynamic": false, + "info": "", + "title_case": false, + "type": "int", + "_input_type": "IntInput" + }, + "url": { + "tool_mode": false, + "trace_as_metadata": true, + "options": [ + "https://us-south.ml.cloud.ibm.com", + "https://eu-de.ml.cloud.ibm.com", + "https://eu-gb.ml.cloud.ibm.com", + "https://au-syd.ml.cloud.ibm.com", + "https://jp-tok.ml.cloud.ibm.com", + "https://ca-tor.ml.cloud.ibm.com" + ], + "options_metadata": [], + "combobox": false, + "dialog_inputs": {}, + "toggle": false, + "required": false, + "placeholder": "", + "show": true, + "name": "url", + "display_name": "watsonx API Endpoint", + "advanced": false, + "dynamic": false, + "info": "The base URL 
of the API.", + "real_time_refresh": true, + "title_case": false, + "external_options": {}, + "type": "str", + "_input_type": "DropdownInput" + } + }, + "description": "Generate embeddings using IBM watsonx.ai models.", + "icon": "WatsonxAI", + "base_classes": ["Embeddings"], + "display_name": "IBM watsonx.ai Embeddings", + "documentation": "", + "minimized": false, + "custom_fields": {}, + "output_types": [], + "pinned": false, + "conditional_paths": [], + "frozen": false, + "outputs": [ + { + "types": ["Embeddings"], + "selected": "Embeddings", + "name": "embeddings", + "display_name": "Embedding Model", + "method": "build_embeddings", + "value": "__UNDEFINED__", + "cache": true, + "allows_loop": false, + "group_outputs": false, + "tool_mode": true + } + ], + "field_order": [ + "url", + "project_id", + "api_key", + "model_name", + "truncate_input_tokens", + "input_text" + ], + "beta": false, + "legacy": false, + "edited": false, + "metadata": { + "module": "lfx.components.ibm.watsonx_embeddings.WatsonxEmbeddingsComponent", + "code_hash": "ffded413ea90", + "dependencies": { + "total_dependencies": 5, + "dependencies": [ + { "name": "requests", "version": "2.32.5" }, + { "name": "ibm_watsonx_ai", "version": "1.3.34" }, + { "name": "langchain_ibm", "version": "0.3.16" }, + { "name": "pydantic", "version": "2.10.6" }, + { "name": "lfx", "version": null } + ] + } + }, + "tool_mode": false, + "official": false + }, + "showNode": true, + "type": "WatsonxEmbeddingsComponent", + "id": "WatsonxEmbeddingsComponent-q67FN" +}, +"id": "WatsonxEmbeddingsComponent-q67FN", +"position": { "x": 0, "y": 0 }, +"type": "genericNode" +} \ No newline at end of file diff --git a/flows/components/watsonx_llm.json b/flows/components/watsonx_llm.json index 3d02d1fe..61baac42 100644 --- a/flows/components/watsonx_llm.json +++ b/flows/components/watsonx_llm.json @@ -1,538 +1,521 @@ { "data": { - "edges": [], - "nodes": [ - { - "data": { - "node": { - "template": { - "_type": "Component", - "api_key": { - "load_from_db": false, - "required": true, - "placeholder": "", - "show": true, - "name": "api_key", - "value": "", - "display_name": "Watsonx API Key", - "advanced": false, - "input_types": [], - "dynamic": false, - "info": "The API Key to use for the model.", - "title_case": false, - "password": true, - "type": "str", - "_input_type": "SecretStrInput" - }, - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "import json\nfrom typing import Any\n\nimport requests\nfrom langchain_ibm import ChatWatsonx\nfrom pydantic.v1 import SecretStr\n\nfrom lfx.base.models.model import LCModelComponent\nfrom lfx.field_typing import LanguageModel\nfrom lfx.field_typing.range_spec import RangeSpec\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\n\n\nclass WatsonxAIComponent(LCModelComponent):\n display_name = \"IBM watsonx.ai\"\n description = \"Generate text using IBM watsonx.ai foundation models.\"\n icon = \"WatsonxAI\"\n name = \"IBMwatsonxModel\"\n beta = False\n\n _default_models = [\"ibm/granite-3-2b-instruct\", \"ibm/granite-3-8b-instruct\", \"ibm/granite-13b-instruct-v2\"]\n\n inputs = [\n *LCModelComponent.get_base_inputs(),\n DropdownInput(\n name=\"url\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API.\",\n value=None,\n options=[\n \"https://us-south.ml.cloud.ibm.com\",\n 
\"https://eu-de.ml.cloud.ibm.com\",\n \"https://eu-gb.ml.cloud.ibm.com\",\n \"https://au-syd.ml.cloud.ibm.com\",\n \"https://jp-tok.ml.cloud.ibm.com\",\n \"https://ca-tor.ml.cloud.ibm.com\",\n ],\n real_time_refresh=True,\n ),\n StrInput(\n name=\"project_id\",\n display_name=\"watsonx Project ID\",\n required=True,\n info=\"The project ID or deployment space ID that is associated with the foundation model.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Watsonx API Key\",\n info=\"The API Key to use for the model.\",\n required=True,\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=[],\n value=None,\n dynamic=True,\n required=True,\n ),\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate.\",\n range_spec=RangeSpec(min=1, max=4096),\n value=1000,\n ),\n StrInput(\n name=\"stop_sequence\",\n display_name=\"Stop Sequence\",\n advanced=True,\n info=\"Sequence where generation should stop.\",\n field_type=\"str\",\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n info=\"Controls randomness, higher values increase diversity.\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=2, step=0.01),\n advanced=True,\n ),\n SliderInput(\n name=\"top_p\",\n display_name=\"Top P\",\n info=\"The cumulative probability cutoff for token selection. \"\n \"Lower values mean sampling from a smaller, more top-weighted nucleus.\",\n value=0.9,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n SliderInput(\n name=\"frequency_penalty\",\n display_name=\"Frequency Penalty\",\n info=\"Penalty for frequency of token usage.\",\n value=0.5,\n range_spec=RangeSpec(min=-2.0, max=2.0, step=0.01),\n advanced=True,\n ),\n SliderInput(\n name=\"presence_penalty\",\n display_name=\"Presence Penalty\",\n info=\"Penalty for token presence in prior text.\",\n value=0.3,\n range_spec=RangeSpec(min=-2.0, max=2.0, step=0.01),\n advanced=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Random Seed\",\n advanced=True,\n info=\"The random seed for the model.\",\n value=8,\n ),\n BoolInput(\n name=\"logprobs\",\n display_name=\"Log Probabilities\",\n advanced=True,\n info=\"Whether to return log probabilities of the output tokens.\",\n value=True,\n ),\n IntInput(\n name=\"top_logprobs\",\n display_name=\"Top Log Probabilities\",\n advanced=True,\n info=\"Number of most likely tokens to return at each position.\",\n value=3,\n range_spec=RangeSpec(min=1, max=20),\n ),\n StrInput(\n name=\"logit_bias\",\n display_name=\"Logit Bias\",\n advanced=True,\n info='JSON string of token IDs to bias or suppress (e.g., {\"1003\": -100, \"1004\": 100}).',\n field_type=\"str\",\n ),\n ]\n\n @staticmethod\n def fetch_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\"version\": \"2024-09-16\", \"filters\": \"function_text_chat,!lifecycle_withdrawn\"}\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models. 
Using default models.\")\n return WatsonxAIComponent._default_models\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None):\n \"\"\"Update model options when URL or API key changes.\"\"\"\n logger.info(\"Updating build config. Field name: %s, Field value: %s\", field_name, field_value)\n\n if field_name == \"url\" and field_value:\n try:\n models = self.fetch_models(base_url=build_config.url.value)\n build_config.model_name.options = models\n if build_config.model_name.value:\n build_config.model_name.value = models[0]\n info_message = f\"Updated model options: {len(models)} models found in {build_config.url.value}\"\n logger.info(info_message)\n except Exception: # noqa: BLE001\n logger.exception(\"Error updating model options.\")\n\n def build_model(self) -> LanguageModel:\n # Parse logit_bias from JSON string if provided\n logit_bias = None\n if hasattr(self, \"logit_bias\") and self.logit_bias:\n try:\n logit_bias = json.loads(self.logit_bias)\n except json.JSONDecodeError:\n logger.warning(\"Invalid logit_bias JSON format. Using default instead.\")\n logit_bias = {\"1003\": -100, \"1004\": -100}\n\n chat_params = {\n \"max_tokens\": getattr(self, \"max_tokens\", None),\n \"temperature\": getattr(self, \"temperature\", None),\n \"top_p\": getattr(self, \"top_p\", None),\n \"frequency_penalty\": getattr(self, \"frequency_penalty\", None),\n \"presence_penalty\": getattr(self, \"presence_penalty\", None),\n \"seed\": getattr(self, \"seed\", None),\n \"stop\": [self.stop_sequence] if self.stop_sequence else [],\n \"n\": 1,\n \"logprobs\": getattr(self, \"logprobs\", True),\n \"top_logprobs\": getattr(self, \"top_logprobs\", None),\n \"time_limit\": 600000,\n \"logit_bias\": logit_bias,\n }\n\n return ChatWatsonx(\n apikey=SecretStr(self.api_key).get_secret_value(),\n url=self.url,\n project_id=self.project_id,\n model_id=self.model_name,\n params=chat_params,\n streaming=self.stream,\n )\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "frequency_penalty": { - "tool_mode": false, - "min_label": "", - "max_label": "", - "min_label_icon": "", - "max_label_icon": "", - "slider_buttons": false, - "slider_buttons_options": [], - "slider_input": false, - "range_spec": { - "step_type": "float", - "min": -2, - "max": 2, - "step": 0.01 - }, - "required": false, - "placeholder": "", - "show": true, - "name": "frequency_penalty", - "value": 0.5, - "display_name": "Frequency Penalty", - "advanced": true, - "dynamic": false, - "info": "Penalty for frequency of token usage.", - "title_case": false, - "type": "slider", - "_input_type": "SliderInput" - }, - "input_value": { - "trace_as_input": true, - "tool_mode": false, - "trace_as_metadata": true, - "load_from_db": false, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "input_value", - "value": "", - "display_name": "Input", - "advanced": false, - "input_types": [ - "Message" - ], - "dynamic": false, - "info": "", - "title_case": false, - "type": "str", - "_input_type": "MessageInput" - }, - "logit_bias": { - "tool_mode": false, - "trace_as_metadata": true, - "load_from_db": false, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "logit_bias", - "value": "", - "display_name": "Logit Bias", - "advanced": true, - "dynamic": false, - 
"info": "JSON string of token IDs to bias or suppress (e.g., {\"1003\": -100, \"1004\": 100}).", - "title_case": false, - "type": "str", - "_input_type": "StrInput" - }, - "logprobs": { - "tool_mode": false, - "trace_as_metadata": true, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "logprobs", - "value": true, - "display_name": "Log Probabilities", - "advanced": true, - "dynamic": false, - "info": "Whether to return log probabilities of the output tokens.", - "title_case": false, - "type": "bool", - "_input_type": "BoolInput" - }, - "max_tokens": { - "tool_mode": false, - "trace_as_metadata": true, - "range_spec": { - "step_type": "float", - "min": 1, - "max": 4096, - "step": 0.1 - }, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "max_tokens", - "value": 1000, - "display_name": "Max Tokens", - "advanced": true, - "dynamic": false, - "info": "The maximum number of tokens to generate.", - "title_case": false, - "type": "int", - "_input_type": "IntInput" - }, - "model_name": { - "tool_mode": false, - "trace_as_metadata": true, - "options": [], - "options_metadata": [], - "combobox": false, - "dialog_inputs": {}, - "toggle": false, - "required": true, - "placeholder": "", - "show": true, - "name": "model_name", - "display_name": "Model Name", - "advanced": false, - "dynamic": true, - "info": "", - "title_case": false, - "external_options": {}, - "type": "str", - "_input_type": "DropdownInput" - }, - "presence_penalty": { - "tool_mode": false, - "min_label": "", - "max_label": "", - "min_label_icon": "", - "max_label_icon": "", - "slider_buttons": false, - "slider_buttons_options": [], - "slider_input": false, - "range_spec": { - "step_type": "float", - "min": -2, - "max": 2, - "step": 0.01 - }, - "required": false, - "placeholder": "", - "show": true, - "name": "presence_penalty", - "value": 0.3, - "display_name": "Presence Penalty", - "advanced": true, - "dynamic": false, - "info": "Penalty for token presence in prior text.", - "title_case": false, - "type": "slider", - "_input_type": "SliderInput" - }, - "project_id": { - "tool_mode": false, - "trace_as_metadata": true, - "load_from_db": false, - "list": false, - "list_add_label": "Add More", - "required": true, - "placeholder": "", - "show": true, - "name": "project_id", - "value": "", - "display_name": "watsonx Project ID", - "advanced": false, - "dynamic": false, - "info": "The project ID or deployment space ID that is associated with the foundation model.", - "title_case": false, - "type": "str", - "_input_type": "StrInput" - }, - "seed": { - "tool_mode": false, - "trace_as_metadata": true, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "seed", - "value": 8, - "display_name": "Random Seed", - "advanced": true, - "dynamic": false, - "info": "The random seed for the model.", - "title_case": false, - "type": "int", - "_input_type": "IntInput" - }, - "stop_sequence": { - "tool_mode": false, - "trace_as_metadata": true, - "load_from_db": false, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "stop_sequence", - "value": "", - "display_name": "Stop Sequence", - "advanced": true, - "dynamic": false, - "info": "Sequence where generation should stop.", - "title_case": false, - "type": "str", - "_input_type": "StrInput" - }, - "stream": { - "tool_mode": false, - 
"trace_as_metadata": true, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "stream", - "value": false, - "display_name": "Stream", - "advanced": true, - "dynamic": false, - "info": "Stream the response from the model. Streaming works only in Chat.", - "title_case": false, - "type": "bool", - "_input_type": "BoolInput" - }, - "system_message": { - "tool_mode": false, - "trace_as_input": true, - "multiline": true, - "trace_as_metadata": true, - "load_from_db": false, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "system_message", - "value": "", - "display_name": "System Message", - "advanced": false, - "input_types": [ - "Message" - ], - "dynamic": false, - "info": "System message to pass to the model.", - "title_case": false, - "copy_field": false, - "type": "str", - "_input_type": "MultilineInput" - }, - "temperature": { - "tool_mode": false, - "min_label": "", - "max_label": "", - "min_label_icon": "", - "max_label_icon": "", - "slider_buttons": false, - "slider_buttons_options": [], - "slider_input": false, - "range_spec": { - "step_type": "float", - "min": 0, - "max": 2, - "step": 0.01 - }, - "required": false, - "placeholder": "", - "show": true, - "name": "temperature", - "value": 0.1, - "display_name": "Temperature", - "advanced": true, - "dynamic": false, - "info": "Controls randomness, higher values increase diversity.", - "title_case": false, - "type": "slider", - "_input_type": "SliderInput" - }, - "top_logprobs": { - "tool_mode": false, - "trace_as_metadata": true, - "range_spec": { - "step_type": "float", - "min": 1, - "max": 20, - "step": 0.1 - }, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "top_logprobs", - "value": 3, - "display_name": "Top Log Probabilities", - "advanced": true, - "dynamic": false, - "info": "Number of most likely tokens to return at each position.", - "title_case": false, - "type": "int", - "_input_type": "IntInput" - }, - "top_p": { - "tool_mode": false, - "min_label": "", - "max_label": "", - "min_label_icon": "", - "max_label_icon": "", - "slider_buttons": false, - "slider_buttons_options": [], - "slider_input": false, - "range_spec": { - "step_type": "float", - "min": 0, - "max": 1, - "step": 0.01 - }, - "required": false, - "placeholder": "", - "show": true, - "name": "top_p", - "value": 0.9, - "display_name": "Top P", - "advanced": true, - "dynamic": false, - "info": "The cumulative probability cutoff for token selection. 
Lower values mean sampling from a smaller, more top-weighted nucleus.", - "title_case": false, - "type": "slider", - "_input_type": "SliderInput" - }, - "url": { - "tool_mode": false, - "trace_as_metadata": true, - "options": [ - "https://us-south.ml.cloud.ibm.com", - "https://eu-de.ml.cloud.ibm.com", - "https://eu-gb.ml.cloud.ibm.com", - "https://au-syd.ml.cloud.ibm.com", - "https://jp-tok.ml.cloud.ibm.com", - "https://ca-tor.ml.cloud.ibm.com" - ], - "options_metadata": [], - "combobox": false, - "dialog_inputs": {}, - "toggle": false, - "required": false, - "placeholder": "", - "show": true, - "name": "url", - "display_name": "watsonx API Endpoint", - "advanced": false, - "dynamic": false, - "info": "The base URL of the API.", - "real_time_refresh": true, - "title_case": false, - "external_options": {}, - "type": "str", - "_input_type": "DropdownInput" - } - }, - "description": "Generate text using IBM watsonx.ai foundation models.", - "icon": "WatsonxAI", - "base_classes": [ - "LanguageModel", - "Message" - ], - "display_name": "IBM watsonx.ai", - "documentation": "", - "minimized": false, - "custom_fields": {}, - "output_types": [], - "pinned": false, - "conditional_paths": [], - "frozen": false, - "outputs": [ - { - "types": [ - "Message" - ], - "name": "text_output", - "display_name": "Model Response", - "method": "text_response", - "value": "__UNDEFINED__", - "cache": true, - "allows_loop": false, - "group_outputs": false, - "tool_mode": true - }, - { - "types": [ - "LanguageModel" - ], - "selected": "LanguageModel", - "name": "model_output", - "display_name": "Language Model", - "method": "build_model", - "value": "__UNDEFINED__", - "cache": true, - "allows_loop": false, - "group_outputs": false, - "tool_mode": true - } - ], - "field_order": [ - "input_value", - "system_message", - "stream", - "url", - "project_id", - "api_key", - "model_name", - "max_tokens", - "stop_sequence", - "temperature", - "top_p", - "frequency_penalty", - "presence_penalty", - "seed", - "logprobs", - "top_logprobs", - "logit_bias" - ], - "beta": false, - "legacy": false, - "edited": false, - "metadata": { - "keywords": [ - "model", - "llm", - "language model", - "large language model" - ], - "module": "lfx.components.ibm.watsonx.WatsonxAIComponent", - "code_hash": "85c24939214c", - "dependencies": { - "total_dependencies": 4, - "dependencies": [ - { - "name": "requests", - "version": "2.32.5" - }, - { - "name": "langchain_ibm", - "version": "0.3.16" - }, - { - "name": "pydantic", - "version": "2.10.6" - }, - { - "name": "lfx", - "version": null - } - ] - } - }, - "tool_mode": false, - "official": false + "node": { + "template": { + "_type": "Component", + "api_key": { + "load_from_db": true, + "required": true, + "placeholder": "", + "show": true, + "name": "api_key", + "value": "WATSONX_API_KEY", + "display_name": "Watsonx API Key", + "advanced": false, + "input_types": [], + "dynamic": false, + "info": "The API Key to use for the model.", + "title_case": false, + "password": true, + "type": "str", + "_input_type": "SecretStrInput" + }, + "code": { + "type": "code", + "required": true, + "placeholder": "", + "list": false, + "show": true, + "multiline": true, + "value": "import json\nfrom typing import Any\n\nimport requests\nfrom langchain_ibm import ChatWatsonx\nfrom pydantic.v1 import SecretStr\n\nfrom lfx.base.models.model import LCModelComponent\nfrom lfx.field_typing import LanguageModel\nfrom lfx.field_typing.range_spec import RangeSpec\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, 
IntInput, SecretStrInput, SliderInput, StrInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\n\n\nclass WatsonxAIComponent(LCModelComponent):\n display_name = \"IBM watsonx.ai\"\n description = \"Generate text using IBM watsonx.ai foundation models.\"\n icon = \"WatsonxAI\"\n name = \"IBMwatsonxModel\"\n beta = False\n\n _default_models = [\"ibm/granite-3-2b-instruct\", \"ibm/granite-3-8b-instruct\", \"ibm/granite-13b-instruct-v2\"]\n\n inputs = [\n *LCModelComponent.get_base_inputs(),\n DropdownInput(\n name=\"url\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API.\",\n value=None,\n options=[\n \"https://us-south.ml.cloud.ibm.com\",\n \"https://eu-de.ml.cloud.ibm.com\",\n \"https://eu-gb.ml.cloud.ibm.com\",\n \"https://au-syd.ml.cloud.ibm.com\",\n \"https://jp-tok.ml.cloud.ibm.com\",\n \"https://ca-tor.ml.cloud.ibm.com\",\n ],\n real_time_refresh=True,\n ),\n StrInput(\n name=\"project_id\",\n display_name=\"watsonx Project ID\",\n required=True,\n info=\"The project ID or deployment space ID that is associated with the foundation model.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Watsonx API Key\",\n info=\"The API Key to use for the model.\",\n required=True,\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=[],\n value=None,\n dynamic=True,\n required=True,\n ),\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate.\",\n range_spec=RangeSpec(min=1, max=4096),\n value=1000,\n ),\n StrInput(\n name=\"stop_sequence\",\n display_name=\"Stop Sequence\",\n advanced=True,\n info=\"Sequence where generation should stop.\",\n field_type=\"str\",\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n info=\"Controls randomness, higher values increase diversity.\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=2, step=0.01),\n advanced=True,\n ),\n SliderInput(\n name=\"top_p\",\n display_name=\"Top P\",\n info=\"The cumulative probability cutoff for token selection. 
\"\n \"Lower values mean sampling from a smaller, more top-weighted nucleus.\",\n value=0.9,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n SliderInput(\n name=\"frequency_penalty\",\n display_name=\"Frequency Penalty\",\n info=\"Penalty for frequency of token usage.\",\n value=0.5,\n range_spec=RangeSpec(min=-2.0, max=2.0, step=0.01),\n advanced=True,\n ),\n SliderInput(\n name=\"presence_penalty\",\n display_name=\"Presence Penalty\",\n info=\"Penalty for token presence in prior text.\",\n value=0.3,\n range_spec=RangeSpec(min=-2.0, max=2.0, step=0.01),\n advanced=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Random Seed\",\n advanced=True,\n info=\"The random seed for the model.\",\n value=8,\n ),\n BoolInput(\n name=\"logprobs\",\n display_name=\"Log Probabilities\",\n advanced=True,\n info=\"Whether to return log probabilities of the output tokens.\",\n value=True,\n ),\n IntInput(\n name=\"top_logprobs\",\n display_name=\"Top Log Probabilities\",\n advanced=True,\n info=\"Number of most likely tokens to return at each position.\",\n value=3,\n range_spec=RangeSpec(min=1, max=20),\n ),\n StrInput(\n name=\"logit_bias\",\n display_name=\"Logit Bias\",\n advanced=True,\n info='JSON string of token IDs to bias or suppress (e.g., {\"1003\": -100, \"1004\": 100}).',\n field_type=\"str\",\n ),\n ]\n\n @staticmethod\n def fetch_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\"version\": \"2024-09-16\", \"filters\": \"function_text_chat,!lifecycle_withdrawn\"}\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models. Using default models.\")\n return WatsonxAIComponent._default_models\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None):\n \"\"\"Update model options when URL or API key changes.\"\"\"\n logger.info(\"Updating build config. Field name: %s, Field value: %s\", field_name, field_value)\n\n if field_name == \"url\" and field_value:\n try:\n models = self.fetch_models(base_url=build_config.url.value)\n build_config.model_name.options = models\n if build_config.model_name.value:\n build_config.model_name.value = models[0]\n info_message = f\"Updated model options: {len(models)} models found in {build_config.url.value}\"\n logger.info(info_message)\n except Exception: # noqa: BLE001\n logger.exception(\"Error updating model options.\")\n\n def build_model(self) -> LanguageModel:\n # Parse logit_bias from JSON string if provided\n logit_bias = None\n if hasattr(self, \"logit_bias\") and self.logit_bias:\n try:\n logit_bias = json.loads(self.logit_bias)\n except json.JSONDecodeError:\n logger.warning(\"Invalid logit_bias JSON format. 
Using default instead.\")\n logit_bias = {\"1003\": -100, \"1004\": -100}\n\n chat_params = {\n \"max_tokens\": getattr(self, \"max_tokens\", None),\n \"temperature\": getattr(self, \"temperature\", None),\n \"top_p\": getattr(self, \"top_p\", None),\n \"frequency_penalty\": getattr(self, \"frequency_penalty\", None),\n \"presence_penalty\": getattr(self, \"presence_penalty\", None),\n \"seed\": getattr(self, \"seed\", None),\n \"stop\": [self.stop_sequence] if self.stop_sequence else [],\n \"n\": 1,\n \"logprobs\": getattr(self, \"logprobs\", True),\n \"top_logprobs\": getattr(self, \"top_logprobs\", None),\n \"time_limit\": 600000,\n \"logit_bias\": logit_bias,\n }\n\n return ChatWatsonx(\n apikey=SecretStr(self.api_key).get_secret_value(),\n url=self.url,\n project_id=self.project_id,\n model_id=self.model_name,\n params=chat_params,\n streaming=self.stream,\n )\n", + "fileTypes": [], + "file_path": "", + "password": false, + "name": "code", + "advanced": true, + "dynamic": true, + "info": "", + "load_from_db": false, + "title_case": false + }, + "frequency_penalty": { + "tool_mode": false, + "min_label": "", + "max_label": "", + "min_label_icon": "", + "max_label_icon": "", + "slider_buttons": false, + "slider_buttons_options": [], + "slider_input": false, + "range_spec": { + "step_type": "float", + "min": -2, + "max": 2, + "step": 0.01 }, - "showNode": true, - "type": "IBMwatsonxModel", - "id": "IBMwatsonxModel-qXZxc", - "selected_output": "model_output" + "required": false, + "placeholder": "", + "show": true, + "name": "frequency_penalty", + "value": 0.5, + "display_name": "Frequency Penalty", + "advanced": true, + "dynamic": false, + "info": "Penalty for frequency of token usage.", + "title_case": false, + "type": "slider", + "_input_type": "SliderInput" }, - "id": "IBMwatsonxModel-qXZxc", - "position": { - "x": 0, - "y": 0 + "input_value": { + "trace_as_input": true, + "tool_mode": false, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "input_value", + "value": "", + "display_name": "Input", + "advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "", + "title_case": false, + "type": "str", + "_input_type": "MessageInput" }, - "type": "genericNode" - } - ], - "viewport": { - "x": 1, - "y": 1, - "zoom": 1 - } + "logit_bias": { + "tool_mode": false, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "logit_bias", + "value": "", + "display_name": "Logit Bias", + "advanced": true, + "dynamic": false, + "info": "JSON string of token IDs to bias or suppress (e.g., {\"1003\": -100, \"1004\": 100}).", + "title_case": false, + "type": "str", + "_input_type": "StrInput" + }, + "logprobs": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "logprobs", + "value": true, + "display_name": "Log Probabilities", + "advanced": true, + "dynamic": false, + "info": "Whether to return log probabilities of the output tokens.", + "title_case": false, + "type": "bool", + "_input_type": "BoolInput" + }, + "max_tokens": { + "tool_mode": false, + "trace_as_metadata": true, + "range_spec": { + "step_type": "float", + "min": 1, + "max": 4096, + "step": 0.1 + }, + "list": false, + "list_add_label": "Add More", + "required": 
false, + "placeholder": "", + "show": true, + "name": "max_tokens", + "value": 1000, + "display_name": "Max Tokens", + "advanced": true, + "dynamic": false, + "info": "The maximum number of tokens to generate.", + "title_case": false, + "type": "int", + "_input_type": "IntInput" + }, + "model_name": { + "tool_mode": false, + "trace_as_metadata": true, + "options": [], + "options_metadata": [], + "combobox": false, + "dialog_inputs": {}, + "toggle": false, + "required": true, + "placeholder": "", + "show": true, + "name": "model_name", + "display_name": "Model Name", + "advanced": false, + "dynamic": true, + "info": "", + "title_case": false, + "external_options": {}, + "type": "str", + "_input_type": "DropdownInput" + }, + "presence_penalty": { + "tool_mode": false, + "min_label": "", + "max_label": "", + "min_label_icon": "", + "max_label_icon": "", + "slider_buttons": false, + "slider_buttons_options": [], + "slider_input": false, + "range_spec": { + "step_type": "float", + "min": -2, + "max": 2, + "step": 0.01 + }, + "required": false, + "placeholder": "", + "show": true, + "name": "presence_penalty", + "value": 0.3, + "display_name": "Presence Penalty", + "advanced": true, + "dynamic": false, + "info": "Penalty for token presence in prior text.", + "title_case": false, + "type": "slider", + "_input_type": "SliderInput" + }, + "project_id": { + "tool_mode": false, + "trace_as_metadata": true, + "load_from_db": true, + "list": false, + "list_add_label": "Add More", + "required": true, + "placeholder": "", + "show": true, + "name": "project_id", + "value": "WATSONX_PROJECT_ID", + "display_name": "watsonx Project ID", + "advanced": false, + "dynamic": false, + "info": "The project ID or deployment space ID that is associated with the foundation model.", + "title_case": false, + "type": "str", + "_input_type": "StrInput" + }, + "seed": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "seed", + "value": 8, + "display_name": "Random Seed", + "advanced": true, + "dynamic": false, + "info": "The random seed for the model.", + "title_case": false, + "type": "int", + "_input_type": "IntInput" + }, + "stop_sequence": { + "tool_mode": false, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "stop_sequence", + "value": "", + "display_name": "Stop Sequence", + "advanced": true, + "dynamic": false, + "info": "Sequence where generation should stop.", + "title_case": false, + "type": "str", + "_input_type": "StrInput" + }, + "stream": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "stream", + "value": false, + "display_name": "Stream", + "advanced": true, + "dynamic": false, + "info": "Stream the response from the model. 
Streaming works only in Chat.", + "title_case": false, + "type": "bool", + "_input_type": "BoolInput" + }, + "system_message": { + "tool_mode": false, + "trace_as_input": true, + "multiline": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "system_message", + "value": "", + "display_name": "System Message", + "advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "System message to pass to the model.", + "title_case": false, + "copy_field": false, + "type": "str", + "_input_type": "MultilineInput" + }, + "temperature": { + "tool_mode": false, + "min_label": "", + "max_label": "", + "min_label_icon": "", + "max_label_icon": "", + "slider_buttons": false, + "slider_buttons_options": [], + "slider_input": false, + "range_spec": { + "step_type": "float", + "min": 0, + "max": 2, + "step": 0.01 + }, + "required": false, + "placeholder": "", + "show": true, + "name": "temperature", + "value": 0.1, + "display_name": "Temperature", + "advanced": true, + "dynamic": false, + "info": "Controls randomness, higher values increase diversity.", + "title_case": false, + "type": "slider", + "_input_type": "SliderInput" + }, + "top_logprobs": { + "tool_mode": false, + "trace_as_metadata": true, + "range_spec": { + "step_type": "float", + "min": 1, + "max": 20, + "step": 0.1 + }, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "top_logprobs", + "value": 3, + "display_name": "Top Log Probabilities", + "advanced": true, + "dynamic": false, + "info": "Number of most likely tokens to return at each position.", + "title_case": false, + "type": "int", + "_input_type": "IntInput" + }, + "top_p": { + "tool_mode": false, + "min_label": "", + "max_label": "", + "min_label_icon": "", + "max_label_icon": "", + "slider_buttons": false, + "slider_buttons_options": [], + "slider_input": false, + "range_spec": { + "step_type": "float", + "min": 0, + "max": 1, + "step": 0.01 + }, + "required": false, + "placeholder": "", + "show": true, + "name": "top_p", + "value": 0.9, + "display_name": "Top P", + "advanced": true, + "dynamic": false, + "info": "The cumulative probability cutoff for token selection. 
Lower values mean sampling from a smaller, more top-weighted nucleus.", + "title_case": false, + "type": "slider", + "_input_type": "SliderInput" + }, + "url": { + "tool_mode": false, + "trace_as_metadata": true, + "options": [ + "https://us-south.ml.cloud.ibm.com", + "https://eu-de.ml.cloud.ibm.com", + "https://eu-gb.ml.cloud.ibm.com", + "https://au-syd.ml.cloud.ibm.com", + "https://jp-tok.ml.cloud.ibm.com", + "https://ca-tor.ml.cloud.ibm.com" + ], + "options_metadata": [], + "combobox": false, + "dialog_inputs": {}, + "toggle": false, + "required": false, + "placeholder": "", + "show": true, + "name": "url", + "display_name": "watsonx API Endpoint", + "advanced": false, + "dynamic": false, + "info": "The base URL of the API.", + "real_time_refresh": true, + "title_case": false, + "external_options": {}, + "type": "str", + "_input_type": "DropdownInput" + } + }, + "description": "Generate text using IBM watsonx.ai foundation models.", + "icon": "WatsonxAI", + "base_classes": [ + "LanguageModel", + "Message" + ], + "display_name": "IBM watsonx.ai", + "documentation": "", + "minimized": false, + "custom_fields": {}, + "output_types": [], + "pinned": false, + "conditional_paths": [], + "frozen": false, + "outputs": [ + { + "types": [ + "Message" + ], + "name": "text_output", + "display_name": "Model Response", + "method": "text_response", + "value": "__UNDEFINED__", + "cache": true, + "allows_loop": false, + "group_outputs": false, + "tool_mode": true + }, + { + "types": [ + "LanguageModel" + ], + "selected": "LanguageModel", + "name": "model_output", + "display_name": "Language Model", + "method": "build_model", + "value": "__UNDEFINED__", + "cache": true, + "allows_loop": false, + "group_outputs": false, + "tool_mode": true + } + ], + "field_order": [ + "input_value", + "system_message", + "stream", + "url", + "project_id", + "api_key", + "model_name", + "max_tokens", + "stop_sequence", + "temperature", + "top_p", + "frequency_penalty", + "presence_penalty", + "seed", + "logprobs", + "top_logprobs", + "logit_bias" + ], + "beta": false, + "legacy": false, + "edited": false, + "metadata": { + "keywords": [ + "model", + "llm", + "language model", + "large language model" + ], + "module": "lfx.components.ibm.watsonx.WatsonxAIComponent", + "code_hash": "85c24939214c", + "dependencies": { + "total_dependencies": 4, + "dependencies": [ + { + "name": "requests", + "version": "2.32.5" + }, + { + "name": "langchain_ibm", + "version": "0.3.16" + }, + { + "name": "pydantic", + "version": "2.10.6" + }, + { + "name": "lfx", + "version": null + } + ] + } + }, + "tool_mode": false, + "official": false + }, + "showNode": true, + "type": "IBMwatsonxModel", + "id": "IBMwatsonxModel-qXZxc", + "selected_output": "model_output" }, - "description": "Generate text using IBM watsonx.ai foundation models.", - "name": "IBM watsonx.ai", "id": "IBMwatsonxModel-qXZxc", - "is_component": true, - "last_tested_version": "1.6.0" + "position": { + "x": 0, + "y": 0 + }, + "type": "genericNode" } \ No newline at end of file diff --git a/flows/components/watsonx_llm_text.json b/flows/components/watsonx_llm_text.json index a1116bf1..f0f42455 100644 --- a/flows/components/watsonx_llm_text.json +++ b/flows/components/watsonx_llm_text.json @@ -1,538 +1,521 @@ { "data": { - "edges": [], - "nodes": [ - { - "data": { - "node": { - "template": { - "_type": "Component", - "api_key": { - "load_from_db": false, - "required": true, - "placeholder": "", - "show": true, - "name": "api_key", - "value": "", - "display_name": "Watsonx API 
Key", - "advanced": false, - "input_types": [], - "dynamic": false, - "info": "The API Key to use for the model.", - "title_case": false, - "password": true, - "type": "str", - "_input_type": "SecretStrInput" - }, - "code": { - "type": "code", - "required": true, - "placeholder": "", - "list": false, - "show": true, - "multiline": true, - "value": "import json\nfrom typing import Any\n\nimport requests\nfrom langchain_ibm import ChatWatsonx\nfrom pydantic.v1 import SecretStr\n\nfrom lfx.base.models.model import LCModelComponent\nfrom lfx.field_typing import LanguageModel\nfrom lfx.field_typing.range_spec import RangeSpec\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\n\n\nclass WatsonxAIComponent(LCModelComponent):\n display_name = \"IBM watsonx.ai\"\n description = \"Generate text using IBM watsonx.ai foundation models.\"\n icon = \"WatsonxAI\"\n name = \"IBMwatsonxModel\"\n beta = False\n\n _default_models = [\"ibm/granite-3-2b-instruct\", \"ibm/granite-3-8b-instruct\", \"ibm/granite-13b-instruct-v2\"]\n\n inputs = [\n *LCModelComponent.get_base_inputs(),\n DropdownInput(\n name=\"url\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API.\",\n value=None,\n options=[\n \"https://us-south.ml.cloud.ibm.com\",\n \"https://eu-de.ml.cloud.ibm.com\",\n \"https://eu-gb.ml.cloud.ibm.com\",\n \"https://au-syd.ml.cloud.ibm.com\",\n \"https://jp-tok.ml.cloud.ibm.com\",\n \"https://ca-tor.ml.cloud.ibm.com\",\n ],\n real_time_refresh=True,\n ),\n StrInput(\n name=\"project_id\",\n display_name=\"watsonx Project ID\",\n required=True,\n info=\"The project ID or deployment space ID that is associated with the foundation model.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Watsonx API Key\",\n info=\"The API Key to use for the model.\",\n required=True,\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=[],\n value=None,\n dynamic=True,\n required=True,\n ),\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate.\",\n range_spec=RangeSpec(min=1, max=4096),\n value=1000,\n ),\n StrInput(\n name=\"stop_sequence\",\n display_name=\"Stop Sequence\",\n advanced=True,\n info=\"Sequence where generation should stop.\",\n field_type=\"str\",\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n info=\"Controls randomness, higher values increase diversity.\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=2, step=0.01),\n advanced=True,\n ),\n SliderInput(\n name=\"top_p\",\n display_name=\"Top P\",\n info=\"The cumulative probability cutoff for token selection. 
\"\n \"Lower values mean sampling from a smaller, more top-weighted nucleus.\",\n value=0.9,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n SliderInput(\n name=\"frequency_penalty\",\n display_name=\"Frequency Penalty\",\n info=\"Penalty for frequency of token usage.\",\n value=0.5,\n range_spec=RangeSpec(min=-2.0, max=2.0, step=0.01),\n advanced=True,\n ),\n SliderInput(\n name=\"presence_penalty\",\n display_name=\"Presence Penalty\",\n info=\"Penalty for token presence in prior text.\",\n value=0.3,\n range_spec=RangeSpec(min=-2.0, max=2.0, step=0.01),\n advanced=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Random Seed\",\n advanced=True,\n info=\"The random seed for the model.\",\n value=8,\n ),\n BoolInput(\n name=\"logprobs\",\n display_name=\"Log Probabilities\",\n advanced=True,\n info=\"Whether to return log probabilities of the output tokens.\",\n value=True,\n ),\n IntInput(\n name=\"top_logprobs\",\n display_name=\"Top Log Probabilities\",\n advanced=True,\n info=\"Number of most likely tokens to return at each position.\",\n value=3,\n range_spec=RangeSpec(min=1, max=20),\n ),\n StrInput(\n name=\"logit_bias\",\n display_name=\"Logit Bias\",\n advanced=True,\n info='JSON string of token IDs to bias or suppress (e.g., {\"1003\": -100, \"1004\": 100}).',\n field_type=\"str\",\n ),\n ]\n\n @staticmethod\n def fetch_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\"version\": \"2024-09-16\", \"filters\": \"function_text_chat,!lifecycle_withdrawn\"}\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models. Using default models.\")\n return WatsonxAIComponent._default_models\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None):\n \"\"\"Update model options when URL or API key changes.\"\"\"\n logger.info(\"Updating build config. Field name: %s, Field value: %s\", field_name, field_value)\n\n if field_name == \"url\" and field_value:\n try:\n models = self.fetch_models(base_url=build_config.url.value)\n build_config.model_name.options = models\n if build_config.model_name.value:\n build_config.model_name.value = models[0]\n info_message = f\"Updated model options: {len(models)} models found in {build_config.url.value}\"\n logger.info(info_message)\n except Exception: # noqa: BLE001\n logger.exception(\"Error updating model options.\")\n\n def build_model(self) -> LanguageModel:\n # Parse logit_bias from JSON string if provided\n logit_bias = None\n if hasattr(self, \"logit_bias\") and self.logit_bias:\n try:\n logit_bias = json.loads(self.logit_bias)\n except json.JSONDecodeError:\n logger.warning(\"Invalid logit_bias JSON format. 
Using default instead.\")\n logit_bias = {\"1003\": -100, \"1004\": -100}\n\n chat_params = {\n \"max_tokens\": getattr(self, \"max_tokens\", None),\n \"temperature\": getattr(self, \"temperature\", None),\n \"top_p\": getattr(self, \"top_p\", None),\n \"frequency_penalty\": getattr(self, \"frequency_penalty\", None),\n \"presence_penalty\": getattr(self, \"presence_penalty\", None),\n \"seed\": getattr(self, \"seed\", None),\n \"stop\": [self.stop_sequence] if self.stop_sequence else [],\n \"n\": 1,\n \"logprobs\": getattr(self, \"logprobs\", True),\n \"top_logprobs\": getattr(self, \"top_logprobs\", None),\n \"time_limit\": 600000,\n \"logit_bias\": logit_bias,\n }\n\n return ChatWatsonx(\n apikey=SecretStr(self.api_key).get_secret_value(),\n url=self.url,\n project_id=self.project_id,\n model_id=self.model_name,\n params=chat_params,\n streaming=self.stream,\n )\n", - "fileTypes": [], - "file_path": "", - "password": false, - "name": "code", - "advanced": true, - "dynamic": true, - "info": "", - "load_from_db": false, - "title_case": false - }, - "frequency_penalty": { - "tool_mode": false, - "min_label": "", - "max_label": "", - "min_label_icon": "", - "max_label_icon": "", - "slider_buttons": false, - "slider_buttons_options": [], - "slider_input": false, - "range_spec": { - "step_type": "float", - "min": -2, - "max": 2, - "step": 0.01 - }, - "required": false, - "placeholder": "", - "show": true, - "name": "frequency_penalty", - "value": 0.5, - "display_name": "Frequency Penalty", - "advanced": true, - "dynamic": false, - "info": "Penalty for frequency of token usage.", - "title_case": false, - "type": "slider", - "_input_type": "SliderInput" - }, - "input_value": { - "trace_as_input": true, - "tool_mode": false, - "trace_as_metadata": true, - "load_from_db": false, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "input_value", - "value": "", - "display_name": "Input", - "advanced": false, - "input_types": [ - "Message" - ], - "dynamic": false, - "info": "", - "title_case": false, - "type": "str", - "_input_type": "MessageInput" - }, - "logit_bias": { - "tool_mode": false, - "trace_as_metadata": true, - "load_from_db": false, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "logit_bias", - "value": "", - "display_name": "Logit Bias", - "advanced": true, - "dynamic": false, - "info": "JSON string of token IDs to bias or suppress (e.g., {\"1003\": -100, \"1004\": 100}).", - "title_case": false, - "type": "str", - "_input_type": "StrInput" - }, - "logprobs": { - "tool_mode": false, - "trace_as_metadata": true, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "logprobs", - "value": true, - "display_name": "Log Probabilities", - "advanced": true, - "dynamic": false, - "info": "Whether to return log probabilities of the output tokens.", - "title_case": false, - "type": "bool", - "_input_type": "BoolInput" - }, - "max_tokens": { - "tool_mode": false, - "trace_as_metadata": true, - "range_spec": { - "step_type": "float", - "min": 1, - "max": 4096, - "step": 0.1 - }, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "max_tokens", - "value": 1000, - "display_name": "Max Tokens", - "advanced": true, - "dynamic": false, - "info": "The maximum number of tokens to generate.", - "title_case": false, - "type": "int", - 
"_input_type": "IntInput" - }, - "model_name": { - "tool_mode": false, - "trace_as_metadata": true, - "options": [], - "options_metadata": [], - "combobox": false, - "dialog_inputs": {}, - "toggle": false, - "required": true, - "placeholder": "", - "show": true, - "name": "model_name", - "display_name": "Model Name", - "advanced": false, - "dynamic": true, - "info": "", - "title_case": false, - "external_options": {}, - "type": "str", - "_input_type": "DropdownInput" - }, - "presence_penalty": { - "tool_mode": false, - "min_label": "", - "max_label": "", - "min_label_icon": "", - "max_label_icon": "", - "slider_buttons": false, - "slider_buttons_options": [], - "slider_input": false, - "range_spec": { - "step_type": "float", - "min": -2, - "max": 2, - "step": 0.01 - }, - "required": false, - "placeholder": "", - "show": true, - "name": "presence_penalty", - "value": 0.3, - "display_name": "Presence Penalty", - "advanced": true, - "dynamic": false, - "info": "Penalty for token presence in prior text.", - "title_case": false, - "type": "slider", - "_input_type": "SliderInput" - }, - "project_id": { - "tool_mode": false, - "trace_as_metadata": true, - "load_from_db": false, - "list": false, - "list_add_label": "Add More", - "required": true, - "placeholder": "", - "show": true, - "name": "project_id", - "value": "", - "display_name": "watsonx Project ID", - "advanced": false, - "dynamic": false, - "info": "The project ID or deployment space ID that is associated with the foundation model.", - "title_case": false, - "type": "str", - "_input_type": "StrInput" - }, - "seed": { - "tool_mode": false, - "trace_as_metadata": true, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "seed", - "value": 8, - "display_name": "Random Seed", - "advanced": true, - "dynamic": false, - "info": "The random seed for the model.", - "title_case": false, - "type": "int", - "_input_type": "IntInput" - }, - "stop_sequence": { - "tool_mode": false, - "trace_as_metadata": true, - "load_from_db": false, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "stop_sequence", - "value": "", - "display_name": "Stop Sequence", - "advanced": true, - "dynamic": false, - "info": "Sequence where generation should stop.", - "title_case": false, - "type": "str", - "_input_type": "StrInput" - }, - "stream": { - "tool_mode": false, - "trace_as_metadata": true, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "stream", - "value": false, - "display_name": "Stream", - "advanced": true, - "dynamic": false, - "info": "Stream the response from the model. 
Streaming works only in Chat.", - "title_case": false, - "type": "bool", - "_input_type": "BoolInput" - }, - "system_message": { - "tool_mode": false, - "trace_as_input": true, - "multiline": true, - "trace_as_metadata": true, - "load_from_db": false, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "system_message", - "value": "", - "display_name": "System Message", - "advanced": false, - "input_types": [ - "Message" - ], - "dynamic": false, - "info": "System message to pass to the model.", - "title_case": false, - "copy_field": false, - "type": "str", - "_input_type": "MultilineInput" - }, - "temperature": { - "tool_mode": false, - "min_label": "", - "max_label": "", - "min_label_icon": "", - "max_label_icon": "", - "slider_buttons": false, - "slider_buttons_options": [], - "slider_input": false, - "range_spec": { - "step_type": "float", - "min": 0, - "max": 2, - "step": 0.01 - }, - "required": false, - "placeholder": "", - "show": true, - "name": "temperature", - "value": 0.1, - "display_name": "Temperature", - "advanced": true, - "dynamic": false, - "info": "Controls randomness, higher values increase diversity.", - "title_case": false, - "type": "slider", - "_input_type": "SliderInput" - }, - "top_logprobs": { - "tool_mode": false, - "trace_as_metadata": true, - "range_spec": { - "step_type": "float", - "min": 1, - "max": 20, - "step": 0.1 - }, - "list": false, - "list_add_label": "Add More", - "required": false, - "placeholder": "", - "show": true, - "name": "top_logprobs", - "value": 3, - "display_name": "Top Log Probabilities", - "advanced": true, - "dynamic": false, - "info": "Number of most likely tokens to return at each position.", - "title_case": false, - "type": "int", - "_input_type": "IntInput" - }, - "top_p": { - "tool_mode": false, - "min_label": "", - "max_label": "", - "min_label_icon": "", - "max_label_icon": "", - "slider_buttons": false, - "slider_buttons_options": [], - "slider_input": false, - "range_spec": { - "step_type": "float", - "min": 0, - "max": 1, - "step": 0.01 - }, - "required": false, - "placeholder": "", - "show": true, - "name": "top_p", - "value": 0.9, - "display_name": "Top P", - "advanced": true, - "dynamic": false, - "info": "The cumulative probability cutoff for token selection. 
Lower values mean sampling from a smaller, more top-weighted nucleus.", - "title_case": false, - "type": "slider", - "_input_type": "SliderInput" - }, - "url": { - "tool_mode": false, - "trace_as_metadata": true, - "options": [ - "https://us-south.ml.cloud.ibm.com", - "https://eu-de.ml.cloud.ibm.com", - "https://eu-gb.ml.cloud.ibm.com", - "https://au-syd.ml.cloud.ibm.com", - "https://jp-tok.ml.cloud.ibm.com", - "https://ca-tor.ml.cloud.ibm.com" - ], - "options_metadata": [], - "combobox": false, - "dialog_inputs": {}, - "toggle": false, - "required": false, - "placeholder": "", - "show": true, - "name": "url", - "display_name": "watsonx API Endpoint", - "advanced": false, - "dynamic": false, - "info": "The base URL of the API.", - "real_time_refresh": true, - "title_case": false, - "external_options": {}, - "type": "str", - "_input_type": "DropdownInput" - } - }, - "description": "Generate text using IBM watsonx.ai foundation models.", - "icon": "WatsonxAI", - "base_classes": [ - "LanguageModel", - "Message" - ], - "display_name": "IBM watsonx.ai", - "documentation": "", - "minimized": false, - "custom_fields": {}, - "output_types": [], - "pinned": false, - "conditional_paths": [], - "frozen": false, - "outputs": [ - { - "types": [ - "Message" - ], - "selected": "Message", - "name": "text_output", - "display_name": "Model Response", - "method": "text_response", - "value": "__UNDEFINED__", - "cache": true, - "allows_loop": false, - "group_outputs": false, - "tool_mode": true - }, - { - "types": [ - "LanguageModel" - ], - "name": "model_output", - "display_name": "Language Model", - "method": "build_model", - "value": "__UNDEFINED__", - "cache": true, - "allows_loop": false, - "group_outputs": false, - "tool_mode": true - } - ], - "field_order": [ - "input_value", - "system_message", - "stream", - "url", - "project_id", - "api_key", - "model_name", - "max_tokens", - "stop_sequence", - "temperature", - "top_p", - "frequency_penalty", - "presence_penalty", - "seed", - "logprobs", - "top_logprobs", - "logit_bias" - ], - "beta": false, - "legacy": false, - "edited": false, - "metadata": { - "keywords": [ - "model", - "llm", - "language model", - "large language model" - ], - "module": "lfx.components.ibm.watsonx.WatsonxAIComponent", - "code_hash": "85c24939214c", - "dependencies": { - "total_dependencies": 4, - "dependencies": [ - { - "name": "requests", - "version": "2.32.5" - }, - { - "name": "langchain_ibm", - "version": "0.3.16" - }, - { - "name": "pydantic", - "version": "2.10.6" - }, - { - "name": "lfx", - "version": null - } - ] - } - }, - "tool_mode": false, - "official": false + "node": { + "template": { + "_type": "Component", + "api_key": { + "load_from_db": true, + "required": true, + "placeholder": "", + "show": true, + "name": "api_key", + "value": "WATSONX_API_KEY", + "display_name": "Watsonx API Key", + "advanced": false, + "input_types": [], + "dynamic": false, + "info": "The API Key to use for the model.", + "title_case": false, + "password": true, + "type": "str", + "_input_type": "SecretStrInput" + }, + "code": { + "type": "code", + "required": true, + "placeholder": "", + "list": false, + "show": true, + "multiline": true, + "value": "import json\nfrom typing import Any\n\nimport requests\nfrom langchain_ibm import ChatWatsonx\nfrom pydantic.v1 import SecretStr\n\nfrom lfx.base.models.model import LCModelComponent\nfrom lfx.field_typing import LanguageModel\nfrom lfx.field_typing.range_spec import RangeSpec\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, IntInput, 
SecretStrInput, SliderInput, StrInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\n\n\nclass WatsonxAIComponent(LCModelComponent):\n display_name = \"IBM watsonx.ai\"\n description = \"Generate text using IBM watsonx.ai foundation models.\"\n icon = \"WatsonxAI\"\n name = \"IBMwatsonxModel\"\n beta = False\n\n _default_models = [\"ibm/granite-3-2b-instruct\", \"ibm/granite-3-8b-instruct\", \"ibm/granite-13b-instruct-v2\"]\n\n inputs = [\n *LCModelComponent.get_base_inputs(),\n DropdownInput(\n name=\"url\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API.\",\n value=None,\n options=[\n \"https://us-south.ml.cloud.ibm.com\",\n \"https://eu-de.ml.cloud.ibm.com\",\n \"https://eu-gb.ml.cloud.ibm.com\",\n \"https://au-syd.ml.cloud.ibm.com\",\n \"https://jp-tok.ml.cloud.ibm.com\",\n \"https://ca-tor.ml.cloud.ibm.com\",\n ],\n real_time_refresh=True,\n ),\n StrInput(\n name=\"project_id\",\n display_name=\"watsonx Project ID\",\n required=True,\n info=\"The project ID or deployment space ID that is associated with the foundation model.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Watsonx API Key\",\n info=\"The API Key to use for the model.\",\n required=True,\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=[],\n value=None,\n dynamic=True,\n required=True,\n ),\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate.\",\n range_spec=RangeSpec(min=1, max=4096),\n value=1000,\n ),\n StrInput(\n name=\"stop_sequence\",\n display_name=\"Stop Sequence\",\n advanced=True,\n info=\"Sequence where generation should stop.\",\n field_type=\"str\",\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n info=\"Controls randomness, higher values increase diversity.\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=2, step=0.01),\n advanced=True,\n ),\n SliderInput(\n name=\"top_p\",\n display_name=\"Top P\",\n info=\"The cumulative probability cutoff for token selection. 
\"\n \"Lower values mean sampling from a smaller, more top-weighted nucleus.\",\n value=0.9,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n SliderInput(\n name=\"frequency_penalty\",\n display_name=\"Frequency Penalty\",\n info=\"Penalty for frequency of token usage.\",\n value=0.5,\n range_spec=RangeSpec(min=-2.0, max=2.0, step=0.01),\n advanced=True,\n ),\n SliderInput(\n name=\"presence_penalty\",\n display_name=\"Presence Penalty\",\n info=\"Penalty for token presence in prior text.\",\n value=0.3,\n range_spec=RangeSpec(min=-2.0, max=2.0, step=0.01),\n advanced=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Random Seed\",\n advanced=True,\n info=\"The random seed for the model.\",\n value=8,\n ),\n BoolInput(\n name=\"logprobs\",\n display_name=\"Log Probabilities\",\n advanced=True,\n info=\"Whether to return log probabilities of the output tokens.\",\n value=True,\n ),\n IntInput(\n name=\"top_logprobs\",\n display_name=\"Top Log Probabilities\",\n advanced=True,\n info=\"Number of most likely tokens to return at each position.\",\n value=3,\n range_spec=RangeSpec(min=1, max=20),\n ),\n StrInput(\n name=\"logit_bias\",\n display_name=\"Logit Bias\",\n advanced=True,\n info='JSON string of token IDs to bias or suppress (e.g., {\"1003\": -100, \"1004\": 100}).',\n field_type=\"str\",\n ),\n ]\n\n @staticmethod\n def fetch_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\"version\": \"2024-09-16\", \"filters\": \"function_text_chat,!lifecycle_withdrawn\"}\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models. Using default models.\")\n return WatsonxAIComponent._default_models\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None):\n \"\"\"Update model options when URL or API key changes.\"\"\"\n logger.info(\"Updating build config. Field name: %s, Field value: %s\", field_name, field_value)\n\n if field_name == \"url\" and field_value:\n try:\n models = self.fetch_models(base_url=build_config.url.value)\n build_config.model_name.options = models\n if build_config.model_name.value:\n build_config.model_name.value = models[0]\n info_message = f\"Updated model options: {len(models)} models found in {build_config.url.value}\"\n logger.info(info_message)\n except Exception: # noqa: BLE001\n logger.exception(\"Error updating model options.\")\n\n def build_model(self) -> LanguageModel:\n # Parse logit_bias from JSON string if provided\n logit_bias = None\n if hasattr(self, \"logit_bias\") and self.logit_bias:\n try:\n logit_bias = json.loads(self.logit_bias)\n except json.JSONDecodeError:\n logger.warning(\"Invalid logit_bias JSON format. 
Using default instead.\")\n logit_bias = {\"1003\": -100, \"1004\": -100}\n\n chat_params = {\n \"max_tokens\": getattr(self, \"max_tokens\", None),\n \"temperature\": getattr(self, \"temperature\", None),\n \"top_p\": getattr(self, \"top_p\", None),\n \"frequency_penalty\": getattr(self, \"frequency_penalty\", None),\n \"presence_penalty\": getattr(self, \"presence_penalty\", None),\n \"seed\": getattr(self, \"seed\", None),\n \"stop\": [self.stop_sequence] if self.stop_sequence else [],\n \"n\": 1,\n \"logprobs\": getattr(self, \"logprobs\", True),\n \"top_logprobs\": getattr(self, \"top_logprobs\", None),\n \"time_limit\": 600000,\n \"logit_bias\": logit_bias,\n }\n\n return ChatWatsonx(\n apikey=SecretStr(self.api_key).get_secret_value(),\n url=self.url,\n project_id=self.project_id,\n model_id=self.model_name,\n params=chat_params,\n streaming=self.stream,\n )\n", + "fileTypes": [], + "file_path": "", + "password": false, + "name": "code", + "advanced": true, + "dynamic": true, + "info": "", + "load_from_db": false, + "title_case": false + }, + "frequency_penalty": { + "tool_mode": false, + "min_label": "", + "max_label": "", + "min_label_icon": "", + "max_label_icon": "", + "slider_buttons": false, + "slider_buttons_options": [], + "slider_input": false, + "range_spec": { + "step_type": "float", + "min": -2, + "max": 2, + "step": 0.01 }, - "showNode": true, - "type": "IBMwatsonxModel", - "id": "IBMwatsonxModel-qXZxc", - "selected_output": "text_output" + "required": false, + "placeholder": "", + "show": true, + "name": "frequency_penalty", + "value": 0.5, + "display_name": "Frequency Penalty", + "advanced": true, + "dynamic": false, + "info": "Penalty for frequency of token usage.", + "title_case": false, + "type": "slider", + "_input_type": "SliderInput" }, - "id": "IBMwatsonxModel-qXZxc", - "position": { - "x": 0, - "y": 0 + "input_value": { + "trace_as_input": true, + "tool_mode": false, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "input_value", + "value": "", + "display_name": "Input", + "advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "", + "title_case": false, + "type": "str", + "_input_type": "MessageInput" }, - "type": "genericNode" - } - ], - "viewport": { - "x": 1, - "y": 1, - "zoom": 1 - } + "logit_bias": { + "tool_mode": false, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "logit_bias", + "value": "", + "display_name": "Logit Bias", + "advanced": true, + "dynamic": false, + "info": "JSON string of token IDs to bias or suppress (e.g., {\"1003\": -100, \"1004\": 100}).", + "title_case": false, + "type": "str", + "_input_type": "StrInput" + }, + "logprobs": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "logprobs", + "value": true, + "display_name": "Log Probabilities", + "advanced": true, + "dynamic": false, + "info": "Whether to return log probabilities of the output tokens.", + "title_case": false, + "type": "bool", + "_input_type": "BoolInput" + }, + "max_tokens": { + "tool_mode": false, + "trace_as_metadata": true, + "range_spec": { + "step_type": "float", + "min": 1, + "max": 4096, + "step": 0.1 + }, + "list": false, + "list_add_label": "Add More", + "required": 
false, + "placeholder": "", + "show": true, + "name": "max_tokens", + "value": 1000, + "display_name": "Max Tokens", + "advanced": true, + "dynamic": false, + "info": "The maximum number of tokens to generate.", + "title_case": false, + "type": "int", + "_input_type": "IntInput" + }, + "model_name": { + "tool_mode": false, + "trace_as_metadata": true, + "options": [], + "options_metadata": [], + "combobox": false, + "dialog_inputs": {}, + "toggle": false, + "required": true, + "placeholder": "", + "show": true, + "name": "model_name", + "display_name": "Model Name", + "advanced": false, + "dynamic": true, + "info": "", + "title_case": false, + "external_options": {}, + "type": "str", + "_input_type": "DropdownInput" + }, + "presence_penalty": { + "tool_mode": false, + "min_label": "", + "max_label": "", + "min_label_icon": "", + "max_label_icon": "", + "slider_buttons": false, + "slider_buttons_options": [], + "slider_input": false, + "range_spec": { + "step_type": "float", + "min": -2, + "max": 2, + "step": 0.01 + }, + "required": false, + "placeholder": "", + "show": true, + "name": "presence_penalty", + "value": 0.3, + "display_name": "Presence Penalty", + "advanced": true, + "dynamic": false, + "info": "Penalty for token presence in prior text.", + "title_case": false, + "type": "slider", + "_input_type": "SliderInput" + }, + "project_id": { + "tool_mode": false, + "trace_as_metadata": true, + "load_from_db": true, + "list": false, + "list_add_label": "Add More", + "required": true, + "placeholder": "", + "show": true, + "name": "project_id", + "value": "WATSONX_PROJECT_ID", + "display_name": "watsonx Project ID", + "advanced": false, + "dynamic": false, + "info": "The project ID or deployment space ID that is associated with the foundation model.", + "title_case": false, + "type": "str", + "_input_type": "StrInput" + }, + "seed": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "seed", + "value": 8, + "display_name": "Random Seed", + "advanced": true, + "dynamic": false, + "info": "The random seed for the model.", + "title_case": false, + "type": "int", + "_input_type": "IntInput" + }, + "stop_sequence": { + "tool_mode": false, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "stop_sequence", + "value": "", + "display_name": "Stop Sequence", + "advanced": true, + "dynamic": false, + "info": "Sequence where generation should stop.", + "title_case": false, + "type": "str", + "_input_type": "StrInput" + }, + "stream": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "stream", + "value": false, + "display_name": "Stream", + "advanced": true, + "dynamic": false, + "info": "Stream the response from the model. 
Streaming works only in Chat.", + "title_case": false, + "type": "bool", + "_input_type": "BoolInput" + }, + "system_message": { + "tool_mode": false, + "trace_as_input": true, + "multiline": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "system_message", + "value": "", + "display_name": "System Message", + "advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "System message to pass to the model.", + "title_case": false, + "copy_field": false, + "type": "str", + "_input_type": "MultilineInput" + }, + "temperature": { + "tool_mode": false, + "min_label": "", + "max_label": "", + "min_label_icon": "", + "max_label_icon": "", + "slider_buttons": false, + "slider_buttons_options": [], + "slider_input": false, + "range_spec": { + "step_type": "float", + "min": 0, + "max": 2, + "step": 0.01 + }, + "required": false, + "placeholder": "", + "show": true, + "name": "temperature", + "value": 0.1, + "display_name": "Temperature", + "advanced": true, + "dynamic": false, + "info": "Controls randomness, higher values increase diversity.", + "title_case": false, + "type": "slider", + "_input_type": "SliderInput" + }, + "top_logprobs": { + "tool_mode": false, + "trace_as_metadata": true, + "range_spec": { + "step_type": "float", + "min": 1, + "max": 20, + "step": 0.1 + }, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "top_logprobs", + "value": 3, + "display_name": "Top Log Probabilities", + "advanced": true, + "dynamic": false, + "info": "Number of most likely tokens to return at each position.", + "title_case": false, + "type": "int", + "_input_type": "IntInput" + }, + "top_p": { + "tool_mode": false, + "min_label": "", + "max_label": "", + "min_label_icon": "", + "max_label_icon": "", + "slider_buttons": false, + "slider_buttons_options": [], + "slider_input": false, + "range_spec": { + "step_type": "float", + "min": 0, + "max": 1, + "step": 0.01 + }, + "required": false, + "placeholder": "", + "show": true, + "name": "top_p", + "value": 0.9, + "display_name": "Top P", + "advanced": true, + "dynamic": false, + "info": "The cumulative probability cutoff for token selection. 
Lower values mean sampling from a smaller, more top-weighted nucleus.", + "title_case": false, + "type": "slider", + "_input_type": "SliderInput" + }, + "url": { + "tool_mode": false, + "trace_as_metadata": true, + "options": [ + "https://us-south.ml.cloud.ibm.com", + "https://eu-de.ml.cloud.ibm.com", + "https://eu-gb.ml.cloud.ibm.com", + "https://au-syd.ml.cloud.ibm.com", + "https://jp-tok.ml.cloud.ibm.com", + "https://ca-tor.ml.cloud.ibm.com" + ], + "options_metadata": [], + "combobox": false, + "dialog_inputs": {}, + "toggle": false, + "required": false, + "placeholder": "", + "show": true, + "name": "url", + "display_name": "watsonx API Endpoint", + "advanced": false, + "dynamic": false, + "info": "The base URL of the API.", + "real_time_refresh": true, + "title_case": false, + "external_options": {}, + "type": "str", + "_input_type": "DropdownInput" + } + }, + "description": "Generate text using IBM watsonx.ai foundation models.", + "icon": "WatsonxAI", + "base_classes": [ + "LanguageModel", + "Message" + ], + "display_name": "IBM watsonx.ai", + "documentation": "", + "minimized": false, + "custom_fields": {}, + "output_types": [], + "pinned": false, + "conditional_paths": [], + "frozen": false, + "outputs": [ + { + "types": [ + "Message" + ], + "selected": "Message", + "name": "text_output", + "display_name": "Model Response", + "method": "text_response", + "value": "__UNDEFINED__", + "cache": true, + "allows_loop": false, + "group_outputs": false, + "tool_mode": true + }, + { + "types": [ + "LanguageModel" + ], + "name": "model_output", + "display_name": "Language Model", + "method": "build_model", + "value": "__UNDEFINED__", + "cache": true, + "allows_loop": false, + "group_outputs": false, + "tool_mode": true + } + ], + "field_order": [ + "input_value", + "system_message", + "stream", + "url", + "project_id", + "api_key", + "model_name", + "max_tokens", + "stop_sequence", + "temperature", + "top_p", + "frequency_penalty", + "presence_penalty", + "seed", + "logprobs", + "top_logprobs", + "logit_bias" + ], + "beta": false, + "legacy": false, + "edited": false, + "metadata": { + "keywords": [ + "model", + "llm", + "language model", + "large language model" + ], + "module": "lfx.components.ibm.watsonx.WatsonxAIComponent", + "code_hash": "85c24939214c", + "dependencies": { + "total_dependencies": 4, + "dependencies": [ + { + "name": "requests", + "version": "2.32.5" + }, + { + "name": "langchain_ibm", + "version": "0.3.16" + }, + { + "name": "pydantic", + "version": "2.10.6" + }, + { + "name": "lfx", + "version": null + } + ] + } + }, + "tool_mode": false, + "official": false + }, + "showNode": true, + "type": "IBMwatsonxModel", + "id": "IBMwatsonxModel-qXZxc", + "selected_output": "text_output" }, - "description": "Generate text using IBM watsonx.ai foundation models.", - "name": "IBM watsonx.ai", "id": "IBMwatsonxModel-qXZxc", - "is_component": true, - "last_tested_version": "1.6.0" + "position": { + "x": 0, + "y": 0 + }, + "type": "genericNode" } \ No newline at end of file