{ "data": { "node": { "template": { "_type": "Component", "api_key": { "load_from_db": true, "required": true, "placeholder": "", "show": true, "name": "api_key", "value": "WATSONX_API_KEY", "display_name": "Watsonx API Key", "advanced": false, "input_types": [], "dynamic": false, "info": "The API Key to use for the model.", "title_case": false, "password": true, "type": "str", "_input_type": "SecretStrInput" }, "code": { "type": "code", "required": true, "placeholder": "", "list": false, "show": true, "multiline": true, "value": "import json\nfrom typing import Any\n\nimport requests\nfrom langchain_ibm import ChatWatsonx\nfrom pydantic.v1 import SecretStr\n\nfrom lfx.base.models.model import LCModelComponent\nfrom lfx.field_typing import LanguageModel\nfrom lfx.field_typing.range_spec import RangeSpec\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\n\n\nclass WatsonxAIComponent(LCModelComponent):\n display_name = \"IBM watsonx.ai\"\n description = \"Generate text using IBM watsonx.ai foundation models.\"\n icon = \"WatsonxAI\"\n name = \"IBMwatsonxModel\"\n beta = False\n\n _default_models = [\"ibm/granite-3-2b-instruct\", \"ibm/granite-3-8b-instruct\", \"ibm/granite-13b-instruct-v2\"]\n\n inputs = [\n *LCModelComponent.get_base_inputs(),\n DropdownInput(\n name=\"url\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API.\",\n value=None,\n options=[\n \"https://us-south.ml.cloud.ibm.com\",\n \"https://eu-de.ml.cloud.ibm.com\",\n \"https://eu-gb.ml.cloud.ibm.com\",\n \"https://au-syd.ml.cloud.ibm.com\",\n \"https://jp-tok.ml.cloud.ibm.com\",\n \"https://ca-tor.ml.cloud.ibm.com\",\n ],\n real_time_refresh=True,\n ),\n StrInput(\n name=\"project_id\",\n display_name=\"watsonx Project ID\",\n required=True,\n info=\"The project ID or deployment space ID that is associated with the foundation model.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Watsonx API Key\",\n info=\"The API Key to use for the model.\",\n required=True,\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=[],\n value=None,\n dynamic=True,\n required=True,\n ),\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate.\",\n range_spec=RangeSpec(min=1, max=4096),\n value=1000,\n ),\n StrInput(\n name=\"stop_sequence\",\n display_name=\"Stop Sequence\",\n advanced=True,\n info=\"Sequence where generation should stop.\",\n field_type=\"str\",\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n info=\"Controls randomness, higher values increase diversity.\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=2, step=0.01),\n advanced=True,\n ),\n SliderInput(\n name=\"top_p\",\n display_name=\"Top P\",\n info=\"The cumulative probability cutoff for token selection. 
\"\n \"Lower values mean sampling from a smaller, more top-weighted nucleus.\",\n value=0.9,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n SliderInput(\n name=\"frequency_penalty\",\n display_name=\"Frequency Penalty\",\n info=\"Penalty for frequency of token usage.\",\n value=0.5,\n range_spec=RangeSpec(min=-2.0, max=2.0, step=0.01),\n advanced=True,\n ),\n SliderInput(\n name=\"presence_penalty\",\n display_name=\"Presence Penalty\",\n info=\"Penalty for token presence in prior text.\",\n value=0.3,\n range_spec=RangeSpec(min=-2.0, max=2.0, step=0.01),\n advanced=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Random Seed\",\n advanced=True,\n info=\"The random seed for the model.\",\n value=8,\n ),\n BoolInput(\n name=\"logprobs\",\n display_name=\"Log Probabilities\",\n advanced=True,\n info=\"Whether to return log probabilities of the output tokens.\",\n value=True,\n ),\n IntInput(\n name=\"top_logprobs\",\n display_name=\"Top Log Probabilities\",\n advanced=True,\n info=\"Number of most likely tokens to return at each position.\",\n value=3,\n range_spec=RangeSpec(min=1, max=20),\n ),\n StrInput(\n name=\"logit_bias\",\n display_name=\"Logit Bias\",\n advanced=True,\n info='JSON string of token IDs to bias or suppress (e.g., {\"1003\": -100, \"1004\": 100}).',\n field_type=\"str\",\n ),\n ]\n\n @staticmethod\n def fetch_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\"version\": \"2024-09-16\", \"filters\": \"function_text_chat,!lifecycle_withdrawn\"}\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models. Using default models.\")\n return WatsonxAIComponent._default_models\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None):\n \"\"\"Update model options when URL or API key changes.\"\"\"\n logger.info(\"Updating build config. Field name: %s, Field value: %s\", field_name, field_value)\n\n if field_name == \"url\" and field_value:\n try:\n models = self.fetch_models(base_url=build_config.url.value)\n build_config.model_name.options = models\n if build_config.model_name.value:\n build_config.model_name.value = models[0]\n info_message = f\"Updated model options: {len(models)} models found in {build_config.url.value}\"\n logger.info(info_message)\n except Exception: # noqa: BLE001\n logger.exception(\"Error updating model options.\")\n\n def build_model(self) -> LanguageModel:\n # Parse logit_bias from JSON string if provided\n logit_bias = None\n if hasattr(self, \"logit_bias\") and self.logit_bias:\n try:\n logit_bias = json.loads(self.logit_bias)\n except json.JSONDecodeError:\n logger.warning(\"Invalid logit_bias JSON format. 
Using default instead.\")\n logit_bias = {\"1003\": -100, \"1004\": -100}\n\n chat_params = {\n \"max_tokens\": getattr(self, \"max_tokens\", None),\n \"temperature\": getattr(self, \"temperature\", None),\n \"top_p\": getattr(self, \"top_p\", None),\n \"frequency_penalty\": getattr(self, \"frequency_penalty\", None),\n \"presence_penalty\": getattr(self, \"presence_penalty\", None),\n \"seed\": getattr(self, \"seed\", None),\n \"stop\": [self.stop_sequence] if self.stop_sequence else [],\n \"n\": 1,\n \"logprobs\": getattr(self, \"logprobs\", True),\n \"top_logprobs\": getattr(self, \"top_logprobs\", None),\n \"time_limit\": 600000,\n \"logit_bias\": logit_bias,\n }\n\n return ChatWatsonx(\n apikey=SecretStr(self.api_key).get_secret_value(),\n url=self.url,\n project_id=self.project_id,\n model_id=self.model_name,\n params=chat_params,\n streaming=self.stream,\n )\n", "fileTypes": [], "file_path": "", "password": false, "name": "code", "advanced": true, "dynamic": true, "info": "", "load_from_db": false, "title_case": false }, "frequency_penalty": { "tool_mode": false, "min_label": "", "max_label": "", "min_label_icon": "", "max_label_icon": "", "slider_buttons": false, "slider_buttons_options": [], "slider_input": false, "range_spec": { "step_type": "float", "min": -2, "max": 2, "step": 0.01 }, "required": false, "placeholder": "", "show": true, "name": "frequency_penalty", "value": 0.5, "display_name": "Frequency Penalty", "advanced": true, "dynamic": false, "info": "Penalty for frequency of token usage.", "title_case": false, "type": "slider", "_input_type": "SliderInput" }, "input_value": { "trace_as_input": true, "tool_mode": false, "trace_as_metadata": true, "load_from_db": false, "list": false, "list_add_label": "Add More", "required": false, "placeholder": "", "show": true, "name": "input_value", "value": "", "display_name": "Input", "advanced": false, "input_types": [ "Message" ], "dynamic": false, "info": "", "title_case": false, "type": "str", "_input_type": "MessageInput" }, "logit_bias": { "tool_mode": false, "trace_as_metadata": true, "load_from_db": false, "list": false, "list_add_label": "Add More", "required": false, "placeholder": "", "show": true, "name": "logit_bias", "value": "", "display_name": "Logit Bias", "advanced": true, "dynamic": false, "info": "JSON string of token IDs to bias or suppress (e.g., {\"1003\": -100, \"1004\": 100}).", "title_case": false, "type": "str", "_input_type": "StrInput" }, "logprobs": { "tool_mode": false, "trace_as_metadata": true, "list": false, "list_add_label": "Add More", "required": false, "placeholder": "", "show": true, "name": "logprobs", "value": true, "display_name": "Log Probabilities", "advanced": true, "dynamic": false, "info": "Whether to return log probabilities of the output tokens.", "title_case": false, "type": "bool", "_input_type": "BoolInput" }, "max_tokens": { "tool_mode": false, "trace_as_metadata": true, "range_spec": { "step_type": "float", "min": 1, "max": 4096, "step": 0.1 }, "list": false, "list_add_label": "Add More", "required": false, "placeholder": "", "show": true, "name": "max_tokens", "value": 1000, "display_name": "Max Tokens", "advanced": true, "dynamic": false, "info": "The maximum number of tokens to generate.", "title_case": false, "type": "int", "_input_type": "IntInput" }, "model_name": { "tool_mode": false, "trace_as_metadata": true, "options": [], "options_metadata": [], "combobox": false, "dialog_inputs": {}, "toggle": false, "required": true, "placeholder": "", "show": true, "name": 
"model_name", "display_name": "Model Name", "advanced": false, "dynamic": true, "info": "", "title_case": false, "external_options": {}, "type": "str", "_input_type": "DropdownInput" }, "presence_penalty": { "tool_mode": false, "min_label": "", "max_label": "", "min_label_icon": "", "max_label_icon": "", "slider_buttons": false, "slider_buttons_options": [], "slider_input": false, "range_spec": { "step_type": "float", "min": -2, "max": 2, "step": 0.01 }, "required": false, "placeholder": "", "show": true, "name": "presence_penalty", "value": 0.3, "display_name": "Presence Penalty", "advanced": true, "dynamic": false, "info": "Penalty for token presence in prior text.", "title_case": false, "type": "slider", "_input_type": "SliderInput" }, "project_id": { "tool_mode": false, "trace_as_metadata": true, "load_from_db": true, "list": false, "list_add_label": "Add More", "required": true, "placeholder": "", "show": true, "name": "project_id", "value": "WATSONX_PROJECT_ID", "display_name": "watsonx Project ID", "advanced": false, "dynamic": false, "info": "The project ID or deployment space ID that is associated with the foundation model.", "title_case": false, "type": "str", "_input_type": "StrInput" }, "seed": { "tool_mode": false, "trace_as_metadata": true, "list": false, "list_add_label": "Add More", "required": false, "placeholder": "", "show": true, "name": "seed", "value": 8, "display_name": "Random Seed", "advanced": true, "dynamic": false, "info": "The random seed for the model.", "title_case": false, "type": "int", "_input_type": "IntInput" }, "stop_sequence": { "tool_mode": false, "trace_as_metadata": true, "load_from_db": false, "list": false, "list_add_label": "Add More", "required": false, "placeholder": "", "show": true, "name": "stop_sequence", "value": "", "display_name": "Stop Sequence", "advanced": true, "dynamic": false, "info": "Sequence where generation should stop.", "title_case": false, "type": "str", "_input_type": "StrInput" }, "stream": { "tool_mode": false, "trace_as_metadata": true, "list": false, "list_add_label": "Add More", "required": false, "placeholder": "", "show": true, "name": "stream", "value": false, "display_name": "Stream", "advanced": true, "dynamic": false, "info": "Stream the response from the model. 
Streaming works only in Chat.", "title_case": false, "type": "bool", "_input_type": "BoolInput" }, "system_message": { "tool_mode": false, "trace_as_input": true, "multiline": true, "trace_as_metadata": true, "load_from_db": false, "list": false, "list_add_label": "Add More", "required": false, "placeholder": "", "show": true, "name": "system_message", "value": "", "display_name": "System Message", "advanced": false, "input_types": [ "Message" ], "dynamic": false, "info": "System message to pass to the model.", "title_case": false, "copy_field": false, "type": "str", "_input_type": "MultilineInput" }, "temperature": { "tool_mode": false, "min_label": "", "max_label": "", "min_label_icon": "", "max_label_icon": "", "slider_buttons": false, "slider_buttons_options": [], "slider_input": false, "range_spec": { "step_type": "float", "min": 0, "max": 2, "step": 0.01 }, "required": false, "placeholder": "", "show": true, "name": "temperature", "value": 0.1, "display_name": "Temperature", "advanced": true, "dynamic": false, "info": "Controls randomness, higher values increase diversity.", "title_case": false, "type": "slider", "_input_type": "SliderInput" }, "top_logprobs": { "tool_mode": false, "trace_as_metadata": true, "range_spec": { "step_type": "float", "min": 1, "max": 20, "step": 0.1 }, "list": false, "list_add_label": "Add More", "required": false, "placeholder": "", "show": true, "name": "top_logprobs", "value": 3, "display_name": "Top Log Probabilities", "advanced": true, "dynamic": false, "info": "Number of most likely tokens to return at each position.", "title_case": false, "type": "int", "_input_type": "IntInput" }, "top_p": { "tool_mode": false, "min_label": "", "max_label": "", "min_label_icon": "", "max_label_icon": "", "slider_buttons": false, "slider_buttons_options": [], "slider_input": false, "range_spec": { "step_type": "float", "min": 0, "max": 1, "step": 0.01 }, "required": false, "placeholder": "", "show": true, "name": "top_p", "value": 0.9, "display_name": "Top P", "advanced": true, "dynamic": false, "info": "The cumulative probability cutoff for token selection. 
Lower values mean sampling from a smaller, more top-weighted nucleus.", "title_case": false, "type": "slider", "_input_type": "SliderInput" }, "url": { "tool_mode": false, "trace_as_metadata": true, "options": [ "https://us-south.ml.cloud.ibm.com", "https://eu-de.ml.cloud.ibm.com", "https://eu-gb.ml.cloud.ibm.com", "https://au-syd.ml.cloud.ibm.com", "https://jp-tok.ml.cloud.ibm.com", "https://ca-tor.ml.cloud.ibm.com" ], "options_metadata": [], "combobox": false, "dialog_inputs": {}, "toggle": false, "required": false, "placeholder": "", "show": true, "name": "url", "display_name": "watsonx API Endpoint", "advanced": false, "dynamic": false, "info": "The base URL of the API.", "real_time_refresh": true, "title_case": false, "external_options": {}, "type": "str", "_input_type": "DropdownInput" } }, "description": "Generate text using IBM watsonx.ai foundation models.", "icon": "WatsonxAI", "base_classes": [ "LanguageModel", "Message" ], "display_name": "IBM watsonx.ai", "documentation": "", "minimized": false, "custom_fields": {}, "output_types": [], "pinned": false, "conditional_paths": [], "frozen": false, "outputs": [ { "types": [ "Message" ], "name": "text_output", "display_name": "Model Response", "method": "text_response", "value": "__UNDEFINED__", "cache": true, "allows_loop": false, "group_outputs": false, "tool_mode": true }, { "types": [ "LanguageModel" ], "selected": "LanguageModel", "name": "model_output", "display_name": "Language Model", "method": "build_model", "value": "__UNDEFINED__", "cache": true, "allows_loop": false, "group_outputs": false, "tool_mode": true } ], "field_order": [ "input_value", "system_message", "stream", "url", "project_id", "api_key", "model_name", "max_tokens", "stop_sequence", "temperature", "top_p", "frequency_penalty", "presence_penalty", "seed", "logprobs", "top_logprobs", "logit_bias" ], "beta": false, "legacy": false, "edited": false, "metadata": { "keywords": [ "model", "llm", "language model", "large language model" ], "module": "lfx.components.ibm.watsonx.WatsonxAIComponent", "code_hash": "85c24939214c", "dependencies": { "total_dependencies": 4, "dependencies": [ { "name": "requests", "version": "2.32.5" }, { "name": "langchain_ibm", "version": "0.3.16" }, { "name": "pydantic", "version": "2.10.6" }, { "name": "lfx", "version": null } ] } }, "tool_mode": false, "official": false }, "showNode": true, "type": "IBMwatsonxModel", "id": "IBMwatsonxModel-qXZxc", "selected_output": "model_output" }, "id": "IBMwatsonxModel-qXZxc", "position": { "x": 0, "y": 0 }, "type": "genericNode" }