openrag/flows/components/ollama_llm.json

{
"data": {
"node": {
"template": {
"_type": "Component",
"base_url": {
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"load_from_db": false,
"list": false,
"list_add_label": "Add More",
"required": false,
"placeholder": "",
"show": true,
"name": "base_url",
"value": "",
"display_name": "Base URL",
"advanced": false,
"input_types": ["Message"],
"dynamic": false,
"info": "Endpoint of the Ollama API.",
"real_time_refresh": true,
"title_case": false,
"type": "str",
"_input_type": "MessageTextInput"
},
"code": {
"type": "code",
"required": true,
"placeholder": "",
"list": false,
"show": true,
"multiline": true,
"value": "import asyncio\nfrom typing import Any\nfrom urllib.parse import urljoin\n\nimport httpx\nfrom langchain_ollama import ChatOllama\n\nfrom lfx.base.models.model import LCModelComponent\nfrom lfx.field_typing import LanguageModel\nfrom lfx.field_typing.range_spec import RangeSpec\nfrom lfx.io import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, MessageTextInput, SliderInput\nfrom lfx.log.logger import logger\nfrom lfx.utils.util import transform_localhost_url\n\nHTTP_STATUS_OK = 200\n\n\nclass ChatOllamaComponent(LCModelComponent):\n display_name = \"Ollama\"\n description = \"Generate text using Ollama Local LLMs.\"\n icon = \"Ollama\"\n name = \"OllamaModel\"\n\n # Define constants for JSON keys\n JSON_MODELS_KEY = \"models\"\n JSON_NAME_KEY = \"name\"\n JSON_CAPABILITIES_KEY = \"capabilities\"\n DESIRED_CAPABILITY = \"completion\"\n TOOL_CALLING_CAPABILITY = \"tools\"\n\n inputs = [\n MessageTextInput(\n name=\"base_url\",\n display_name=\"Base URL\",\n info=\"Endpoint of the Ollama API.\",\n value=\"\",\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=[],\n info=\"Refer to https://ollama.com/library for more models.\",\n refresh_button=True,\n real_time_refresh=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n MessageTextInput(\n name=\"format\", display_name=\"Format\", info=\"Specify the format of the output (e.g., json).\", advanced=True\n ),\n DictInput(name=\"metadata\", display_name=\"Metadata\", info=\"Metadata to add to the run trace.\", advanced=True),\n DropdownInput(\n name=\"mirostat\",\n display_name=\"Mirostat\",\n options=[\"Disabled\", \"Mirostat\", \"Mirostat 2.0\"],\n info=\"Enable/disable Mirostat sampling for controlling perplexity.\",\n value=\"Disabled\",\n advanced=True,\n real_time_refresh=True,\n ),\n FloatInput(\n name=\"mirostat_eta\",\n display_name=\"Mirostat Eta\",\n info=\"Learning rate for Mirostat algorithm. (Default: 0.1)\",\n advanced=True,\n ),\n FloatInput(\n name=\"mirostat_tau\",\n display_name=\"Mirostat Tau\",\n info=\"Controls the balance between coherence and diversity of the output. (Default: 5.0)\",\n advanced=True,\n ),\n IntInput(\n name=\"num_ctx\",\n display_name=\"Context Window Size\",\n info=\"Size of the context window for generating tokens. (Default: 2048)\",\n advanced=True,\n ),\n IntInput(\n name=\"num_gpu\",\n display_name=\"Number of GPUs\",\n info=\"Number of GPUs to use for computation. (Default: 1 on macOS, 0 to disable)\",\n advanced=True,\n ),\n IntInput(\n name=\"num_thread\",\n display_name=\"Number of Threads\",\n info=\"Number of threads to use during computation. (Default: detected for optimal performance)\",\n advanced=True,\n ),\n IntInput(\n name=\"repeat_last_n\",\n display_name=\"Repeat Last N\",\n info=\"How far back the model looks to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)\",\n advanced=True,\n ),\n FloatInput(\n name=\"repeat_penalty\",\n display_name=\"Repeat Penalty\",\n info=\"Penalty for repetitions in generated text. (Default: 1.1)\",\n advanced=True,\n ),\n FloatInput(name=\"tfs_z\", display_name=\"TFS Z\", info=\"Tail free sampling value. (Default: 1)\", advanced=True),\n IntInput(name=\"timeout\", display_name=\"Timeout\", info=\"Timeout for the request stream.\", advanced=True),\n IntInput(\n name=\"top_k\", display_name=\"Top K\", info=\"Limits token selection to top K. 
(Default: 40)\", advanced=True\n ),\n FloatInput(name=\"top_p\", display_name=\"Top P\", info=\"Works together with top-k. (Default: 0.9)\", advanced=True),\n BoolInput(name=\"verbose\", display_name=\"Verbose\", info=\"Whether to print out response text.\", advanced=True),\n MessageTextInput(\n name=\"tags\",\n display_name=\"Tags\",\n info=\"Comma-separated list of tags to add to the run trace.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"stop_tokens\",\n display_name=\"Stop Tokens\",\n info=\"Comma-separated list of tokens to signal the model to stop generating text.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"system\", display_name=\"System\", info=\"System to use for generating text.\", advanced=True\n ),\n BoolInput(\n name=\"tool_model_enabled\",\n display_name=\"Tool Model Enabled\",\n info=\"Whether to enable tool calling in the model.\",\n value=True,\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"template\", display_name=\"Template\", info=\"Template to use for generating text.\", advanced=True\n ),\n *LCModelComponent.get_base_inputs(),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # Mapping mirostat settings to their corresponding values\n mirostat_options = {\"Mirostat\": 1, \"Mirostat 2.0\": 2}\n\n # Default to 0 for 'Disabled'\n mirostat_value = mirostat_options.get(self.mirostat, 0)\n\n # Set mirostat_eta and mirostat_tau to None if mirostat is disabled\n if mirostat_value == 0:\n mirostat_eta = None\n mirostat_tau = None\n else:\n mirostat_eta = self.mirostat_eta\n mirostat_tau = self.mirostat_tau\n\n transformed_base_url = transform_localhost_url(self.base_url)\n # Mapping system settings to their corresponding values\n llm_params = {\n \"base_url\": transformed_base_url,\n \"model\": self.model_name,\n \"mirostat\": mirostat_value,\n \"format\": self.format,\n \"metadata\": self.metadata,\n \"tags\": self.tags.split(\",\") if self.tags else None,\n \"mirostat_eta\": mirostat_eta,\n \"mirostat_tau\": mirostat_tau,\n \"num_ctx\": self.num_ctx or None,\n \"num_gpu\": self.num_gpu or None,\n \"num_thread\": self.num_thread or None,\n \"repeat_last_n\": self.repeat_last_n or None,\n \"repeat_penalty\": self.repeat_penalty or None,\n \"temperature\": self.temperature or None,\n \"stop\": self.stop_tokens.split(\",\") if self.stop_tokens else None,\n \"system\": self.system,\n \"tfs_z\": self.tfs_z or None,\n \"timeout\": self.timeout or None,\n \"top_k\": self.top_k or None,\n \"top_p\": self.top_p or None,\n \"verbose\": self.verbose,\n \"template\": self.template,\n }\n\n # Remove parameters with None values\n llm_params = {k: v for k, v in llm_params.items() if v is not None}\n\n try:\n output = ChatOllama(**llm_params)\n except Exception as e:\n msg = (\n \"Unable to connect to the Ollama API. 
\",\n \"Please verify the base URL, ensure the relevant Ollama model is pulled, and try again.\",\n )\n raise ValueError(msg) from e\n\n return output\n\n async def is_valid_ollama_url(self, url: str) -> bool:\n try:\n async with httpx.AsyncClient() as client:\n url = transform_localhost_url(url)\n return (await client.get(urljoin(url, \"api/tags\"))).status_code == HTTP_STATUS_OK\n except httpx.RequestError:\n return False\n\n async def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None):\n if field_name == \"mirostat\":\n if field_value == \"Disabled\":\n build_config[\"mirostat_eta\"][\"advanced\"] = True\n build_config[\"mirostat_tau\"][\"advanced\"] = True\n build_config[\"mirostat_eta\"][\"value\"] = None\n build_config[\"mirostat_tau\"][\"value\"] = None\n\n else:\n build_config[\"mirostat_eta\"][\"advanced\"] = False\n build_config[\"mirostat_tau\"][\"advanced\"] = False\n\n if field_value == \"Mirostat 2.0\":\n build_config[\"mirostat_eta\"][\"value\"] = 0.2\n build_config[\"mirostat_tau\"][\"value\"] = 10\n else:\n build_config[\"mirostat_eta\"][\"value\"] = 0.1\n build_config[\"mirostat_tau\"][\"value\"] = 5\n\n if field_name in {\"base_url\", \"model_name\"} and not await self.is_valid_ollama_url(self.base_url):\n msg = \"Ollama is not running on the provided base URL. Please start Ollama and try again.\"\n raise ValueError(msg)\n if field_name in {\"model_name\", \"base_url\", \"tool_model_enabled\"}:\n if await self.is_valid_ollama_url(self.base_url):\n tool_model_enabled = build_config[\"tool_model_enabled\"].get(\"value\", False) or self.tool_model_enabled\n build_config[\"model_name\"][\"options\"] = await self.get_models(\n self.base_url, tool_model_enabled=tool_model_enabled\n )\n else:\n build_config[\"model_name\"][\"options\"] = []\n if field_name == \"keep_alive_flag\":\n if field_value == \"Keep\":\n build_config[\"keep_alive\"][\"value\"] = \"-1\"\n build_config[\"keep_alive\"][\"advanced\"] = True\n elif field_value == \"Immediately\":\n build_config[\"keep_alive\"][\"value\"] = \"0\"\n build_config[\"keep_alive\"][\"advanced\"] = True\n else:\n build_config[\"keep_alive\"][\"advanced\"] = False\n\n return build_config\n\n async def get_models(self, base_url_value: str, *, tool_model_enabled: bool | None = None) -> list[str]:\n \"\"\"Fetches a list of models from the Ollama API that do not have the \"embedding\" capability.\n\n Args:\n base_url_value (str): The base URL of the Ollama API.\n tool_model_enabled (bool | None, optional): If True, filters the models further to include\n only those that support tool calling. Defaults to None.\n\n Returns:\n list[str]: A list of model names that do not have the \"embedding\" capability. 
If\n `tool_model_enabled` is True, only models supporting tool calling are included.\n\n Raises:\n ValueError: If there is an issue with the API request or response, or if the model\n names cannot be retrieved.\n \"\"\"\n try:\n # Normalize the base URL to avoid the repeated \"/\" at the end\n base_url = base_url_value.rstrip(\"/\") + \"/\"\n base_url = transform_localhost_url(base_url)\n\n # Ollama REST API to return models\n tags_url = urljoin(base_url, \"api/tags\")\n\n # Ollama REST API to return model capabilities\n show_url = urljoin(base_url, \"api/show\")\n\n async with httpx.AsyncClient() as client:\n # Fetch available models\n tags_response = await client.get(tags_url)\n tags_response.raise_for_status()\n models = tags_response.json()\n if asyncio.iscoroutine(models):\n models = await models\n await logger.adebug(f\"Available models: {models}\")\n\n # Filter models that are NOT embedding models\n model_ids = []\n for model in models[self.JSON_MODELS_KEY]:\n model_name = model[self.JSON_NAME_KEY]\n await logger.adebug(f\"Checking model: {model_name}\")\n\n payload = {\"model\": model_name}\n show_response = await client.post(show_url, json=payload)\n show_response.raise_for_status()\n json_data = show_response.json()\n if asyncio.iscoroutine(json_data):\n json_data = await json_data\n capabilities = json_data.get(self.JSON_CAPABILITIES_KEY, [])\n await logger.adebug(f\"Model: {model_name}, Capabilities: {capabilities}\")\n\n if self.DESIRED_CAPABILITY in capabilities and (\n not tool_model_enabled or self.TOOL_CALLING_CAPABILITY in capabilities\n ):\n model_ids.append(model_name)\n\n except (httpx.RequestError, ValueError) as e:\n msg = \"Could not get model names from Ollama.\"\n raise ValueError(msg) from e\n\n return model_ids\n",
"fileTypes": [],
"file_path": "",
"password": false,
"name": "code",
"advanced": true,
"dynamic": true,
"info": "",
"load_from_db": false,
"title_case": false
},
"format": {
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"load_from_db": false,
"list": false,
"list_add_label": "Add More",
"required": false,
"placeholder": "",
"show": true,
"name": "format",
"value": "",
"display_name": "Format",
"advanced": true,
"input_types": ["Message"],
"dynamic": false,
"info": "Specify the format of the output (e.g., json).",
"title_case": false,
"type": "str",
"_input_type": "MessageTextInput"
},
"input_value": {
"trace_as_input": true,
"tool_mode": false,
"trace_as_metadata": true,
"load_from_db": false,
"list": false,
"list_add_label": "Add More",
"required": false,
"placeholder": "",
"show": true,
"name": "input_value",
"value": "",
"display_name": "Input",
"advanced": false,
"input_types": ["Message"],
"dynamic": false,
"info": "",
"title_case": false,
"type": "str",
"_input_type": "MessageInput"
},
"metadata": {
"tool_mode": false,
"trace_as_input": true,
"list": false,
"list_add_label": "Add More",
"required": false,
"placeholder": "",
"show": true,
"name": "metadata",
"value": {},
"display_name": "Metadata",
"advanced": true,
"dynamic": false,
"info": "Metadata to add to the run trace.",
"title_case": false,
"type": "dict",
"_input_type": "DictInput"
},
"mirostat": {
"tool_mode": false,
"trace_as_metadata": true,
"options": ["Disabled", "Mirostat", "Mirostat 2.0"],
"options_metadata": [],
"combobox": false,
"dialog_inputs": {},
"toggle": false,
"required": false,
"placeholder": "",
"show": true,
"name": "mirostat",
"value": "Disabled",
"display_name": "Mirostat",
"advanced": true,
"dynamic": false,
"info": "Enable/disable Mirostat sampling for controlling perplexity.",
"real_time_refresh": true,
"title_case": false,
"external_options": {},
"type": "str",
"_input_type": "DropdownInput"
},
"mirostat_eta": {
"tool_mode": false,
"trace_as_metadata": true,
"list": false,
"list_add_label": "Add More",
"required": false,
"placeholder": "",
"show": true,
"name": "mirostat_eta",
"value": "",
"display_name": "Mirostat Eta",
"advanced": true,
"dynamic": false,
"info": "Learning rate for Mirostat algorithm. (Default: 0.1)",
"title_case": false,
"type": "float",
"_input_type": "FloatInput"
},
"mirostat_tau": {
"tool_mode": false,
"trace_as_metadata": true,
"list": false,
"list_add_label": "Add More",
"required": false,
"placeholder": "",
"show": true,
"name": "mirostat_tau",
"value": "",
"display_name": "Mirostat Tau",
"advanced": true,
"dynamic": false,
"info": "Controls the balance between coherence and diversity of the output. (Default: 5.0)",
"title_case": false,
"type": "float",
"_input_type": "FloatInput"
},
"model_name": {
"tool_mode": false,
"trace_as_metadata": true,
"options": ["gpt-oss:20b", "qwen3:4b"],
"options_metadata": [],
"combobox": false,
"dialog_inputs": {},
"toggle": false,
"required": false,
"placeholder": "",
"show": true,
"name": "model_name",
"value": "",
"display_name": "Model Name",
"advanced": false,
"dynamic": false,
"info": "Refer to https://ollama.com/library for more models.",
"real_time_refresh": true,
"refresh_button": true,
"title_case": false,
"external_options": {},
"type": "str",
"_input_type": "DropdownInput"
},
"num_ctx": {
"tool_mode": false,
"trace_as_metadata": true,
"list": false,
"list_add_label": "Add More",
"required": false,
"placeholder": "",
"show": true,
"name": "num_ctx",
"value": "",
"display_name": "Context Window Size",
"advanced": true,
"dynamic": false,
"info": "Size of the context window for generating tokens. (Default: 2048)",
"title_case": false,
"type": "int",
"_input_type": "IntInput"
},
"num_gpu": {
"tool_mode": false,
"trace_as_metadata": true,
"list": false,
"list_add_label": "Add More",
"required": false,
"placeholder": "",
"show": true,
"name": "num_gpu",
"value": "",
"display_name": "Number of GPUs",
"advanced": true,
"dynamic": false,
"info": "Number of GPUs to use for computation. (Default: 1 on macOS, 0 to disable)",
"title_case": false,
"type": "int",
"_input_type": "IntInput"
},
"num_thread": {
"tool_mode": false,
"trace_as_metadata": true,
"list": false,
"list_add_label": "Add More",
"required": false,
"placeholder": "",
"show": true,
"name": "num_thread",
"value": "",
"display_name": "Number of Threads",
"advanced": true,
"dynamic": false,
"info": "Number of threads to use during computation. (Default: detected for optimal performance)",
"title_case": false,
"type": "int",
"_input_type": "IntInput"
},
"repeat_last_n": {
"tool_mode": false,
"trace_as_metadata": true,
"list": false,
"list_add_label": "Add More",
"required": false,
"placeholder": "",
"show": true,
"name": "repeat_last_n",
"value": "",
"display_name": "Repeat Last N",
"advanced": true,
"dynamic": false,
"info": "How far back the model looks to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)",
"title_case": false,
"type": "int",
"_input_type": "IntInput"
},
"repeat_penalty": {
"tool_mode": false,
"trace_as_metadata": true,
"list": false,
"list_add_label": "Add More",
"required": false,
"placeholder": "",
"show": true,
"name": "repeat_penalty",
"value": "",
"display_name": "Repeat Penalty",
"advanced": true,
"dynamic": false,
"info": "Penalty for repetitions in generated text. (Default: 1.1)",
"title_case": false,
"type": "float",
"_input_type": "FloatInput"
},
"stop_tokens": {
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"load_from_db": false,
"list": false,
"list_add_label": "Add More",
"required": false,
"placeholder": "",
"show": true,
"name": "stop_tokens",
"value": "",
"display_name": "Stop Tokens",
"advanced": true,
"input_types": ["Message"],
"dynamic": false,
"info": "Comma-separated list of tokens to signal the model to stop generating text.",
"title_case": false,
"type": "str",
"_input_type": "MessageTextInput"
},
"stream": {
"tool_mode": false,
"trace_as_metadata": true,
"list": false,
"list_add_label": "Add More",
"required": false,
"placeholder": "",
"show": true,
"name": "stream",
"value": false,
"display_name": "Stream",
"advanced": true,
"dynamic": false,
"info": "Stream the response from the model. Streaming works only in Chat.",
"title_case": false,
"type": "bool",
"_input_type": "BoolInput"
},
"system": {
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"load_from_db": false,
"list": false,
"list_add_label": "Add More",
"required": false,
"placeholder": "",
"show": true,
"name": "system",
"value": "",
"display_name": "System",
"advanced": true,
"input_types": ["Message"],
"dynamic": false,
"info": "System to use for generating text.",
"title_case": false,
"type": "str",
"_input_type": "MessageTextInput"
},
"system_message": {
"tool_mode": false,
"trace_as_input": true,
"multiline": true,
"trace_as_metadata": true,
"load_from_db": false,
"list": false,
"list_add_label": "Add More",
"required": false,
"placeholder": "",
"show": true,
"name": "system_message",
"value": "",
"display_name": "System Message",
"advanced": false,
"input_types": ["Message"],
"dynamic": false,
"info": "System message to pass to the model.",
"title_case": false,
"copy_field": false,
"type": "str",
"_input_type": "MultilineInput"
},
"tags": {
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"load_from_db": false,
"list": false,
"list_add_label": "Add More",
"required": false,
"placeholder": "",
"show": true,
"name": "tags",
"value": "",
"display_name": "Tags",
"advanced": true,
"input_types": ["Message"],
"dynamic": false,
"info": "Comma-separated list of tags to add to the run trace.",
"title_case": false,
"type": "str",
"_input_type": "MessageTextInput"
},
"temperature": {
"tool_mode": false,
"min_label": "",
"max_label": "",
"min_label_icon": "",
"max_label_icon": "",
"slider_buttons": false,
"slider_buttons_options": [],
"slider_input": false,
"range_spec": {
"step_type": "float",
"min": 0,
"max": 1,
"step": 0.01
},
"required": false,
"placeholder": "",
"show": true,
"name": "temperature",
"value": 0.1,
"display_name": "Temperature",
"advanced": true,
"dynamic": false,
"info": "",
"title_case": false,
"type": "slider",
"_input_type": "SliderInput"
},
"template": {
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"load_from_db": false,
"list": false,
"list_add_label": "Add More",
"required": false,
"placeholder": "",
"show": true,
"name": "template",
"value": "",
"display_name": "Template",
"advanced": true,
"input_types": ["Message"],
"dynamic": false,
"info": "Template to use for generating text.",
"title_case": false,
"type": "str",
"_input_type": "MessageTextInput"
},
"tfs_z": {
"tool_mode": false,
"trace_as_metadata": true,
"list": false,
"list_add_label": "Add More",
"required": false,
"placeholder": "",
"show": true,
"name": "tfs_z",
"value": "",
"display_name": "TFS Z",
"advanced": true,
"dynamic": false,
"info": "Tail free sampling value. (Default: 1)",
"title_case": false,
"type": "float",
"_input_type": "FloatInput"
},
"timeout": {
"tool_mode": false,
"trace_as_metadata": true,
"list": false,
"list_add_label": "Add More",
"required": false,
"placeholder": "",
"show": true,
"name": "timeout",
"value": "",
"display_name": "Timeout",
"advanced": true,
"dynamic": false,
"info": "Timeout for the request stream.",
"title_case": false,
"type": "int",
"_input_type": "IntInput"
},
"tool_model_enabled": {
"tool_mode": false,
"trace_as_metadata": true,
"list": false,
"list_add_label": "Add More",
"required": false,
"placeholder": "",
"show": true,
"name": "tool_model_enabled",
"value": true,
"display_name": "Tool Model Enabled",
"advanced": false,
"dynamic": false,
"info": "Whether to enable tool calling in the model.",
"real_time_refresh": true,
"title_case": false,
"type": "bool",
"_input_type": "BoolInput"
},
"top_k": {
"tool_mode": false,
"trace_as_metadata": true,
"list": false,
"list_add_label": "Add More",
"required": false,
"placeholder": "",
"show": true,
"name": "top_k",
"value": "",
"display_name": "Top K",
"advanced": true,
"dynamic": false,
"info": "Limits token selection to top K. (Default: 40)",
"title_case": false,
"type": "int",
"_input_type": "IntInput"
},
"top_p": {
"tool_mode": false,
"trace_as_metadata": true,
"list": false,
"list_add_label": "Add More",
"required": false,
"placeholder": "",
"show": true,
"name": "top_p",
"value": "",
"display_name": "Top P",
"advanced": true,
"dynamic": false,
"info": "Works together with top-k. (Default: 0.9)",
"title_case": false,
"type": "float",
"_input_type": "FloatInput"
},
"verbose": {
"tool_mode": false,
"trace_as_metadata": true,
"list": false,
"list_add_label": "Add More",
"required": false,
"placeholder": "",
"show": true,
"name": "verbose",
"value": false,
"display_name": "Verbose",
"advanced": true,
"dynamic": false,
"info": "Whether to print out response text.",
"title_case": false,
"type": "bool",
"_input_type": "BoolInput"
}
},
"description": "Generate text using Ollama Local LLMs.",
"icon": "Ollama",
"base_classes": ["LanguageModel", "Message"],
"display_name": "Ollama",
"documentation": "",
"minimized": false,
"custom_fields": {},
"output_types": [],
"pinned": false,
"conditional_paths": [],
"frozen": false,
"outputs": [
{
"types": ["Message"],
"name": "text_output",
"display_name": "Model Response",
"method": "text_response",
"value": "__UNDEFINED__",
"cache": true,
"required_inputs": null,
"allows_loop": false,
"group_outputs": false,
"options": null,
"tool_mode": true
},
{
"types": ["LanguageModel"],
"selected": "LanguageModel",
"name": "model_output",
"display_name": "Language Model",
"method": "build_model",
"value": "__UNDEFINED__",
"cache": true,
"required_inputs": null,
"allows_loop": false,
"group_outputs": false,
"options": null,
"tool_mode": true
}
],
"field_order": [
"base_url",
"model_name",
"temperature",
"format",
"metadata",
"mirostat",
"mirostat_eta",
"mirostat_tau",
"num_ctx",
"num_gpu",
"num_thread",
"repeat_last_n",
"repeat_penalty",
"tfs_z",
"timeout",
"top_k",
"top_p",
"verbose",
"tags",
"stop_tokens",
"system",
"tool_model_enabled",
"template",
"input_value",
"system_message",
"stream"
],
"beta": false,
"legacy": false,
"edited": false,
"metadata": {
"keywords": ["model", "llm", "language model", "large language model"],
"module": "lfx.components.ollama.ollama.ChatOllamaComponent",
"code_hash": "79649147b972",
"dependencies": {
"total_dependencies": 3,
"dependencies": [
{ "name": "httpx", "version": "0.28.1" },
{ "name": "langchain_ollama", "version": "0.2.1" },
{ "name": "lfx", "version": "0.1.12.dev32" }
]
}
},
"tool_mode": false,
"last_updated": "2025-10-29T20:37:13.232Z",
"official": false
},
"showNode": true,
"type": "OllamaModel",
"id": "OllamaModel-0hTPe",
"selected_output": "model_output"
},
"id": "OllamaModel-0hTPe",
"position": { "x": 0, "y": 0 },
"type": "genericNode"
}