diff --git a/Dockerfile.langflow b/Dockerfile.langflow
index 2acb4877..86ee0ea5 100644
--- a/Dockerfile.langflow
+++ b/Dockerfile.langflow
@@ -7,7 +7,7 @@ ENV RUSTFLAGS="--cfg reqwest_unstable"
# Accept build arguments for git repository and branch
ARG GIT_REPO=https://github.com/langflow-ai/langflow.git
-ARG GIT_BRANCH=load_flows_autologin_false
+ARG GIT_BRANCH=test-openai-responses
WORKDIR /app
diff --git a/README.md b/README.md
index df1d6451..a0178f28 100644
--- a/README.md
+++ b/README.md
@@ -62,7 +62,7 @@ LANGFLOW_CHAT_FLOW_ID=your_chat_flow_id
LANGFLOW_INGEST_FLOW_ID=your_ingest_flow_id
NUDGES_FLOW_ID=your_nudges_flow_id
```
-See extended configuration, including ingestion and optional variables: [docs/configure/configuration.md](docs/docs/configure/configuration.md)
+See extended configuration, including ingestion and optional variables: [docs/reference/configuration.mdx](docs/docs/reference/configuration.mdx)
### 3. Start OpenRAG
```bash
diff --git a/docker-compose.yml b/docker-compose.yml
index daa921ae..be31fb71 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -40,9 +40,9 @@ services:
openrag-backend:
image: phact/openrag-backend:${OPENRAG_VERSION:-latest}
- #build:
- #context: .
- #dockerfile: Dockerfile.backend
+ # build:
+ # context: .
+ # dockerfile: Dockerfile.backend
container_name: openrag-backend
depends_on:
- langflow
@@ -77,9 +77,9 @@ services:
openrag-frontend:
image: phact/openrag-frontend:${OPENRAG_VERSION:-latest}
- #build:
- #context: .
- #dockerfile: Dockerfile.frontend
+ # build:
+ # context: .
+ # dockerfile: Dockerfile.frontend
container_name: openrag-frontend
depends_on:
- openrag-backend
@@ -92,6 +93,9 @@ services:
volumes:
- ./flows:/app/flows:z
image: phact/openrag-langflow:${LANGFLOW_VERSION:-latest}
+ # build:
+ # context: .
+ # dockerfile: Dockerfile.langflow
container_name: langflow
ports:
- "7860:7860"
@@ -99,7 +103,7 @@ services:
- OPENAI_API_KEY=${OPENAI_API_KEY}
- LANGFLOW_LOAD_FLOWS_PATH=/app/flows
- LANGFLOW_SECRET_KEY=${LANGFLOW_SECRET_KEY}
- - JWT="dummy"
+ - JWT=None
- OWNER=None
- OWNER_NAME=None
- OWNER_EMAIL=None
diff --git a/docs/docs/_partial-onboarding.mdx b/docs/docs/_partial-onboarding.mdx
index 5efbf2eb..44222371 100644
--- a/docs/docs/_partial-onboarding.mdx
+++ b/docs/docs/_partial-onboarding.mdx
@@ -5,10 +5,12 @@ import TabItem from '@theme/TabItem';
The first time you start OpenRAG, whether using the TUI or a `.env` file, you must complete application onboarding.
-Values input during onboarding can be changed later in the OpenRAG **Settings** page, except for the language model and embedding model _provider_.
-**Your provider can only be selected once, and you must use the same provider for your language model and embedding model.**
-The language model can be changed, but the embeddings model cannot be changed.
-To change your provider selection, you must completely reinstall OpenRAG.
+Most values from onboarding can be changed later in the OpenRAG **Settings** page, but there are important restrictions.
+
+The **language model provider** and **embedding model provider** can only be selected during onboarding, and you must use the same provider for both models.
+To change your provider selection later, you must completely reinstall OpenRAG.
+
+The **language model** can be changed later in **Settings**, but the **embedding model** cannot.
@@ -36,14 +38,12 @@ To change your provider selection, you must completely reinstall OpenRAG.
:::
1. Enter your Ollama server's base URL address.
The default Ollama server address is `http://localhost:11434`.
- Since OpenRAG is running in a container, you may need to change `localhost` to access services outside of the container. For example, change `http://localhost:11434` to `http://host.docker.internal:11434` to connect to Ollama.
- OpenRAG automatically sends a test connection to your Ollama server to confirm connectivity.
+   OpenRAG automatically rewrites `localhost` to reach services outside of the container, and sends a test connection to your Ollama server to confirm connectivity.
2. Select the **Embedding Model** and **Language Model** your Ollama server is running.
- OpenRAG automatically lists the available models from your Ollama server.
+ OpenRAG retrieves the available models from your Ollama server.
3. To load 2 sample PDFs, enable **Sample dataset**.
This is recommended, but not required.
4. Click **Complete**.
5. Continue with the [Quickstart](/quickstart).
-
-
+
\ No newline at end of file
diff --git a/docs/docs/configure/configuration.mdx b/docs/docs/configure/configuration.mdx
deleted file mode 100644
index d8058254..00000000
--- a/docs/docs/configure/configuration.mdx
+++ /dev/null
@@ -1,110 +0,0 @@
----
-title: Configuration
-slug: /configure/configuration
----
-
-import PartialExternalPreview from '@site/docs/_partial-external-preview.mdx';
-
-
-
-OpenRAG supports multiple configuration methods with the following priority:
-
-1. **Environment Variables** (highest priority)
-2. **Configuration File** (`config.yaml`)
-3. **Langflow Flow Settings** (runtime override)
-4. **Default Values** (fallback)
-
-## Configuration File
-
-Create a `config.yaml` file in the project root to configure OpenRAG:
-
-```yaml
-# OpenRAG Configuration File
-provider:
- model_provider: "openai" # openai, anthropic, azure, etc.
- api_key: "your-api-key" # or use OPENAI_API_KEY env var
-
-knowledge:
- embedding_model: "text-embedding-3-small"
- chunk_size: 1000
- chunk_overlap: 200
- ocr: true
- picture_descriptions: false
-
-agent:
- llm_model: "gpt-4o-mini"
- system_prompt: "You are a helpful AI assistant..."
-```
-
-## Environment Variables
-
-Environment variables will override configuration file settings. You can still use `.env` files:
-
-```bash
-cp .env.example .env
-```
-
-## Required Variables
-
-| Variable | Description |
-| ----------------------------- | ------------------------------------------- |
-| `OPENAI_API_KEY` | Your OpenAI API key |
-| `OPENSEARCH_PASSWORD` | Password for OpenSearch admin user |
-| `LANGFLOW_SUPERUSER` | Langflow admin username |
-| `LANGFLOW_SUPERUSER_PASSWORD` | Langflow admin password |
-| `LANGFLOW_CHAT_FLOW_ID` | ID of your Langflow chat flow |
-| `LANGFLOW_INGEST_FLOW_ID` | ID of your Langflow ingestion flow |
-| `NUDGES_FLOW_ID` | ID of your Langflow nudges/suggestions flow |
-
-## Ingestion Configuration
-
-| Variable | Description |
-| ------------------------------ | ------------------------------------------------------ |
-| `DISABLE_INGEST_WITH_LANGFLOW` | Disable Langflow ingestion pipeline (default: `false`) |
-
-- `false` or unset: Uses Langflow pipeline (upload → ingest → delete)
-- `true`: Uses traditional OpenRAG processor for document ingestion
-
-## Optional Variables
-
-| Variable | Description |
-| ------------------------------------------------------------------------- | ------------------------------------------------------------------ |
-| `LANGFLOW_PUBLIC_URL` | Public URL for Langflow (default: `http://localhost:7860`) |
-| `GOOGLE_OAUTH_CLIENT_ID` / `GOOGLE_OAUTH_CLIENT_SECRET` | Google OAuth authentication |
-| `MICROSOFT_GRAPH_OAUTH_CLIENT_ID` / `MICROSOFT_GRAPH_OAUTH_CLIENT_SECRET` | Microsoft OAuth |
-| `WEBHOOK_BASE_URL` | Base URL for webhook endpoints |
-| `AWS_ACCESS_KEY_ID` / `AWS_SECRET_ACCESS_KEY` | AWS integrations |
-| `SESSION_SECRET` | Session management (default: auto-generated, change in production) |
-| `LANGFLOW_KEY` | Explicit Langflow API key (auto-generated if not provided) |
-| `LANGFLOW_SECRET_KEY` | Secret key for Langflow internal operations |
-
-## OpenRAG Configuration Variables
-
-These environment variables override settings in `config.yaml`:
-
-### Provider Settings
-
-| Variable | Description | Default |
-| ------------------ | ---------------------------------------- | -------- |
-| `MODEL_PROVIDER` | Model provider (openai, anthropic, etc.) | `openai` |
-| `PROVIDER_API_KEY` | API key for the model provider | |
-| `OPENAI_API_KEY` | OpenAI API key (backward compatibility) | |
-
-### Knowledge Settings
-
-| Variable | Description | Default |
-| ------------------------------ | --------------------------------------- | ------------------------ |
-| `EMBEDDING_MODEL` | Embedding model for vector search | `text-embedding-3-small` |
-| `CHUNK_SIZE` | Text chunk size for document processing | `1000` |
-| `CHUNK_OVERLAP` | Overlap between chunks | `200` |
-| `OCR_ENABLED` | Enable OCR for image processing | `true` |
-| `PICTURE_DESCRIPTIONS_ENABLED` | Enable picture descriptions | `false` |
-
-### Agent Settings
-
-| Variable | Description | Default |
-| --------------- | --------------------------------- | ------------------------ |
-| `LLM_MODEL` | Language model for the chat agent | `gpt-4o-mini` |
-| `SYSTEM_PROMPT` | System prompt for the agent | Default assistant prompt |
-
-See `.env.example` for a complete list with descriptions, and `docker-compose*.yml` for runtime usage.
diff --git a/docs/docs/core-components/agents.mdx b/docs/docs/core-components/agents.mdx
index 9b4adb4b..3ee4617b 100644
--- a/docs/docs/core-components/agents.mdx
+++ b/docs/docs/core-components/agents.mdx
@@ -52,7 +52,7 @@ This filter is the [Knowledge filter](/knowledge#create-knowledge-filters), and
-For an example of changing out the agent's LLM in OpenRAG, see the [Quickstart](/quickstart#change-components).
+For an example of changing out the agent's language model in OpenRAG, see the [Quickstart](/quickstart#change-components).
To restore the flow to its initial state, in OpenRAG, click **Settings**, and then click **Restore Flow**.
OpenRAG warns you that this discards all custom settings. Click **Restore** to restore the flow.
diff --git a/docs/docs/core-components/ingestion.mdx b/docs/docs/core-components/ingestion.mdx
index 08071158..d3ce81b0 100644
--- a/docs/docs/core-components/ingestion.mdx
+++ b/docs/docs/core-components/ingestion.mdx
@@ -46,7 +46,7 @@ If OpenRAG detects that the local machine is running on macOS, OpenRAG uses the
## Use OpenRAG default ingestion instead of Docling serve
-If you want to use OpenRAG's built-in pipeline instead of Docling serve, set `DISABLE_INGEST_WITH_LANGFLOW=true` in [Environment variables](/configure/configuration#ingestion-configuration).
+If you want to use OpenRAG's built-in pipeline instead of Docling serve, set `DISABLE_INGEST_WITH_LANGFLOW=true` in [Environment variables](/reference/configuration#document-processing).
The built-in pipeline still uses the Docling processor, but uses it directly without the Docling Serve API.
diff --git a/docs/docs/get-started/docker.mdx b/docs/docs/get-started/docker.mdx
index 8542169a..f7ec730b 100644
--- a/docs/docs/get-started/docker.mdx
+++ b/docs/docs/get-started/docker.mdx
@@ -51,12 +51,12 @@ The following values are **required** to be set:
```bash
OPENSEARCH_PASSWORD=your_secure_password
OPENAI_API_KEY=your_openai_api_key
-
LANGFLOW_SUPERUSER=admin
LANGFLOW_SUPERUSER_PASSWORD=your_langflow_password
LANGFLOW_SECRET_KEY=your_secret_key
```
- For more information on configuring OpenRAG with environment variables, see [Environment variables](/configure/configuration).
+
+ For more information on configuring OpenRAG with environment variables, see [Environment variables](/reference/configuration).
4. Deploy OpenRAG with Docker Compose based on your deployment type.
@@ -95,12 +95,35 @@ The following values are **required** to be set:
-## Rebuild all Docker containers
+## Container management commands
-If you need to reset state and rebuild all of your containers, run the following command.
+Manage your OpenRAG containers with the following commands.
+These commands are also available in the TUI's [Status menu](/get-started/tui#status).
+
+### Upgrade containers
+
+Upgrade your containers to the latest version while preserving your data.
+
+```bash
+docker compose pull
+docker compose up -d --force-recreate
+```
+
+### Rebuild containers (destructive)
+
+Reset state by rebuilding all of your containers.
Your OpenSearch and Langflow databases will be lost.
Documents stored in the `./documents` directory will persist, since the directory is mounted as a volume in the OpenRAG backend container.
```bash
docker compose up --build --force-recreate --remove-orphans
```
+
+### Remove all containers and data (destructive)
+
+Completely remove your OpenRAG installation and delete all of your data.
+This includes OpenSearch data, uploaded documents, and authentication data.
+```bash
+docker compose down --volumes --remove-orphans --rmi local
+docker system prune -f
+```
\ No newline at end of file
diff --git a/docs/docs/get-started/install.mdx b/docs/docs/get-started/install.mdx
index 830a8f5e..1759e813 100644
--- a/docs/docs/get-started/install.mdx
+++ b/docs/docs/get-started/install.mdx
@@ -5,12 +5,12 @@ slug: /install
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
-import PartialOnboarding from '@site/docs/_partial-onboarding.mdx';
+import PartialOnboarding from '@site/docs/_partial-onboarding.mdx';
import PartialExternalPreview from '@site/docs/_partial-external-preview.mdx';
-[Install the OpenRAG Python wheel](#install-python-wheel) and then use the [OpenRAG Terminal User Interface(TUI)](#setup) to run and configure your OpenRAG deployment with a guided setup process.
+[Install the OpenRAG Python wheel](#install-python-wheel), and then run the [OpenRAG Terminal User Interface (TUI)](#setup) to start your OpenRAG deployment with a guided setup process.
If you prefer running Docker commands and manually editing `.env` files, see [Deploy with Docker](/get-started/docker).
@@ -29,17 +29,16 @@ If you prefer running Docker commands and manually editing `.env` files, see [De
The `.whl` file is currently available as an internal download during public preview, and will be published to PyPI in a future release.
:::
-The OpenRAG wheel installs the Terminal User Interface (TUI) for running and managing OpenRAG.
+The OpenRAG wheel installs the Terminal User Interface (TUI) for configuring and running OpenRAG.
-1. Create a new project with a virtual environment using `uv`.
- This creates and activates a virtual environment for your project.
+1. Create a new project with a virtual environment using `uv init`.
```bash
uv init YOUR_PROJECT_NAME
cd YOUR_PROJECT_NAME
```
- The terminal prompt won't change like it would when using `venv`, but the `uv` commands will use the project's virtual environment.
+   Your terminal prompt won't show a `(venv)` prefix, but `uv` commands automatically use the project's virtual environment.
For more information on virtual environments, see the [uv documentation](https://docs.astral.sh/uv/pip/environments).
2. Add the local OpenRAG wheel to your project's virtual environment.
@@ -65,7 +64,9 @@ The OpenRAG wheel installs the Terminal User Interface (TUI) for running and man
## Set up OpenRAG with the TUI {#setup}
-**Basic Setup** completes or auto-generates most of the required values to start OpenRAG.
+The TUI creates a `.env` file in your OpenRAG directory root and starts OpenRAG.
+
+**Basic Setup** generates all of the required values except the OpenAI API key.
**Basic Setup** does not set up OAuth connections for ingestion from Google Drive, OneDrive, or AWS.
For OAuth setup, use **Advanced Setup**.
diff --git a/docs/docs/reference/configuration.mdx b/docs/docs/reference/configuration.mdx
new file mode 100644
index 00000000..c8ca7cfa
--- /dev/null
+++ b/docs/docs/reference/configuration.mdx
@@ -0,0 +1,190 @@
+---
+title: Environment variables
+slug: /reference/configuration
+---
+
+import Icon from "@site/src/components/icon/icon";
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+OpenRAG recognizes [supported environment variables](#supported-environment-variables) from the following sources:
+
+* [Environment variables](#supported-environment-variables) - Values set in the `.env` file.
+* [Langflow runtime overrides](#langflow-runtime-overrides) - Langflow components can override configuration values at runtime through tweaks.
+* [Default or fallback values](#default-values-and-fallbacks) - Values OpenRAG falls back to when no other source provides one.
+
+## Configure environment variables
+
+Environment variables are set in a `.env` file in the root of your OpenRAG project directory.
+
+For an example `.env` file, see [`.env.example` in the OpenRAG repository](https://github.com/langflow-ai/openrag/blob/main/.env.example).
+
+The Docker Compose files are populated with values from your `.env`, so you don't need to edit the Docker Compose files manually.
+
+Environment variables always take precedence over the other configuration sources.
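+
+To confirm that Docker Compose picks up your `.env` values, you can render the resolved configuration. This is an optional check using a standard Docker Compose command, not an OpenRAG-specific tool:
+
+```bash
+docker compose config
+```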
+
+### Set environment variables
+
+To set environment variables, do the following:
+
+1. Stop OpenRAG.
+2. Set the values in the `.env` file:
+ ```bash
+ LOG_LEVEL=DEBUG
+ LOG_FORMAT=json
+ SERVICE_NAME=openrag-dev
+ ```
+3. Start OpenRAG.
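+
+To confirm that a value reached a container, you can inspect that container's environment. This sketch assumes the backend service is named `openrag-backend`, as in the default Docker Compose file:
+
+```bash
+docker compose exec openrag-backend env | grep LOG_LEVEL
+```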
+
+Changes to provider API keys or provider endpoints in the `.env` file don't take effect after [Application onboarding](/install#application-onboarding). To change these values, you must do the following:
+
+1. Stop OpenRAG.
+2. Remove the containers:
+   ```bash
+   docker compose down
+   ```
+3. Update the values in your `.env` file.
+4. Start the OpenRAG containers:
+   ```bash
+   docker compose up -d
+   ```
+5. Complete [Application onboarding](/install#application-onboarding) again.
+
+## Supported environment variables
+
+All OpenRAG configuration can be controlled through environment variables.
+
+### AI provider settings
+
+Configure which AI models and providers OpenRAG uses for language processing and embeddings.
+For more information, see [Application onboarding](/install#application-onboarding).
+
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `EMBEDDING_MODEL` | `text-embedding-3-small` | Embedding model for vector search. |
+| `LLM_MODEL` | `gpt-4o-mini` | Language model for the chat agent. |
+| `MODEL_PROVIDER` | `openai` | Model provider, such as OpenAI or IBM watsonx.ai. |
+| `OPENAI_API_KEY` | - | Your OpenAI API key. Required. |
+| `PROVIDER_API_KEY` | - | API key for the model provider. |
+| `PROVIDER_ENDPOINT` | - | Custom provider endpoint. Only used for IBM or Ollama providers. |
+| `PROVIDER_PROJECT_ID` | - | Project ID for providers. Only required for the IBM watsonx.ai provider. |
+
+### Document processing
+
+Control how OpenRAG processes and ingests documents into your knowledge base.
+For more information, see [Ingestion](/ingestion).
+
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `CHUNK_OVERLAP` | `200` | Overlap between chunks. |
+| `CHUNK_SIZE` | `1000` | Text chunk size for document processing. |
+| `DISABLE_INGEST_WITH_LANGFLOW` | `false` | Disable Langflow ingestion pipeline. |
+| `DOCLING_OCR_ENGINE` | - | OCR engine for document processing. |
+| `OCR_ENABLED` | `false` | Enable OCR for image processing. |
+| `OPENRAG_DOCUMENTS_PATHS` | `./documents` | Document paths for ingestion. |
+| `PICTURE_DESCRIPTIONS_ENABLED` | `false` | Enable picture descriptions. |
+
+### Langflow settings
+
+Configure Langflow authentication.
+
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `LANGFLOW_AUTO_LOGIN` | `False` | Enable auto-login for Langflow. |
+| `LANGFLOW_CHAT_FLOW_ID` | pre-filled | ID of your Langflow chat flow. The default value is pre-filled from [.env.example](https://github.com/langflow-ai/openrag/blob/main/.env.example). |
+| `LANGFLOW_ENABLE_SUPERUSER_CLI` | `False` | Enable superuser CLI. |
+| `LANGFLOW_INGEST_FLOW_ID` | pre-filled | ID of your Langflow ingestion flow. The default value is pre-filled from [.env.example](https://github.com/langflow-ai/openrag/blob/main/.env.example). |
+| `LANGFLOW_KEY` | auto-generated | Explicit Langflow API key. |
+| `LANGFLOW_NEW_USER_IS_ACTIVE` | `False` | Whether new Langflow users are active by default. |
+| `LANGFLOW_PUBLIC_URL` | `http://localhost:7860` | Public URL for Langflow. |
+| `LANGFLOW_SECRET_KEY` | - | Secret key for Langflow internal operations. |
+| `LANGFLOW_SUPERUSER` | - | Langflow admin username. Required. |
+| `LANGFLOW_SUPERUSER_PASSWORD` | - | Langflow admin password. Required. |
+| `LANGFLOW_URL` | `http://localhost:7860` | Langflow URL. |
+| `NUDGES_FLOW_ID` | pre-filled | ID of your Langflow nudges/suggestions flow. The default value is pre-filled from [.env.example](https://github.com/langflow-ai/openrag/blob/main/.env.example). |
+| `SYSTEM_PROMPT` | "You are a helpful AI assistant with access to a knowledge base. Answer questions based on the provided context." | System prompt for the Langflow agent. |
+
+### OAuth provider settings
+
+Configure OAuth providers and external service integrations.
+
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `AWS_ACCESS_KEY_ID` / `AWS_SECRET_ACCESS_KEY` | - | AWS integrations. |
+| `GOOGLE_OAUTH_CLIENT_ID` / `GOOGLE_OAUTH_CLIENT_SECRET` | - | Google OAuth authentication. |
+| `MICROSOFT_GRAPH_OAUTH_CLIENT_ID` / `MICROSOFT_GRAPH_OAUTH_CLIENT_SECRET` | - | Microsoft OAuth. |
+| `WEBHOOK_BASE_URL` | - | Base URL for webhook endpoints. |
+
+### OpenSearch settings
+
+Configure OpenSearch database authentication.
+
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `OPENSEARCH_HOST` | `localhost` | OpenSearch host. |
+| `OPENSEARCH_PASSWORD` | - | Password for OpenSearch admin user. Required. |
+| `OPENSEARCH_PORT` | `9200` | OpenSearch port. |
+| `OPENSEARCH_USERNAME` | `admin` | OpenSearch username. |
+
+### System settings
+
+Configure general system components, session management, and logging.
+
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `LANGFLOW_KEY_RETRIES` | `15` | Number of retries for Langflow key generation. |
+| `LANGFLOW_KEY_RETRY_DELAY` | `2.0` | Delay between retries in seconds. |
+| `LOG_FORMAT` | - | Log format (set to "json" for JSON output). |
+| `LOG_LEVEL` | `INFO` | Logging level (DEBUG, INFO, WARNING, ERROR). |
+| `MAX_WORKERS` | - | Maximum number of workers for document processing. |
+| `SERVICE_NAME` | `openrag` | Service name for logging. |
+| `SESSION_SECRET` | auto-generated | Secret used for session management. Change this in production. |
+
+## Langflow runtime overrides
+
+Langflow runtime overrides allow you to modify component settings at runtime without changing the base configuration.
+
+Runtime overrides are implemented through **tweaks** - parameter modifications that are passed to specific Langflow components during flow execution.
+
+For more information on tweaks, see [Input schema (tweaks)](https://docs.langflow.org/concepts-publish#input-schema).
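+
+As a sketch of what a tweak looks like in practice, the following request overrides one component parameter for a single flow run. The flow ID, component ID, parameter, and API key are placeholders:
+
+```bash
+curl -X POST "http://localhost:7860/api/v1/run/YOUR_FLOW_ID" \
+  -H "Content-Type: application/json" \
+  -H "x-api-key: YOUR_LANGFLOW_API_KEY" \
+  -d '{
+    "input_value": "Hello",
+    "tweaks": {
+      "YOUR_COMPONENT_ID": {
+        "temperature": 0.2
+      }
+    }
+  }'
+```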
+
+## Default values and fallbacks
+
+When no environment variables or configuration file values are provided, OpenRAG uses default values.
+These values can be found in the code base at the following locations.
+
+### OpenRAG configuration defaults
+
+These values are defined in [`config_manager.py` in the OpenRAG repository](https://github.com/langflow-ai/openrag/blob/main/src/config/config_manager.py).
+
+### System configuration defaults
+
+These fallback values are defined in [`settings.py` in the OpenRAG repository](https://github.com/langflow-ai/openrag/blob/main/src/config/settings.py).
\ No newline at end of file
diff --git a/docs/docs/support/troubleshoot.mdx b/docs/docs/support/troubleshoot.mdx
index dc62cf43..9946db38 100644
--- a/docs/docs/support/troubleshoot.mdx
+++ b/docs/docs/support/troubleshoot.mdx
@@ -13,12 +13,12 @@ This page provides troubleshooting advice for issues you might encounter when us
## OpenSearch fails to start
-Check that `OPENSEARCH_PASSWORD` set in [Environment variables](/configure/configuration) meets requirements.
+Check that `OPENSEARCH_PASSWORD` set in [Environment variables](/reference/configuration) meets requirements.
The password must contain at least 8 characters, and must contain at least one uppercase letter, one lowercase letter, one digit, and one special character that is strong.
## Langflow connection issues
-Verify the `LANGFLOW_SUPERUSER` credentials set in [Environment variables](/configure/configuration) are correct.
+Verify the `LANGFLOW_SUPERUSER` credentials set in [Environment variables](/reference/configuration) are correct.
## Memory errors
@@ -108,4 +108,4 @@ To reset your local containers and pull new images, do the following:
3. In the OpenRAG TUI, click **Status**, and then click **Upgrade**.
When the **Close** button is active, the upgrade is complete.
-Close the window and open the OpenRAG appplication.
+Close the window and open the OpenRAG application.
\ No newline at end of file
diff --git a/docs/sidebars.js b/docs/sidebars.js
index e95a6f19..9d0c49c8 100644
--- a/docs/sidebars.js
+++ b/docs/sidebars.js
@@ -70,11 +70,11 @@ const sidebars = {
},
{
type: "category",
- label: "Configuration",
+ label: "Reference",
items: [
{
type: "doc",
- id: "configure/configuration",
+ id: "reference/configuration",
label: "Environment variables"
},
],
@@ -93,4 +93,4 @@ const sidebars = {
],
};
-export default sidebars;
+export default sidebars;
\ No newline at end of file
diff --git a/flows/components/ollama_embedding.json b/flows/components/ollama_embedding.json
index 01b83c44..24974b46 100644
--- a/flows/components/ollama_embedding.json
+++ b/flows/components/ollama_embedding.json
@@ -1,28 +1,114 @@
{
"data": {
- "id": "OllamaEmbeddings-4ah5Q",
"node": {
- "base_classes": [
- "Embeddings"
- ],
- "beta": false,
- "conditional_paths": [],
- "custom_fields": {},
+ "template": {
+ "_type": "Component",
+ "base_url": {
+ "tool_mode": false,
+ "trace_as_input": true,
+ "trace_as_metadata": true,
+ "load_from_db": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": true,
+ "placeholder": "",
+ "show": true,
+ "name": "base_url",
+ "value": "OLLAMA_BASE_URL",
+ "display_name": "Ollama Base URL",
+ "advanced": false,
+ "input_types": ["Message"],
+ "dynamic": false,
+ "info": "",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "MessageTextInput"
+ },
+ "code": {
+ "type": "code",
+ "required": true,
+ "placeholder": "",
+ "list": false,
+ "show": true,
+ "multiline": true,
+ "value": "from typing import Any\nfrom urllib.parse import urljoin\n\nimport httpx\nfrom langchain_ollama import OllamaEmbeddings\n\nfrom lfx.base.models.model import LCModelComponent\nfrom lfx.base.models.ollama_constants import OLLAMA_EMBEDDING_MODELS, URL_LIST\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import DropdownInput, MessageTextInput, Output\n\nHTTP_STATUS_OK = 200\n\n\nclass OllamaEmbeddingsComponent(LCModelComponent):\n display_name: str = \"Ollama Embeddings\"\n description: str = \"Generate embeddings using Ollama models.\"\n documentation = \"https://python.langchain.com/docs/integrations/text_embedding/ollama\"\n icon = \"Ollama\"\n name = \"OllamaEmbeddings\"\n\n inputs = [\n DropdownInput(\n name=\"model_name\",\n display_name=\"Ollama Model\",\n value=\"\",\n options=[],\n real_time_refresh=True,\n refresh_button=True,\n combobox=True,\n required=True,\n ),\n MessageTextInput(\n name=\"base_url\",\n display_name=\"Ollama Base URL\",\n value=\"\",\n required=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Embeddings\", name=\"embeddings\", method=\"build_embeddings\"),\n ]\n\n def build_embeddings(self) -> Embeddings:\n try:\n output = OllamaEmbeddings(model=self.model_name, base_url=self.base_url)\n except Exception as e:\n msg = (\n \"Unable to connect to the Ollama API. \",\n \"Please verify the base URL, ensure the relevant Ollama model is pulled, and try again.\",\n )\n raise ValueError(msg) from e\n return output\n\n async def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None):\n if field_name in {\"base_url\", \"model_name\"} and not await self.is_valid_ollama_url(field_value):\n # Check if any URL in the list is valid\n valid_url = \"\"\n for url in URL_LIST:\n if await self.is_valid_ollama_url(url):\n valid_url = url\n break\n build_config[\"base_url\"][\"value\"] = valid_url\n if field_name in {\"model_name\", \"base_url\", \"tool_model_enabled\"}:\n if await self.is_valid_ollama_url(self.base_url):\n build_config[\"model_name\"][\"options\"] = await self.get_model(self.base_url)\n elif await self.is_valid_ollama_url(build_config[\"base_url\"].get(\"value\", \"\")):\n build_config[\"model_name\"][\"options\"] = await self.get_model(build_config[\"base_url\"].get(\"value\", \"\"))\n else:\n build_config[\"model_name\"][\"options\"] = []\n\n return build_config\n\n async def get_model(self, base_url_value: str) -> list[str]:\n \"\"\"Get the model names from Ollama.\"\"\"\n model_ids = []\n try:\n url = urljoin(base_url_value, \"/api/tags\")\n async with httpx.AsyncClient() as client:\n response = await client.get(url)\n response.raise_for_status()\n data = response.json()\n\n model_ids = [model[\"name\"] for model in data.get(\"models\", [])]\n # this to ensure that not embedding models are included.\n # not even the base models since models can have 1b 2b etc\n # handles cases when embeddings models have tags like :latest - etc.\n model_ids = [\n model\n for model in model_ids\n if any(model.startswith(f\"{embedding_model}\") for embedding_model in OLLAMA_EMBEDDING_MODELS)\n ]\n\n except (ImportError, ValueError, httpx.RequestError) as e:\n msg = \"Could not get model names from Ollama.\"\n raise ValueError(msg) from e\n\n return model_ids\n\n async def is_valid_ollama_url(self, url: str) -> bool:\n try:\n async with httpx.AsyncClient() as client:\n return (await client.get(f\"{url}/api/tags\")).status_code == HTTP_STATUS_OK\n except httpx.RequestError:\n return False\n",
+ "fileTypes": [],
+ "file_path": "",
+ "password": false,
+ "name": "code",
+ "advanced": true,
+ "dynamic": true,
+ "info": "",
+ "load_from_db": false,
+ "title_case": false
+ },
+ "model_name": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "options": ["nomic-embed-text:latest", "all-minilm:latest"],
+ "options_metadata": [],
+ "combobox": true,
+ "dialog_inputs": {},
+ "toggle": false,
+ "required": true,
+ "placeholder": "",
+ "show": true,
+ "name": "model_name",
+ "value": "",
+ "display_name": "Ollama Model",
+ "advanced": false,
+ "dynamic": false,
+ "info": "",
+ "real_time_refresh": true,
+ "refresh_button": true,
+ "title_case": false,
+ "external_options": {},
+ "type": "str",
+ "_input_type": "DropdownInput"
+ }
+ },
"description": "Generate embeddings using Ollama models.",
+ "icon": "Ollama",
+ "base_classes": ["Embeddings"],
"display_name": "Ollama Embeddings",
"documentation": "https://python.langchain.com/docs/integrations/text_embedding/ollama",
- "edited": false,
- "field_order": [
- "model_name",
- "base_url"
- ],
+ "minimized": false,
+ "custom_fields": {},
+ "output_types": [],
+ "pinned": false,
+ "conditional_paths": [],
"frozen": false,
- "icon": "Ollama",
- "last_updated": "2025-09-22T20:18:27.128Z",
+ "outputs": [
+ {
+ "types": ["Embeddings"],
+ "selected": "Embeddings",
+ "name": "embeddings",
+ "display_name": "Embeddings",
+ "method": "build_embeddings",
+ "value": "__UNDEFINED__",
+ "cache": true,
+ "required_inputs": null,
+ "allows_loop": false,
+ "group_outputs": false,
+ "options": null,
+ "tool_mode": true
+ }
+ ],
+ "field_order": ["model_name", "base_url"],
+ "beta": false,
"legacy": false,
+ "edited": false,
"metadata": {
- "code_hash": "0db0f99e91e9",
+ "keywords": [
+ "model",
+ "llm",
+ "language model",
+ "large language model"
+ ],
+ "module": "lfx.components.ollama.ollama_embeddings.OllamaEmbeddingsComponent",
+ "code_hash": "c41821735548",
"dependencies": {
+ "total_dependencies": 3,
"dependencies": [
{
"name": "httpx",
@@ -33,125 +119,24 @@
"version": "0.2.1"
},
{
- "name": "langflow",
+ "name": "lfx",
"version": null
}
- ],
- "total_dependencies": 3
- },
- "keywords": [
- "model",
- "llm",
- "language model",
- "large language model"
- ],
- "module": "langflow.components.ollama.ollama_embeddings.OllamaEmbeddingsComponent"
- },
- "minimized": false,
- "output_types": [],
- "outputs": [
- {
- "allows_loop": false,
- "cache": true,
- "display_name": "Embeddings",
- "group_outputs": false,
- "method": "build_embeddings",
- "name": "embeddings",
- "options": null,
- "required_inputs": null,
- "selected": "Embeddings",
- "tool_mode": true,
- "types": [
- "Embeddings"
- ],
- "value": "__UNDEFINED__"
- }
- ],
- "pinned": false,
- "template": {
- "_type": "Component",
- "base_url": {
- "_input_type": "MessageTextInput",
- "advanced": false,
- "display_name": "Ollama Base URL",
- "dynamic": false,
- "info": "",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": true,
- "name": "base_url",
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": "OLLAMA_BASE_URL"
- },
- "code": {
- "advanced": true,
- "dynamic": true,
- "fileTypes": [],
- "file_path": "",
- "info": "",
- "list": false,
- "load_from_db": false,
- "multiline": true,
- "name": "code",
- "password": false,
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "type": "code",
- "value": "from typing import Any\nfrom urllib.parse import urljoin\n\nimport httpx\nfrom langchain_ollama import OllamaEmbeddings\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.ollama_constants import OLLAMA_EMBEDDING_MODELS, URL_LIST\nfrom langflow.field_typing import Embeddings\nfrom langflow.io import DropdownInput, MessageTextInput, Output\n\nHTTP_STATUS_OK = 200\n\n\nclass OllamaEmbeddingsComponent(LCModelComponent):\n display_name: str = \"Ollama Embeddings\"\n description: str = \"Generate embeddings using Ollama models.\"\n documentation = \"https://python.langchain.com/docs/integrations/text_embedding/ollama\"\n icon = \"Ollama\"\n name = \"OllamaEmbeddings\"\n\n inputs = [\n DropdownInput(\n name=\"model_name\",\n display_name=\"Ollama Model\",\n value=\"\",\n options=[],\n real_time_refresh=True,\n refresh_button=True,\n combobox=True,\n required=True,\n ),\n MessageTextInput(\n name=\"base_url\",\n display_name=\"Ollama Base URL\",\n value=\"\",\n required=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Embeddings\", name=\"embeddings\", method=\"build_embeddings\"),\n ]\n\n def build_embeddings(self) -> Embeddings:\n try:\n output = OllamaEmbeddings(model=self.model_name, base_url=self.base_url)\n except Exception as e:\n msg = (\n \"Unable to connect to the Ollama API. \",\n \"Please verify the base URL, ensure the relevant Ollama model is pulled, and try again.\",\n )\n raise ValueError(msg) from e\n return output\n\n async def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None):\n if field_name in {\"base_url\", \"model_name\"} and not await self.is_valid_ollama_url(field_value):\n # Check if any URL in the list is valid\n valid_url = \"\"\n for url in URL_LIST:\n if await self.is_valid_ollama_url(url):\n valid_url = url\n break\n build_config[\"base_url\"][\"value\"] = valid_url\n if field_name in {\"model_name\", \"base_url\", \"tool_model_enabled\"}:\n if await self.is_valid_ollama_url(self.base_url):\n build_config[\"model_name\"][\"options\"] = await self.get_model(self.base_url)\n elif await self.is_valid_ollama_url(build_config[\"base_url\"].get(\"value\", \"\")):\n build_config[\"model_name\"][\"options\"] = await self.get_model(build_config[\"base_url\"].get(\"value\", \"\"))\n else:\n build_config[\"model_name\"][\"options\"] = []\n\n return build_config\n\n async def get_model(self, base_url_value: str) -> list[str]:\n \"\"\"Get the model names from Ollama.\"\"\"\n model_ids = []\n try:\n url = urljoin(base_url_value, \"/api/tags\")\n async with httpx.AsyncClient() as client:\n response = await client.get(url)\n response.raise_for_status()\n data = response.json()\n\n model_ids = [model[\"name\"] for model in data.get(\"models\", [])]\n # this to ensure that not embedding models are included.\n # not even the base models since models can have 1b 2b etc\n # handles cases when embeddings models have tags like :latest - etc.\n model_ids = [\n model\n for model in model_ids\n if any(model.startswith(f\"{embedding_model}\") for embedding_model in OLLAMA_EMBEDDING_MODELS)\n ]\n\n except (ImportError, ValueError, httpx.RequestError) as e:\n msg = \"Could not get model names from Ollama.\"\n raise ValueError(msg) from e\n\n return model_ids\n\n async def is_valid_ollama_url(self, url: str) -> bool:\n try:\n async with httpx.AsyncClient() as client:\n return (await client.get(f\"{url}/api/tags\")).status_code == HTTP_STATUS_OK\n except httpx.RequestError:\n return False\n"
- },
- "model_name": {
- "_input_type": "DropdownInput",
- "advanced": false,
- "combobox": true,
- "dialog_inputs": {},
- "display_name": "Ollama Model",
- "dynamic": false,
- "info": "",
- "name": "model_name",
- "options": [
- "all-minilm:latest"
- ],
- "options_metadata": [],
- "placeholder": "",
- "real_time_refresh": true,
- "refresh_button": true,
- "required": true,
- "show": true,
- "title_case": false,
- "toggle": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "str",
- "value": "all-minilm:latest"
+ ]
}
},
- "tool_mode": false
+ "tool_mode": false,
+ "last_updated": "2025-09-29T18:40:10.242Z",
+ "official": false
},
"showNode": true,
- "type": "OllamaEmbeddings"
- },
- "dragging": false,
- "id": "OllamaEmbeddings-4ah5Q",
- "measured": {
- "height": 286,
- "width": 320
+ "type": "OllamaEmbeddings",
+ "id": "OllamaEmbeddings-vnNn8"
},
+ "id": "OllamaEmbeddings-vnNn8",
"position": {
- "x": 282.29416840859585,
- "y": 279.4218065717267
+ "x": 0,
+ "y": 0
},
- "selected": false,
"type": "genericNode"
}
\ No newline at end of file
diff --git a/flows/components/ollama_llm.json b/flows/components/ollama_llm.json
index 0edf7f13..4aa6ee62 100644
--- a/flows/components/ollama_llm.json
+++ b/flows/components/ollama_llm.json
@@ -1,18 +1,611 @@
{
- "data": {
- "id": "OllamaModel-eCsJx",
- "node": {
- "base_classes": [
- "LanguageModel",
- "Message"
+ "data": {
+ "node": {
+ "template": {
+ "_type": "Component",
+ "base_url": {
+ "tool_mode": false,
+ "trace_as_input": true,
+ "trace_as_metadata": true,
+ "load_from_db": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "base_url",
+ "value": "OLLAMA_BASE_URL",
+ "display_name": "Base URL",
+ "advanced": false,
+ "input_types": [
+ "Message"
+ ],
+ "dynamic": false,
+ "info": "Endpoint of the Ollama API.",
+ "real_time_refresh": true,
+ "title_case": false,
+ "type": "str",
+ "_input_type": "MessageTextInput"
+ },
+ "code": {
+ "type": "code",
+ "required": true,
+ "placeholder": "",
+ "list": false,
+ "show": true,
+ "multiline": true,
+ "value": "import asyncio\nfrom typing import Any\nfrom urllib.parse import urljoin\n\nimport httpx\nfrom langchain_ollama import ChatOllama\n\nfrom lfx.base.models.model import LCModelComponent\nfrom lfx.base.models.ollama_constants import URL_LIST\nfrom lfx.field_typing import LanguageModel\nfrom lfx.field_typing.range_spec import RangeSpec\nfrom lfx.io import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, MessageTextInput, SliderInput\nfrom lfx.log.logger import logger\n\nHTTP_STATUS_OK = 200\n\n\nclass ChatOllamaComponent(LCModelComponent):\n display_name = \"Ollama\"\n description = \"Generate text using Ollama Local LLMs.\"\n icon = \"Ollama\"\n name = \"OllamaModel\"\n\n # Define constants for JSON keys\n JSON_MODELS_KEY = \"models\"\n JSON_NAME_KEY = \"name\"\n JSON_CAPABILITIES_KEY = \"capabilities\"\n DESIRED_CAPABILITY = \"completion\"\n TOOL_CALLING_CAPABILITY = \"tools\"\n\n inputs = [\n MessageTextInput(\n name=\"base_url\",\n display_name=\"Base URL\",\n info=\"Endpoint of the Ollama API.\",\n value=\"\",\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=[],\n info=\"Refer to https://ollama.com/library for more models.\",\n refresh_button=True,\n real_time_refresh=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n MessageTextInput(\n name=\"format\", display_name=\"Format\", info=\"Specify the format of the output (e.g., json).\", advanced=True\n ),\n DictInput(name=\"metadata\", display_name=\"Metadata\", info=\"Metadata to add to the run trace.\", advanced=True),\n DropdownInput(\n name=\"mirostat\",\n display_name=\"Mirostat\",\n options=[\"Disabled\", \"Mirostat\", \"Mirostat 2.0\"],\n info=\"Enable/disable Mirostat sampling for controlling perplexity.\",\n value=\"Disabled\",\n advanced=True,\n real_time_refresh=True,\n ),\n FloatInput(\n name=\"mirostat_eta\",\n display_name=\"Mirostat Eta\",\n info=\"Learning rate for Mirostat algorithm. (Default: 0.1)\",\n advanced=True,\n ),\n FloatInput(\n name=\"mirostat_tau\",\n display_name=\"Mirostat Tau\",\n info=\"Controls the balance between coherence and diversity of the output. (Default: 5.0)\",\n advanced=True,\n ),\n IntInput(\n name=\"num_ctx\",\n display_name=\"Context Window Size\",\n info=\"Size of the context window for generating tokens. (Default: 2048)\",\n advanced=True,\n ),\n IntInput(\n name=\"num_gpu\",\n display_name=\"Number of GPUs\",\n info=\"Number of GPUs to use for computation. (Default: 1 on macOS, 0 to disable)\",\n advanced=True,\n ),\n IntInput(\n name=\"num_thread\",\n display_name=\"Number of Threads\",\n info=\"Number of threads to use during computation. (Default: detected for optimal performance)\",\n advanced=True,\n ),\n IntInput(\n name=\"repeat_last_n\",\n display_name=\"Repeat Last N\",\n info=\"How far back the model looks to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)\",\n advanced=True,\n ),\n FloatInput(\n name=\"repeat_penalty\",\n display_name=\"Repeat Penalty\",\n info=\"Penalty for repetitions in generated text. (Default: 1.1)\",\n advanced=True,\n ),\n FloatInput(name=\"tfs_z\", display_name=\"TFS Z\", info=\"Tail free sampling value. (Default: 1)\", advanced=True),\n IntInput(name=\"timeout\", display_name=\"Timeout\", info=\"Timeout for the request stream.\", advanced=True),\n IntInput(\n name=\"top_k\", display_name=\"Top K\", info=\"Limits token selection to top K. 
(Default: 40)\", advanced=True\n ),\n FloatInput(name=\"top_p\", display_name=\"Top P\", info=\"Works together with top-k. (Default: 0.9)\", advanced=True),\n BoolInput(name=\"verbose\", display_name=\"Verbose\", info=\"Whether to print out response text.\", advanced=True),\n MessageTextInput(\n name=\"tags\",\n display_name=\"Tags\",\n info=\"Comma-separated list of tags to add to the run trace.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"stop_tokens\",\n display_name=\"Stop Tokens\",\n info=\"Comma-separated list of tokens to signal the model to stop generating text.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"system\", display_name=\"System\", info=\"System to use for generating text.\", advanced=True\n ),\n BoolInput(\n name=\"tool_model_enabled\",\n display_name=\"Tool Model Enabled\",\n info=\"Whether to enable tool calling in the model.\",\n value=True,\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"template\", display_name=\"Template\", info=\"Template to use for generating text.\", advanced=True\n ),\n *LCModelComponent.get_base_inputs(),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # Mapping mirostat settings to their corresponding values\n mirostat_options = {\"Mirostat\": 1, \"Mirostat 2.0\": 2}\n\n # Default to 0 for 'Disabled'\n mirostat_value = mirostat_options.get(self.mirostat, 0)\n\n # Set mirostat_eta and mirostat_tau to None if mirostat is disabled\n if mirostat_value == 0:\n mirostat_eta = None\n mirostat_tau = None\n else:\n mirostat_eta = self.mirostat_eta\n mirostat_tau = self.mirostat_tau\n\n # Mapping system settings to their corresponding values\n llm_params = {\n \"base_url\": self.base_url,\n \"model\": self.model_name,\n \"mirostat\": mirostat_value,\n \"format\": self.format,\n \"metadata\": self.metadata,\n \"tags\": self.tags.split(\",\") if self.tags else None,\n \"mirostat_eta\": mirostat_eta,\n \"mirostat_tau\": mirostat_tau,\n \"num_ctx\": self.num_ctx or None,\n \"num_gpu\": self.num_gpu or None,\n \"num_thread\": self.num_thread or None,\n \"repeat_last_n\": self.repeat_last_n or None,\n \"repeat_penalty\": self.repeat_penalty or None,\n \"temperature\": self.temperature or None,\n \"stop\": self.stop_tokens.split(\",\") if self.stop_tokens else None,\n \"system\": self.system,\n \"tfs_z\": self.tfs_z or None,\n \"timeout\": self.timeout or None,\n \"top_k\": self.top_k or None,\n \"top_p\": self.top_p or None,\n \"verbose\": self.verbose,\n \"template\": self.template,\n }\n\n # Remove parameters with None values\n llm_params = {k: v for k, v in llm_params.items() if v is not None}\n\n try:\n output = ChatOllama(**llm_params)\n except Exception as e:\n msg = (\n \"Unable to connect to the Ollama API. 
\",\n \"Please verify the base URL, ensure the relevant Ollama model is pulled, and try again.\",\n )\n raise ValueError(msg) from e\n\n return output\n\n async def is_valid_ollama_url(self, url: str) -> bool:\n try:\n async with httpx.AsyncClient() as client:\n return (await client.get(urljoin(url, \"api/tags\"))).status_code == HTTP_STATUS_OK\n except httpx.RequestError:\n return False\n\n async def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None):\n if field_name == \"mirostat\":\n if field_value == \"Disabled\":\n build_config[\"mirostat_eta\"][\"advanced\"] = True\n build_config[\"mirostat_tau\"][\"advanced\"] = True\n build_config[\"mirostat_eta\"][\"value\"] = None\n build_config[\"mirostat_tau\"][\"value\"] = None\n\n else:\n build_config[\"mirostat_eta\"][\"advanced\"] = False\n build_config[\"mirostat_tau\"][\"advanced\"] = False\n\n if field_value == \"Mirostat 2.0\":\n build_config[\"mirostat_eta\"][\"value\"] = 0.2\n build_config[\"mirostat_tau\"][\"value\"] = 10\n else:\n build_config[\"mirostat_eta\"][\"value\"] = 0.1\n build_config[\"mirostat_tau\"][\"value\"] = 5\n\n if field_name in {\"base_url\", \"model_name\"}:\n if build_config[\"base_url\"].get(\"load_from_db\", False):\n base_url_value = await self.get_variables(build_config[\"base_url\"].get(\"value\", \"\"), \"base_url\")\n else:\n base_url_value = build_config[\"base_url\"].get(\"value\", \"\")\n\n if not await self.is_valid_ollama_url(base_url_value):\n # Check if any URL in the list is valid\n valid_url = \"\"\n check_urls = URL_LIST\n if self.base_url:\n check_urls = [self.base_url, *URL_LIST]\n for url in check_urls:\n if await self.is_valid_ollama_url(url):\n valid_url = url\n break\n if valid_url != \"\":\n build_config[\"base_url\"][\"value\"] = valid_url\n else:\n msg = \"No valid Ollama URL found.\"\n raise ValueError(msg)\n if field_name in {\"model_name\", \"base_url\", \"tool_model_enabled\"}:\n if await self.is_valid_ollama_url(self.base_url):\n tool_model_enabled = build_config[\"tool_model_enabled\"].get(\"value\", False) or self.tool_model_enabled\n build_config[\"model_name\"][\"options\"] = await self.get_models(\n self.base_url, tool_model_enabled=tool_model_enabled\n )\n elif await self.is_valid_ollama_url(build_config[\"base_url\"].get(\"value\", \"\")):\n tool_model_enabled = build_config[\"tool_model_enabled\"].get(\"value\", False) or self.tool_model_enabled\n build_config[\"model_name\"][\"options\"] = await self.get_models(\n build_config[\"base_url\"].get(\"value\", \"\"), tool_model_enabled=tool_model_enabled\n )\n else:\n build_config[\"model_name\"][\"options\"] = []\n if field_name == \"keep_alive_flag\":\n if field_value == \"Keep\":\n build_config[\"keep_alive\"][\"value\"] = \"-1\"\n build_config[\"keep_alive\"][\"advanced\"] = True\n elif field_value == \"Immediately\":\n build_config[\"keep_alive\"][\"value\"] = \"0\"\n build_config[\"keep_alive\"][\"advanced\"] = True\n else:\n build_config[\"keep_alive\"][\"advanced\"] = False\n\n return build_config\n\n async def get_models(self, base_url_value: str, *, tool_model_enabled: bool | None = None) -> list[str]:\n \"\"\"Fetches a list of models from the Ollama API that do not have the \"embedding\" capability.\n\n Args:\n base_url_value (str): The base URL of the Ollama API.\n tool_model_enabled (bool | None, optional): If True, filters the models further to include\n only those that support tool calling. 
Defaults to None.\n\n Returns:\n list[str]: A list of model names that do not have the \"embedding\" capability. If\n `tool_model_enabled` is True, only models supporting tool calling are included.\n\n Raises:\n ValueError: If there is an issue with the API request or response, or if the model\n names cannot be retrieved.\n \"\"\"\n try:\n # Normalize the base URL to avoid the repeated \"/\" at the end\n base_url = base_url_value.rstrip(\"/\") + \"/\"\n\n # Ollama REST API to return models\n tags_url = urljoin(base_url, \"api/tags\")\n\n # Ollama REST API to return model capabilities\n show_url = urljoin(base_url, \"api/show\")\n\n async with httpx.AsyncClient() as client:\n # Fetch available models\n tags_response = await client.get(tags_url)\n tags_response.raise_for_status()\n models = tags_response.json()\n if asyncio.iscoroutine(models):\n models = await models\n await logger.adebug(f\"Available models: {models}\")\n\n # Filter models that are NOT embedding models\n model_ids = []\n for model in models[self.JSON_MODELS_KEY]:\n model_name = model[self.JSON_NAME_KEY]\n await logger.adebug(f\"Checking model: {model_name}\")\n\n payload = {\"model\": model_name}\n show_response = await client.post(show_url, json=payload)\n show_response.raise_for_status()\n json_data = show_response.json()\n if asyncio.iscoroutine(json_data):\n json_data = await json_data\n capabilities = json_data.get(self.JSON_CAPABILITIES_KEY, [])\n await logger.adebug(f\"Model: {model_name}, Capabilities: {capabilities}\")\n\n if self.DESIRED_CAPABILITY in capabilities and (\n not tool_model_enabled or self.TOOL_CALLING_CAPABILITY in capabilities\n ):\n model_ids.append(model_name)\n\n except (httpx.RequestError, ValueError) as e:\n msg = \"Could not get model names from Ollama.\"\n raise ValueError(msg) from e\n\n return model_ids\n",
+ "fileTypes": [],
+ "file_path": "",
+ "password": false,
+ "name": "code",
+ "advanced": true,
+ "dynamic": true,
+ "info": "",
+ "load_from_db": false,
+ "title_case": false
+ },
+ "format": {
+ "tool_mode": false,
+ "trace_as_input": true,
+ "trace_as_metadata": true,
+ "load_from_db": false,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "format",
+ "value": "",
+ "display_name": "Format",
+ "advanced": true,
+ "input_types": [
+ "Message"
+ ],
+ "dynamic": false,
+ "info": "Specify the format of the output (e.g., json).",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "MessageTextInput"
+ },
+ "input_value": {
+ "trace_as_input": true,
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "load_from_db": false,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "input_value",
+ "value": "",
+ "display_name": "Input",
+ "advanced": false,
+ "input_types": [
+ "Message"
+ ],
+ "dynamic": false,
+ "info": "",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "MessageInput"
+ },
+ "metadata": {
+ "tool_mode": false,
+ "trace_as_input": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "metadata",
+ "value": {},
+ "display_name": "Metadata",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Metadata to add to the run trace.",
+ "title_case": false,
+ "type": "dict",
+ "_input_type": "DictInput"
+ },
+ "mirostat": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "options": [
+ "Disabled",
+ "Mirostat",
+ "Mirostat 2.0"
+ ],
+ "options_metadata": [],
+ "combobox": false,
+ "dialog_inputs": {},
+ "toggle": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "mirostat",
+ "value": "Disabled",
+ "display_name": "Mirostat",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Enable/disable Mirostat sampling for controlling perplexity.",
+ "real_time_refresh": true,
+ "title_case": false,
+ "external_options": {},
+ "type": "str",
+ "_input_type": "DropdownInput"
+ },
+ "mirostat_eta": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "mirostat_eta",
+ "value": "",
+ "display_name": "Mirostat Eta",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Learning rate for Mirostat algorithm. (Default: 0.1)",
+ "title_case": false,
+ "type": "float",
+ "_input_type": "FloatInput"
+ },
+ "mirostat_tau": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "mirostat_tau",
+ "value": "",
+ "display_name": "Mirostat Tau",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Controls the balance between coherence and diversity of the output. (Default: 5.0)",
+ "title_case": false,
+ "type": "float",
+ "_input_type": "FloatInput"
+ },
+ "model_name": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "options": [],
+ "options_metadata": [],
+ "combobox": false,
+ "dialog_inputs": {},
+ "toggle": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "model_name",
+ "value": "",
+ "display_name": "Model Name",
+ "advanced": false,
+ "dynamic": false,
+ "info": "Refer to https://ollama.com/library for more models.",
+ "real_time_refresh": true,
+ "refresh_button": true,
+ "title_case": false,
+ "external_options": {},
+ "type": "str",
+ "_input_type": "DropdownInput"
+ },
+ "num_ctx": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "num_ctx",
+ "value": "",
+ "display_name": "Context Window Size",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Size of the context window for generating tokens. (Default: 2048)",
+ "title_case": false,
+ "type": "int",
+ "_input_type": "IntInput"
+ },
+ "num_gpu": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "num_gpu",
+ "value": "",
+ "display_name": "Number of GPUs",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Number of GPUs to use for computation. (Default: 1 on macOS, 0 to disable)",
+ "title_case": false,
+ "type": "int",
+ "_input_type": "IntInput"
+ },
+ "num_thread": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "num_thread",
+ "value": "",
+ "display_name": "Number of Threads",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Number of threads to use during computation. (Default: detected for optimal performance)",
+ "title_case": false,
+ "type": "int",
+ "_input_type": "IntInput"
+ },
+ "repeat_last_n": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "repeat_last_n",
+ "value": "",
+ "display_name": "Repeat Last N",
+ "advanced": true,
+ "dynamic": false,
+ "info": "How far back the model looks to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)",
+ "title_case": false,
+ "type": "int",
+ "_input_type": "IntInput"
+ },
+ "repeat_penalty": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "repeat_penalty",
+ "value": "",
+ "display_name": "Repeat Penalty",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Penalty for repetitions in generated text. (Default: 1.1)",
+ "title_case": false,
+ "type": "float",
+ "_input_type": "FloatInput"
+ },
+ "stop_tokens": {
+ "tool_mode": false,
+ "trace_as_input": true,
+ "trace_as_metadata": true,
+ "load_from_db": false,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "stop_tokens",
+ "value": "",
+ "display_name": "Stop Tokens",
+ "advanced": true,
+ "input_types": [
+ "Message"
+ ],
+ "dynamic": false,
+ "info": "Comma-separated list of tokens to signal the model to stop generating text.",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "MessageTextInput"
+ },
+ "stream": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "stream",
+ "value": false,
+ "display_name": "Stream",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Stream the response from the model. Streaming works only in Chat.",
+ "title_case": false,
+ "type": "bool",
+ "_input_type": "BoolInput"
+ },
+ "system": {
+ "tool_mode": false,
+ "trace_as_input": true,
+ "trace_as_metadata": true,
+ "load_from_db": false,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "system",
+ "value": "",
+ "display_name": "System",
+ "advanced": true,
+ "input_types": [
+ "Message"
+ ],
+ "dynamic": false,
+ "info": "System to use for generating text.",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "MessageTextInput"
+ },
+ "system_message": {
+ "tool_mode": false,
+ "trace_as_input": true,
+ "multiline": true,
+ "trace_as_metadata": true,
+ "load_from_db": false,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "system_message",
+ "value": "",
+ "display_name": "System Message",
+ "advanced": false,
+ "input_types": [
+ "Message"
+ ],
+ "dynamic": false,
+ "info": "System message to pass to the model.",
+ "title_case": false,
+ "copy_field": false,
+ "type": "str",
+ "_input_type": "MultilineInput"
+ },
+ "tags": {
+ "tool_mode": false,
+ "trace_as_input": true,
+ "trace_as_metadata": true,
+ "load_from_db": false,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "tags",
+ "value": "",
+ "display_name": "Tags",
+ "advanced": true,
+ "input_types": [
+ "Message"
+ ],
+ "dynamic": false,
+ "info": "Comma-separated list of tags to add to the run trace.",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "MessageTextInput"
+ },
+ "temperature": {
+ "tool_mode": false,
+ "min_label": "",
+ "max_label": "",
+ "min_label_icon": "",
+ "max_label_icon": "",
+ "slider_buttons": false,
+ "slider_buttons_options": [],
+ "slider_input": false,
+ "range_spec": {
+ "step_type": "float",
+ "min": 0,
+ "max": 1,
+ "step": 0.01
+ },
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "temperature",
+ "value": 0.1,
+ "display_name": "Temperature",
+ "advanced": true,
+ "dynamic": false,
+ "info": "",
+ "title_case": false,
+ "type": "slider",
+ "_input_type": "SliderInput"
+ },
+ "template": {
+ "tool_mode": false,
+ "trace_as_input": true,
+ "trace_as_metadata": true,
+ "load_from_db": false,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "template",
+ "value": "",
+ "display_name": "Template",
+ "advanced": true,
+ "input_types": [
+ "Message"
+ ],
+ "dynamic": false,
+ "info": "Template to use for generating text.",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "MessageTextInput"
+ },
+ "tfs_z": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "tfs_z",
+ "value": "",
+ "display_name": "TFS Z",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Tail free sampling value. (Default: 1)",
+ "title_case": false,
+ "type": "float",
+ "_input_type": "FloatInput"
+ },
+ "timeout": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "timeout",
+ "value": "",
+ "display_name": "Timeout",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Timeout for the request stream.",
+ "title_case": false,
+ "type": "int",
+ "_input_type": "IntInput"
+ },
+ "tool_model_enabled": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "tool_model_enabled",
+ "value": true,
+ "display_name": "Tool Model Enabled",
+ "advanced": false,
+ "dynamic": false,
+ "info": "Whether to enable tool calling in the model.",
+ "real_time_refresh": true,
+ "title_case": false,
+ "type": "bool",
+ "_input_type": "BoolInput"
+ },
+ "top_k": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "top_k",
+ "value": "",
+ "display_name": "Top K",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Limits token selection to top K. (Default: 40)",
+ "title_case": false,
+ "type": "int",
+ "_input_type": "IntInput"
+ },
+ "top_p": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "top_p",
+ "value": "",
+ "display_name": "Top P",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Works together with top-k. (Default: 0.9)",
+ "title_case": false,
+ "type": "float",
+ "_input_type": "FloatInput"
+ },
+ "verbose": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "verbose",
+ "value": false,
+ "display_name": "Verbose",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Whether to print out response text.",
+ "title_case": false,
+ "type": "bool",
+ "_input_type": "BoolInput"
+ }
+ },
+ "description": "Generate text using Ollama Local LLMs.",
+ "icon": "Ollama",
+ "base_classes": [
+ "LanguageModel",
+ "Message"
+ ],
+ "display_name": "Ollama",
+ "documentation": "",
+ "minimized": false,
+ "custom_fields": {},
+ "output_types": [],
+ "pinned": false,
+ "conditional_paths": [],
+ "frozen": false,
+ "outputs": [
+ {
+ "types": [
+ "Message"
+ ],
+ "name": "text_output",
+ "display_name": "Model Response",
+ "method": "text_response",
+ "value": "__UNDEFINED__",
+ "cache": true,
+ "required_inputs": null,
+ "allows_loop": false,
+ "group_outputs": false,
+ "options": null,
+ "tool_mode": true
+ },
+ {
+ "types": [
+ "LanguageModel"
+ ],
+ "selected": "LanguageModel",
+ "name": "model_output",
+ "display_name": "Language Model",
+ "method": "build_model",
+ "value": "__UNDEFINED__",
+ "cache": true,
+ "required_inputs": null,
+ "allows_loop": false,
+ "group_outputs": false,
+ "options": null,
+ "tool_mode": true
+ }
],
- "beta": false,
- "conditional_paths": [],
- "custom_fields": {},
- "description": "Generate text using Ollama Local LLMs.",
- "display_name": "Ollama",
- "documentation": "",
- "edited": false,
"field_order": [
"base_url",
"model_name",
@@ -41,13 +634,20 @@
"system_message",
"stream"
],
- "frozen": false,
- "icon": "Ollama",
- "last_updated": "2025-09-22T20:14:45.057Z",
+ "beta": false,
"legacy": false,
+ "edited": false,
"metadata": {
- "code_hash": "af399d429d23",
+ "keywords": [
+ "model",
+ "llm",
+ "language model",
+ "large language model"
+ ],
+ "module": "lfx.components.ollama.ollama.ChatOllamaComponent",
+ "code_hash": "54de3b5da388",
"dependencies": {
+ "total_dependencies": 3,
"dependencies": [
{
"name": "httpx",
@@ -58,630 +658,25 @@
"version": "0.2.1"
},
{
- "name": "langflow",
+ "name": "lfx",
"version": null
}
- ],
- "total_dependencies": 3
- },
- "keywords": [
- "model",
- "llm",
- "language model",
- "large language model"
- ],
- "module": "langflow.components.ollama.ollama.ChatOllamaComponent"
- },
- "minimized": false,
- "output_types": [],
- "outputs": [
- {
- "allows_loop": false,
- "cache": true,
- "display_name": "Model Response",
- "group_outputs": false,
- "method": "text_response",
- "name": "text_output",
- "options": null,
- "required_inputs": null,
- "tool_mode": true,
- "types": [
- "Message"
- ],
- "value": "__UNDEFINED__"
- },
- {
- "allows_loop": false,
- "cache": true,
- "display_name": "Language Model",
- "group_outputs": false,
- "method": "build_model",
- "name": "model_output",
- "options": null,
- "required_inputs": null,
- "selected": "LanguageModel",
- "tool_mode": true,
- "types": [
- "LanguageModel"
- ],
- "value": "__UNDEFINED__"
- }
- ],
- "pinned": false,
- "template": {
- "_type": "Component",
- "base_url": {
- "_input_type": "MessageTextInput",
- "advanced": false,
- "display_name": "Base URL",
- "dynamic": false,
- "info": "Endpoint of the Ollama API.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": true,
- "name": "base_url",
- "placeholder": "",
- "real_time_refresh": true,
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": "OLLAMA_BASE_URL"
- },
- "code": {
- "advanced": true,
- "dynamic": true,
- "fileTypes": [],
- "file_path": "",
- "info": "",
- "list": false,
- "load_from_db": false,
- "multiline": true,
- "name": "code",
- "password": false,
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "type": "code",
- "value": "import asyncio\nfrom typing import Any\nfrom urllib.parse import urljoin\n\nimport httpx\nfrom langchain_ollama import ChatOllama\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.ollama_constants import URL_LIST\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.io import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, MessageTextInput, SliderInput\nfrom langflow.logging import logger\n\nHTTP_STATUS_OK = 200\n\n\nclass ChatOllamaComponent(LCModelComponent):\n display_name = \"Ollama\"\n description = \"Generate text using Ollama Local LLMs.\"\n icon = \"Ollama\"\n name = \"OllamaModel\"\n\n # Define constants for JSON keys\n JSON_MODELS_KEY = \"models\"\n JSON_NAME_KEY = \"name\"\n JSON_CAPABILITIES_KEY = \"capabilities\"\n DESIRED_CAPABILITY = \"completion\"\n TOOL_CALLING_CAPABILITY = \"tools\"\n\n inputs = [\n MessageTextInput(\n name=\"base_url\",\n display_name=\"Base URL\",\n info=\"Endpoint of the Ollama API.\",\n value=\"\",\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=[],\n info=\"Refer to https://ollama.com/library for more models.\",\n refresh_button=True,\n real_time_refresh=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n MessageTextInput(\n name=\"format\", display_name=\"Format\", info=\"Specify the format of the output (e.g., json).\", advanced=True\n ),\n DictInput(name=\"metadata\", display_name=\"Metadata\", info=\"Metadata to add to the run trace.\", advanced=True),\n DropdownInput(\n name=\"mirostat\",\n display_name=\"Mirostat\",\n options=[\"Disabled\", \"Mirostat\", \"Mirostat 2.0\"],\n info=\"Enable/disable Mirostat sampling for controlling perplexity.\",\n value=\"Disabled\",\n advanced=True,\n real_time_refresh=True,\n ),\n FloatInput(\n name=\"mirostat_eta\",\n display_name=\"Mirostat Eta\",\n info=\"Learning rate for Mirostat algorithm. (Default: 0.1)\",\n advanced=True,\n ),\n FloatInput(\n name=\"mirostat_tau\",\n display_name=\"Mirostat Tau\",\n info=\"Controls the balance between coherence and diversity of the output. (Default: 5.0)\",\n advanced=True,\n ),\n IntInput(\n name=\"num_ctx\",\n display_name=\"Context Window Size\",\n info=\"Size of the context window for generating tokens. (Default: 2048)\",\n advanced=True,\n ),\n IntInput(\n name=\"num_gpu\",\n display_name=\"Number of GPUs\",\n info=\"Number of GPUs to use for computation. (Default: 1 on macOS, 0 to disable)\",\n advanced=True,\n ),\n IntInput(\n name=\"num_thread\",\n display_name=\"Number of Threads\",\n info=\"Number of threads to use during computation. (Default: detected for optimal performance)\",\n advanced=True,\n ),\n IntInput(\n name=\"repeat_last_n\",\n display_name=\"Repeat Last N\",\n info=\"How far back the model looks to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)\",\n advanced=True,\n ),\n FloatInput(\n name=\"repeat_penalty\",\n display_name=\"Repeat Penalty\",\n info=\"Penalty for repetitions in generated text. (Default: 1.1)\",\n advanced=True,\n ),\n FloatInput(name=\"tfs_z\", display_name=\"TFS Z\", info=\"Tail free sampling value. 
(Default: 1)\", advanced=True),\n IntInput(name=\"timeout\", display_name=\"Timeout\", info=\"Timeout for the request stream.\", advanced=True),\n IntInput(\n name=\"top_k\", display_name=\"Top K\", info=\"Limits token selection to top K. (Default: 40)\", advanced=True\n ),\n FloatInput(name=\"top_p\", display_name=\"Top P\", info=\"Works together with top-k. (Default: 0.9)\", advanced=True),\n BoolInput(name=\"verbose\", display_name=\"Verbose\", info=\"Whether to print out response text.\", advanced=True),\n MessageTextInput(\n name=\"tags\",\n display_name=\"Tags\",\n info=\"Comma-separated list of tags to add to the run trace.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"stop_tokens\",\n display_name=\"Stop Tokens\",\n info=\"Comma-separated list of tokens to signal the model to stop generating text.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"system\", display_name=\"System\", info=\"System to use for generating text.\", advanced=True\n ),\n BoolInput(\n name=\"tool_model_enabled\",\n display_name=\"Tool Model Enabled\",\n info=\"Whether to enable tool calling in the model.\",\n value=True,\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"template\", display_name=\"Template\", info=\"Template to use for generating text.\", advanced=True\n ),\n *LCModelComponent._base_inputs,\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # Mapping mirostat settings to their corresponding values\n mirostat_options = {\"Mirostat\": 1, \"Mirostat 2.0\": 2}\n\n # Default to 0 for 'Disabled'\n mirostat_value = mirostat_options.get(self.mirostat, 0)\n\n # Set mirostat_eta and mirostat_tau to None if mirostat is disabled\n if mirostat_value == 0:\n mirostat_eta = None\n mirostat_tau = None\n else:\n mirostat_eta = self.mirostat_eta\n mirostat_tau = self.mirostat_tau\n\n # Mapping system settings to their corresponding values\n llm_params = {\n \"base_url\": self.base_url,\n \"model\": self.model_name,\n \"mirostat\": mirostat_value,\n \"format\": self.format,\n \"metadata\": self.metadata,\n \"tags\": self.tags.split(\",\") if self.tags else None,\n \"mirostat_eta\": mirostat_eta,\n \"mirostat_tau\": mirostat_tau,\n \"num_ctx\": self.num_ctx or None,\n \"num_gpu\": self.num_gpu or None,\n \"num_thread\": self.num_thread or None,\n \"repeat_last_n\": self.repeat_last_n or None,\n \"repeat_penalty\": self.repeat_penalty or None,\n \"temperature\": self.temperature or None,\n \"stop\": self.stop_tokens.split(\",\") if self.stop_tokens else None,\n \"system\": self.system,\n \"tfs_z\": self.tfs_z or None,\n \"timeout\": self.timeout or None,\n \"top_k\": self.top_k or None,\n \"top_p\": self.top_p or None,\n \"verbose\": self.verbose,\n \"template\": self.template,\n }\n\n # Remove parameters with None values\n llm_params = {k: v for k, v in llm_params.items() if v is not None}\n\n try:\n output = ChatOllama(**llm_params)\n except Exception as e:\n msg = (\n \"Unable to connect to the Ollama API. 
\",\n \"Please verify the base URL, ensure the relevant Ollama model is pulled, and try again.\",\n )\n raise ValueError(msg) from e\n\n return output\n\n async def is_valid_ollama_url(self, url: str) -> bool:\n try:\n async with httpx.AsyncClient() as client:\n return (await client.get(urljoin(url, \"api/tags\"))).status_code == HTTP_STATUS_OK\n except httpx.RequestError:\n return False\n\n async def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None):\n if field_name == \"mirostat\":\n if field_value == \"Disabled\":\n build_config[\"mirostat_eta\"][\"advanced\"] = True\n build_config[\"mirostat_tau\"][\"advanced\"] = True\n build_config[\"mirostat_eta\"][\"value\"] = None\n build_config[\"mirostat_tau\"][\"value\"] = None\n\n else:\n build_config[\"mirostat_eta\"][\"advanced\"] = False\n build_config[\"mirostat_tau\"][\"advanced\"] = False\n\n if field_value == \"Mirostat 2.0\":\n build_config[\"mirostat_eta\"][\"value\"] = 0.2\n build_config[\"mirostat_tau\"][\"value\"] = 10\n else:\n build_config[\"mirostat_eta\"][\"value\"] = 0.1\n build_config[\"mirostat_tau\"][\"value\"] = 5\n\n if field_name in {\"base_url\", \"model_name\"}:\n if build_config[\"base_url\"].get(\"load_from_db\", False):\n base_url_value = await self.get_variables(build_config[\"base_url\"].get(\"value\", \"\"), \"base_url\")\n else:\n base_url_value = build_config[\"base_url\"].get(\"value\", \"\")\n\n if not await self.is_valid_ollama_url(base_url_value):\n # Check if any URL in the list is valid\n valid_url = \"\"\n check_urls = URL_LIST\n if self.base_url:\n check_urls = [self.base_url, *URL_LIST]\n for url in check_urls:\n if await self.is_valid_ollama_url(url):\n valid_url = url\n break\n if valid_url != \"\":\n build_config[\"base_url\"][\"value\"] = valid_url\n else:\n msg = \"No valid Ollama URL found.\"\n raise ValueError(msg)\n if field_name in {\"model_name\", \"base_url\", \"tool_model_enabled\"}:\n if await self.is_valid_ollama_url(self.base_url):\n tool_model_enabled = build_config[\"tool_model_enabled\"].get(\"value\", False) or self.tool_model_enabled\n build_config[\"model_name\"][\"options\"] = await self.get_models(\n self.base_url, tool_model_enabled=tool_model_enabled\n )\n elif await self.is_valid_ollama_url(build_config[\"base_url\"].get(\"value\", \"\")):\n tool_model_enabled = build_config[\"tool_model_enabled\"].get(\"value\", False) or self.tool_model_enabled\n build_config[\"model_name\"][\"options\"] = await self.get_models(\n build_config[\"base_url\"].get(\"value\", \"\"), tool_model_enabled=tool_model_enabled\n )\n else:\n build_config[\"model_name\"][\"options\"] = []\n if field_name == \"keep_alive_flag\":\n if field_value == \"Keep\":\n build_config[\"keep_alive\"][\"value\"] = \"-1\"\n build_config[\"keep_alive\"][\"advanced\"] = True\n elif field_value == \"Immediately\":\n build_config[\"keep_alive\"][\"value\"] = \"0\"\n build_config[\"keep_alive\"][\"advanced\"] = True\n else:\n build_config[\"keep_alive\"][\"advanced\"] = False\n\n return build_config\n\n async def get_models(self, base_url_value: str, *, tool_model_enabled: bool | None = None) -> list[str]:\n \"\"\"Fetches a list of models from the Ollama API that do not have the \"embedding\" capability.\n\n Args:\n base_url_value (str): The base URL of the Ollama API.\n tool_model_enabled (bool | None, optional): If True, filters the models further to include\n only those that support tool calling. 
Defaults to None.\n\n Returns:\n list[str]: A list of model names that do not have the \"embedding\" capability. If\n `tool_model_enabled` is True, only models supporting tool calling are included.\n\n Raises:\n ValueError: If there is an issue with the API request or response, or if the model\n names cannot be retrieved.\n \"\"\"\n try:\n # Normalize the base URL to avoid the repeated \"/\" at the end\n base_url = base_url_value.rstrip(\"/\") + \"/\"\n\n # Ollama REST API to return models\n tags_url = urljoin(base_url, \"api/tags\")\n\n # Ollama REST API to return model capabilities\n show_url = urljoin(base_url, \"api/show\")\n\n async with httpx.AsyncClient() as client:\n # Fetch available models\n tags_response = await client.get(tags_url)\n tags_response.raise_for_status()\n models = tags_response.json()\n if asyncio.iscoroutine(models):\n models = await models\n await logger.adebug(f\"Available models: {models}\")\n\n # Filter models that are NOT embedding models\n model_ids = []\n for model in models[self.JSON_MODELS_KEY]:\n model_name = model[self.JSON_NAME_KEY]\n await logger.adebug(f\"Checking model: {model_name}\")\n\n payload = {\"model\": model_name}\n show_response = await client.post(show_url, json=payload)\n show_response.raise_for_status()\n json_data = show_response.json()\n if asyncio.iscoroutine(json_data):\n json_data = await json_data\n capabilities = json_data.get(self.JSON_CAPABILITIES_KEY, [])\n await logger.adebug(f\"Model: {model_name}, Capabilities: {capabilities}\")\n\n if self.DESIRED_CAPABILITY in capabilities and (\n not tool_model_enabled or self.TOOL_CALLING_CAPABILITY in capabilities\n ):\n model_ids.append(model_name)\n\n except (httpx.RequestError, ValueError) as e:\n msg = \"Could not get model names from Ollama.\"\n raise ValueError(msg) from e\n\n return model_ids\n"
- },
- "format": {
- "_input_type": "MessageTextInput",
- "advanced": true,
- "display_name": "Format",
- "dynamic": false,
- "info": "Specify the format of the output (e.g., json).",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "format",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "input_value": {
- "_input_type": "MessageInput",
- "advanced": false,
- "display_name": "Input",
- "dynamic": false,
- "info": "",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "input_value",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "metadata": {
- "_input_type": "DictInput",
- "advanced": true,
- "display_name": "Metadata",
- "dynamic": false,
- "info": "Metadata to add to the run trace.",
- "list": false,
- "list_add_label": "Add More",
- "name": "metadata",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "type": "dict",
- "value": {}
- },
- "mirostat": {
- "_input_type": "DropdownInput",
- "advanced": true,
- "combobox": false,
- "dialog_inputs": {},
- "display_name": "Mirostat",
- "dynamic": false,
- "info": "Enable/disable Mirostat sampling for controlling perplexity.",
- "name": "mirostat",
- "options": [
- "Disabled",
- "Mirostat",
- "Mirostat 2.0"
- ],
- "options_metadata": [],
- "placeholder": "",
- "real_time_refresh": true,
- "required": false,
- "show": true,
- "title_case": false,
- "toggle": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "str",
- "value": "Disabled"
- },
- "mirostat_eta": {
- "_input_type": "FloatInput",
- "advanced": true,
- "display_name": "Mirostat Eta",
- "dynamic": false,
- "info": "Learning rate for Mirostat algorithm. (Default: 0.1)",
- "list": false,
- "list_add_label": "Add More",
- "name": "mirostat_eta",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "float",
- "value": ""
- },
- "mirostat_tau": {
- "_input_type": "FloatInput",
- "advanced": true,
- "display_name": "Mirostat Tau",
- "dynamic": false,
- "info": "Controls the balance between coherence and diversity of the output. (Default: 5.0)",
- "list": false,
- "list_add_label": "Add More",
- "name": "mirostat_tau",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "float",
- "value": ""
- },
- "model_name": {
- "_input_type": "DropdownInput",
- "advanced": false,
- "combobox": false,
- "dialog_inputs": {},
- "display_name": "Model Name",
- "dynamic": false,
- "info": "Refer to https://ollama.com/library for more models.",
- "name": "model_name",
- "options": [
- "qwen3:4b"
- ],
- "options_metadata": [],
- "placeholder": "",
- "real_time_refresh": true,
- "refresh_button": true,
- "required": false,
- "show": true,
- "title_case": false,
- "toggle": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "str",
- "value": "qwen3:4b"
- },
- "num_ctx": {
- "_input_type": "IntInput",
- "advanced": true,
- "display_name": "Context Window Size",
- "dynamic": false,
- "info": "Size of the context window for generating tokens. (Default: 2048)",
- "list": false,
- "list_add_label": "Add More",
- "name": "num_ctx",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "int",
- "value": ""
- },
- "num_gpu": {
- "_input_type": "IntInput",
- "advanced": true,
- "display_name": "Number of GPUs",
- "dynamic": false,
- "info": "Number of GPUs to use for computation. (Default: 1 on macOS, 0 to disable)",
- "list": false,
- "list_add_label": "Add More",
- "name": "num_gpu",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "int",
- "value": ""
- },
- "num_thread": {
- "_input_type": "IntInput",
- "advanced": true,
- "display_name": "Number of Threads",
- "dynamic": false,
- "info": "Number of threads to use during computation. (Default: detected for optimal performance)",
- "list": false,
- "list_add_label": "Add More",
- "name": "num_thread",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "int",
- "value": ""
- },
- "repeat_last_n": {
- "_input_type": "IntInput",
- "advanced": true,
- "display_name": "Repeat Last N",
- "dynamic": false,
- "info": "How far back the model looks to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)",
- "list": false,
- "list_add_label": "Add More",
- "name": "repeat_last_n",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "int",
- "value": ""
- },
- "repeat_penalty": {
- "_input_type": "FloatInput",
- "advanced": true,
- "display_name": "Repeat Penalty",
- "dynamic": false,
- "info": "Penalty for repetitions in generated text. (Default: 1.1)",
- "list": false,
- "list_add_label": "Add More",
- "name": "repeat_penalty",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "float",
- "value": ""
- },
- "stop_tokens": {
- "_input_type": "MessageTextInput",
- "advanced": true,
- "display_name": "Stop Tokens",
- "dynamic": false,
- "info": "Comma-separated list of tokens to signal the model to stop generating text.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "stop_tokens",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "stream": {
- "_input_type": "BoolInput",
- "advanced": true,
- "display_name": "Stream",
- "dynamic": false,
- "info": "Stream the response from the model. Streaming works only in Chat.",
- "list": false,
- "list_add_label": "Add More",
- "name": "stream",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "bool",
- "value": false
- },
- "system": {
- "_input_type": "MessageTextInput",
- "advanced": true,
- "display_name": "System",
- "dynamic": false,
- "info": "System to use for generating text.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "system",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "system_message": {
- "_input_type": "MultilineInput",
- "advanced": false,
- "copy_field": false,
- "display_name": "System Message",
- "dynamic": false,
- "info": "System message to pass to the model.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "multiline": true,
- "name": "system_message",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "tags": {
- "_input_type": "MessageTextInput",
- "advanced": true,
- "display_name": "Tags",
- "dynamic": false,
- "info": "Comma-separated list of tags to add to the run trace.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "tags",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "temperature": {
- "_input_type": "SliderInput",
- "advanced": true,
- "display_name": "Temperature",
- "dynamic": false,
- "info": "",
- "max_label": "",
- "max_label_icon": "",
- "min_label": "",
- "min_label_icon": "",
- "name": "temperature",
- "placeholder": "",
- "range_spec": {
- "max": 1,
- "min": 0,
- "step": 0.01,
- "step_type": "float"
- },
- "required": false,
- "show": true,
- "slider_buttons": false,
- "slider_buttons_options": [],
- "slider_input": false,
- "title_case": false,
- "tool_mode": false,
- "type": "slider",
- "value": 0.1
- },
- "template": {
- "_input_type": "MessageTextInput",
- "advanced": true,
- "display_name": "Template",
- "dynamic": false,
- "info": "Template to use for generating text.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "template",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "tfs_z": {
- "_input_type": "FloatInput",
- "advanced": true,
- "display_name": "TFS Z",
- "dynamic": false,
- "info": "Tail free sampling value. (Default: 1)",
- "list": false,
- "list_add_label": "Add More",
- "name": "tfs_z",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "float",
- "value": ""
- },
- "timeout": {
- "_input_type": "IntInput",
- "advanced": true,
- "display_name": "Timeout",
- "dynamic": false,
- "info": "Timeout for the request stream.",
- "list": false,
- "list_add_label": "Add More",
- "name": "timeout",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "int",
- "value": ""
- },
- "tool_model_enabled": {
- "_input_type": "BoolInput",
- "advanced": false,
- "display_name": "Tool Model Enabled",
- "dynamic": false,
- "info": "Whether to enable tool calling in the model.",
- "list": false,
- "list_add_label": "Add More",
- "name": "tool_model_enabled",
- "placeholder": "",
- "real_time_refresh": true,
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "bool",
- "value": true
- },
- "top_k": {
- "_input_type": "IntInput",
- "advanced": true,
- "display_name": "Top K",
- "dynamic": false,
- "info": "Limits token selection to top K. (Default: 40)",
- "list": false,
- "list_add_label": "Add More",
- "name": "top_k",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "int",
- "value": ""
- },
- "top_p": {
- "_input_type": "FloatInput",
- "advanced": true,
- "display_name": "Top P",
- "dynamic": false,
- "info": "Works together with top-k. (Default: 0.9)",
- "list": false,
- "list_add_label": "Add More",
- "name": "top_p",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "float",
- "value": ""
- },
- "verbose": {
- "_input_type": "BoolInput",
- "advanced": true,
- "display_name": "Verbose",
- "dynamic": false,
- "info": "Whether to print out response text.",
- "list": false,
- "list_add_label": "Add More",
- "name": "verbose",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "bool",
- "value": false
+ ]
}
},
- "tool_mode": false
+ "tool_mode": false,
+ "last_updated": "2025-09-29T18:39:30.798Z",
+ "official": false
},
- "selected_output": "model_output",
"showNode": true,
- "type": "OllamaModel"
- },
- "dragging": false,
- "id": "OllamaModel-eCsJx",
- "measured": {
- "height": 494,
- "width": 320
+ "type": "OllamaModel",
+ "id": "OllamaModel-8Re0J",
+ "selected_output": "model_output"
},
+ "id": "OllamaModel-8Re0J",
"position": {
- "x": 248.08287272472313,
- "y": 216.98088326271431
+ "x": 0,
+ "y": 0
},
- "selected": false,
"type": "genericNode"
}
\ No newline at end of file
diff --git a/flows/components/ollama_llm_text.json b/flows/components/ollama_llm_text.json
index 846e8313..9b2b5482 100644
--- a/flows/components/ollama_llm_text.json
+++ b/flows/components/ollama_llm_text.json
@@ -1,687 +1,683 @@
{
- "data": {
- "id": "OllamaModel-XDGqZ",
- "node": {
- "base_classes": [
- "LanguageModel",
- "Message"
- ],
- "beta": false,
- "conditional_paths": [],
- "custom_fields": {},
- "description": "Generate text using Ollama Local LLMs.",
- "display_name": "Ollama",
- "documentation": "",
- "edited": false,
- "field_order": [
- "base_url",
- "model_name",
- "temperature",
- "format",
- "metadata",
- "mirostat",
- "mirostat_eta",
- "mirostat_tau",
- "num_ctx",
- "num_gpu",
- "num_thread",
- "repeat_last_n",
- "repeat_penalty",
- "tfs_z",
- "timeout",
- "top_k",
- "top_p",
- "verbose",
- "tags",
- "stop_tokens",
- "system",
- "tool_model_enabled",
- "template",
- "input_value",
- "system_message",
- "stream"
- ],
- "frozen": false,
- "icon": "Ollama",
- "last_updated": "2025-09-22T20:14:45.057Z",
- "legacy": false,
- "metadata": {
- "code_hash": "af399d429d23",
- "dependencies": {
- "dependencies": [
- {
- "name": "httpx",
- "version": "0.28.1"
- },
- {
- "name": "langchain_ollama",
- "version": "0.2.1"
- },
- {
- "name": "langflow",
- "version": null
- }
- ],
- "total_dependencies": 3
- },
- "keywords": [
- "model",
- "llm",
- "language model",
- "large language model"
+ "data": {
+ "node": {
+ "template": {
+ "_type": "Component",
+ "base_url": {
+ "tool_mode": false,
+ "trace_as_input": true,
+ "trace_as_metadata": true,
+ "load_from_db": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "base_url",
+ "value": "OLLAMA_BASE_URL",
+ "display_name": "Base URL",
+ "advanced": false,
+ "input_types": [
+ "Message"
],
- "module": "langflow.components.ollama.ollama.ChatOllamaComponent"
+ "dynamic": false,
+ "info": "Endpoint of the Ollama API.",
+ "real_time_refresh": true,
+ "title_case": false,
+ "type": "str",
+ "_input_type": "MessageTextInput"
},
- "minimized": false,
- "output_types": [],
- "outputs": [
- {
- "allows_loop": false,
- "cache": true,
- "display_name": "Model Response",
- "group_outputs": false,
- "method": "text_response",
- "name": "text_output",
- "options": null,
- "required_inputs": null,
- "selected": "Message",
- "tool_mode": true,
- "types": [
- "Message"
- ],
- "value": "__UNDEFINED__"
+ "code": {
+ "type": "code",
+ "required": true,
+ "placeholder": "",
+ "list": false,
+ "show": true,
+ "multiline": true,
+ "value": "import asyncio\nfrom typing import Any\nfrom urllib.parse import urljoin\n\nimport httpx\nfrom langchain_ollama import ChatOllama\n\nfrom lfx.base.models.model import LCModelComponent\nfrom lfx.base.models.ollama_constants import URL_LIST\nfrom lfx.field_typing import LanguageModel\nfrom lfx.field_typing.range_spec import RangeSpec\nfrom lfx.io import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, MessageTextInput, SliderInput\nfrom lfx.log.logger import logger\n\nHTTP_STATUS_OK = 200\n\n\nclass ChatOllamaComponent(LCModelComponent):\n display_name = \"Ollama\"\n description = \"Generate text using Ollama Local LLMs.\"\n icon = \"Ollama\"\n name = \"OllamaModel\"\n\n # Define constants for JSON keys\n JSON_MODELS_KEY = \"models\"\n JSON_NAME_KEY = \"name\"\n JSON_CAPABILITIES_KEY = \"capabilities\"\n DESIRED_CAPABILITY = \"completion\"\n TOOL_CALLING_CAPABILITY = \"tools\"\n\n inputs = [\n MessageTextInput(\n name=\"base_url\",\n display_name=\"Base URL\",\n info=\"Endpoint of the Ollama API.\",\n value=\"\",\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=[],\n info=\"Refer to https://ollama.com/library for more models.\",\n refresh_button=True,\n real_time_refresh=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n MessageTextInput(\n name=\"format\", display_name=\"Format\", info=\"Specify the format of the output (e.g., json).\", advanced=True\n ),\n DictInput(name=\"metadata\", display_name=\"Metadata\", info=\"Metadata to add to the run trace.\", advanced=True),\n DropdownInput(\n name=\"mirostat\",\n display_name=\"Mirostat\",\n options=[\"Disabled\", \"Mirostat\", \"Mirostat 2.0\"],\n info=\"Enable/disable Mirostat sampling for controlling perplexity.\",\n value=\"Disabled\",\n advanced=True,\n real_time_refresh=True,\n ),\n FloatInput(\n name=\"mirostat_eta\",\n display_name=\"Mirostat Eta\",\n info=\"Learning rate for Mirostat algorithm. (Default: 0.1)\",\n advanced=True,\n ),\n FloatInput(\n name=\"mirostat_tau\",\n display_name=\"Mirostat Tau\",\n info=\"Controls the balance between coherence and diversity of the output. (Default: 5.0)\",\n advanced=True,\n ),\n IntInput(\n name=\"num_ctx\",\n display_name=\"Context Window Size\",\n info=\"Size of the context window for generating tokens. (Default: 2048)\",\n advanced=True,\n ),\n IntInput(\n name=\"num_gpu\",\n display_name=\"Number of GPUs\",\n info=\"Number of GPUs to use for computation. (Default: 1 on macOS, 0 to disable)\",\n advanced=True,\n ),\n IntInput(\n name=\"num_thread\",\n display_name=\"Number of Threads\",\n info=\"Number of threads to use during computation. (Default: detected for optimal performance)\",\n advanced=True,\n ),\n IntInput(\n name=\"repeat_last_n\",\n display_name=\"Repeat Last N\",\n info=\"How far back the model looks to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)\",\n advanced=True,\n ),\n FloatInput(\n name=\"repeat_penalty\",\n display_name=\"Repeat Penalty\",\n info=\"Penalty for repetitions in generated text. (Default: 1.1)\",\n advanced=True,\n ),\n FloatInput(name=\"tfs_z\", display_name=\"TFS Z\", info=\"Tail free sampling value. (Default: 1)\", advanced=True),\n IntInput(name=\"timeout\", display_name=\"Timeout\", info=\"Timeout for the request stream.\", advanced=True),\n IntInput(\n name=\"top_k\", display_name=\"Top K\", info=\"Limits token selection to top K. 
(Default: 40)\", advanced=True\n ),\n FloatInput(name=\"top_p\", display_name=\"Top P\", info=\"Works together with top-k. (Default: 0.9)\", advanced=True),\n BoolInput(name=\"verbose\", display_name=\"Verbose\", info=\"Whether to print out response text.\", advanced=True),\n MessageTextInput(\n name=\"tags\",\n display_name=\"Tags\",\n info=\"Comma-separated list of tags to add to the run trace.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"stop_tokens\",\n display_name=\"Stop Tokens\",\n info=\"Comma-separated list of tokens to signal the model to stop generating text.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"system\", display_name=\"System\", info=\"System to use for generating text.\", advanced=True\n ),\n BoolInput(\n name=\"tool_model_enabled\",\n display_name=\"Tool Model Enabled\",\n info=\"Whether to enable tool calling in the model.\",\n value=True,\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"template\", display_name=\"Template\", info=\"Template to use for generating text.\", advanced=True\n ),\n *LCModelComponent.get_base_inputs(),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # Mapping mirostat settings to their corresponding values\n mirostat_options = {\"Mirostat\": 1, \"Mirostat 2.0\": 2}\n\n # Default to 0 for 'Disabled'\n mirostat_value = mirostat_options.get(self.mirostat, 0)\n\n # Set mirostat_eta and mirostat_tau to None if mirostat is disabled\n if mirostat_value == 0:\n mirostat_eta = None\n mirostat_tau = None\n else:\n mirostat_eta = self.mirostat_eta\n mirostat_tau = self.mirostat_tau\n\n # Mapping system settings to their corresponding values\n llm_params = {\n \"base_url\": self.base_url,\n \"model\": self.model_name,\n \"mirostat\": mirostat_value,\n \"format\": self.format,\n \"metadata\": self.metadata,\n \"tags\": self.tags.split(\",\") if self.tags else None,\n \"mirostat_eta\": mirostat_eta,\n \"mirostat_tau\": mirostat_tau,\n \"num_ctx\": self.num_ctx or None,\n \"num_gpu\": self.num_gpu or None,\n \"num_thread\": self.num_thread or None,\n \"repeat_last_n\": self.repeat_last_n or None,\n \"repeat_penalty\": self.repeat_penalty or None,\n \"temperature\": self.temperature or None,\n \"stop\": self.stop_tokens.split(\",\") if self.stop_tokens else None,\n \"system\": self.system,\n \"tfs_z\": self.tfs_z or None,\n \"timeout\": self.timeout or None,\n \"top_k\": self.top_k or None,\n \"top_p\": self.top_p or None,\n \"verbose\": self.verbose,\n \"template\": self.template,\n }\n\n # Remove parameters with None values\n llm_params = {k: v for k, v in llm_params.items() if v is not None}\n\n try:\n output = ChatOllama(**llm_params)\n except Exception as e:\n msg = (\n \"Unable to connect to the Ollama API. 
\",\n \"Please verify the base URL, ensure the relevant Ollama model is pulled, and try again.\",\n )\n raise ValueError(msg) from e\n\n return output\n\n async def is_valid_ollama_url(self, url: str) -> bool:\n try:\n async with httpx.AsyncClient() as client:\n return (await client.get(urljoin(url, \"api/tags\"))).status_code == HTTP_STATUS_OK\n except httpx.RequestError:\n return False\n\n async def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None):\n if field_name == \"mirostat\":\n if field_value == \"Disabled\":\n build_config[\"mirostat_eta\"][\"advanced\"] = True\n build_config[\"mirostat_tau\"][\"advanced\"] = True\n build_config[\"mirostat_eta\"][\"value\"] = None\n build_config[\"mirostat_tau\"][\"value\"] = None\n\n else:\n build_config[\"mirostat_eta\"][\"advanced\"] = False\n build_config[\"mirostat_tau\"][\"advanced\"] = False\n\n if field_value == \"Mirostat 2.0\":\n build_config[\"mirostat_eta\"][\"value\"] = 0.2\n build_config[\"mirostat_tau\"][\"value\"] = 10\n else:\n build_config[\"mirostat_eta\"][\"value\"] = 0.1\n build_config[\"mirostat_tau\"][\"value\"] = 5\n\n if field_name in {\"base_url\", \"model_name\"}:\n if build_config[\"base_url\"].get(\"load_from_db\", False):\n base_url_value = await self.get_variables(build_config[\"base_url\"].get(\"value\", \"\"), \"base_url\")\n else:\n base_url_value = build_config[\"base_url\"].get(\"value\", \"\")\n\n if not await self.is_valid_ollama_url(base_url_value):\n # Check if any URL in the list is valid\n valid_url = \"\"\n check_urls = URL_LIST\n if self.base_url:\n check_urls = [self.base_url, *URL_LIST]\n for url in check_urls:\n if await self.is_valid_ollama_url(url):\n valid_url = url\n break\n if valid_url != \"\":\n build_config[\"base_url\"][\"value\"] = valid_url\n else:\n msg = \"No valid Ollama URL found.\"\n raise ValueError(msg)\n if field_name in {\"model_name\", \"base_url\", \"tool_model_enabled\"}:\n if await self.is_valid_ollama_url(self.base_url):\n tool_model_enabled = build_config[\"tool_model_enabled\"].get(\"value\", False) or self.tool_model_enabled\n build_config[\"model_name\"][\"options\"] = await self.get_models(\n self.base_url, tool_model_enabled=tool_model_enabled\n )\n elif await self.is_valid_ollama_url(build_config[\"base_url\"].get(\"value\", \"\")):\n tool_model_enabled = build_config[\"tool_model_enabled\"].get(\"value\", False) or self.tool_model_enabled\n build_config[\"model_name\"][\"options\"] = await self.get_models(\n build_config[\"base_url\"].get(\"value\", \"\"), tool_model_enabled=tool_model_enabled\n )\n else:\n build_config[\"model_name\"][\"options\"] = []\n if field_name == \"keep_alive_flag\":\n if field_value == \"Keep\":\n build_config[\"keep_alive\"][\"value\"] = \"-1\"\n build_config[\"keep_alive\"][\"advanced\"] = True\n elif field_value == \"Immediately\":\n build_config[\"keep_alive\"][\"value\"] = \"0\"\n build_config[\"keep_alive\"][\"advanced\"] = True\n else:\n build_config[\"keep_alive\"][\"advanced\"] = False\n\n return build_config\n\n async def get_models(self, base_url_value: str, *, tool_model_enabled: bool | None = None) -> list[str]:\n \"\"\"Fetches a list of models from the Ollama API that do not have the \"embedding\" capability.\n\n Args:\n base_url_value (str): The base URL of the Ollama API.\n tool_model_enabled (bool | None, optional): If True, filters the models further to include\n only those that support tool calling. 
Defaults to None.\n\n Returns:\n list[str]: A list of model names that do not have the \"embedding\" capability. If\n `tool_model_enabled` is True, only models supporting tool calling are included.\n\n Raises:\n ValueError: If there is an issue with the API request or response, or if the model\n names cannot be retrieved.\n \"\"\"\n try:\n # Normalize the base URL to avoid the repeated \"/\" at the end\n base_url = base_url_value.rstrip(\"/\") + \"/\"\n\n # Ollama REST API to return models\n tags_url = urljoin(base_url, \"api/tags\")\n\n # Ollama REST API to return model capabilities\n show_url = urljoin(base_url, \"api/show\")\n\n async with httpx.AsyncClient() as client:\n # Fetch available models\n tags_response = await client.get(tags_url)\n tags_response.raise_for_status()\n models = tags_response.json()\n if asyncio.iscoroutine(models):\n models = await models\n await logger.adebug(f\"Available models: {models}\")\n\n # Filter models that are NOT embedding models\n model_ids = []\n for model in models[self.JSON_MODELS_KEY]:\n model_name = model[self.JSON_NAME_KEY]\n await logger.adebug(f\"Checking model: {model_name}\")\n\n payload = {\"model\": model_name}\n show_response = await client.post(show_url, json=payload)\n show_response.raise_for_status()\n json_data = show_response.json()\n if asyncio.iscoroutine(json_data):\n json_data = await json_data\n capabilities = json_data.get(self.JSON_CAPABILITIES_KEY, [])\n await logger.adebug(f\"Model: {model_name}, Capabilities: {capabilities}\")\n\n if self.DESIRED_CAPABILITY in capabilities and (\n not tool_model_enabled or self.TOOL_CALLING_CAPABILITY in capabilities\n ):\n model_ids.append(model_name)\n\n except (httpx.RequestError, ValueError) as e:\n msg = \"Could not get model names from Ollama.\"\n raise ValueError(msg) from e\n\n return model_ids\n",
+ "fileTypes": [],
+ "file_path": "",
+ "password": false,
+ "name": "code",
+ "advanced": true,
+ "dynamic": true,
+ "info": "",
+ "load_from_db": false,
+ "title_case": false
+ },
+ "format": {
+ "tool_mode": false,
+ "trace_as_input": true,
+ "trace_as_metadata": true,
+ "load_from_db": false,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "format",
+ "value": "",
+ "display_name": "Format",
+ "advanced": true,
+ "input_types": [
+ "Message"
+ ],
+ "dynamic": false,
+ "info": "Specify the format of the output (e.g., json).",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "MessageTextInput"
+ },
+ "input_value": {
+ "trace_as_input": true,
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "load_from_db": false,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "input_value",
+ "value": "",
+ "display_name": "Input",
+ "advanced": false,
+ "input_types": [
+ "Message"
+ ],
+ "dynamic": false,
+ "info": "",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "MessageInput"
+ },
+ "metadata": {
+ "tool_mode": false,
+ "trace_as_input": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "metadata",
+ "value": {},
+ "display_name": "Metadata",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Metadata to add to the run trace.",
+ "title_case": false,
+ "type": "dict",
+ "_input_type": "DictInput"
+ },
+ "mirostat": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "options": [
+ "Disabled",
+ "Mirostat",
+ "Mirostat 2.0"
+ ],
+ "options_metadata": [],
+ "combobox": false,
+ "dialog_inputs": {},
+ "toggle": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "mirostat",
+ "value": "Disabled",
+ "display_name": "Mirostat",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Enable/disable Mirostat sampling for controlling perplexity.",
+ "real_time_refresh": true,
+ "title_case": false,
+ "external_options": {},
+ "type": "str",
+ "_input_type": "DropdownInput"
+ },
+ "mirostat_eta": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "mirostat_eta",
+ "value": "",
+ "display_name": "Mirostat Eta",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Learning rate for Mirostat algorithm. (Default: 0.1)",
+ "title_case": false,
+ "type": "float",
+ "_input_type": "FloatInput"
+ },
+ "mirostat_tau": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "mirostat_tau",
+ "value": "",
+ "display_name": "Mirostat Tau",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Controls the balance between coherence and diversity of the output. (Default: 5.0)",
+ "title_case": false,
+ "type": "float",
+ "_input_type": "FloatInput"
+ },
+ "model_name": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "options": [],
+ "options_metadata": [],
+ "combobox": false,
+ "dialog_inputs": {},
+ "toggle": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "model_name",
+ "value": "",
+ "display_name": "Model Name",
+ "advanced": false,
+ "dynamic": false,
+ "info": "Refer to https://ollama.com/library for more models.",
+ "real_time_refresh": true,
+ "refresh_button": true,
+ "title_case": false,
+ "external_options": {},
+ "type": "str",
+ "_input_type": "DropdownInput"
+ },
+ "num_ctx": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "num_ctx",
+ "value": "",
+ "display_name": "Context Window Size",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Size of the context window for generating tokens. (Default: 2048)",
+ "title_case": false,
+ "type": "int",
+ "_input_type": "IntInput"
+ },
+ "num_gpu": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "num_gpu",
+ "value": "",
+ "display_name": "Number of GPUs",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Number of GPUs to use for computation. (Default: 1 on macOS, 0 to disable)",
+ "title_case": false,
+ "type": "int",
+ "_input_type": "IntInput"
+ },
+ "num_thread": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "num_thread",
+ "value": "",
+ "display_name": "Number of Threads",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Number of threads to use during computation. (Default: detected for optimal performance)",
+ "title_case": false,
+ "type": "int",
+ "_input_type": "IntInput"
+ },
+ "repeat_last_n": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "repeat_last_n",
+ "value": "",
+ "display_name": "Repeat Last N",
+ "advanced": true,
+ "dynamic": false,
+ "info": "How far back the model looks to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)",
+ "title_case": false,
+ "type": "int",
+ "_input_type": "IntInput"
+ },
+ "repeat_penalty": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "repeat_penalty",
+ "value": "",
+ "display_name": "Repeat Penalty",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Penalty for repetitions in generated text. (Default: 1.1)",
+ "title_case": false,
+ "type": "float",
+ "_input_type": "FloatInput"
+ },
+ "stop_tokens": {
+ "tool_mode": false,
+ "trace_as_input": true,
+ "trace_as_metadata": true,
+ "load_from_db": false,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "stop_tokens",
+ "value": "",
+ "display_name": "Stop Tokens",
+ "advanced": true,
+ "input_types": [
+ "Message"
+ ],
+ "dynamic": false,
+ "info": "Comma-separated list of tokens to signal the model to stop generating text.",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "MessageTextInput"
+ },
+ "stream": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "stream",
+ "value": false,
+ "display_name": "Stream",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Stream the response from the model. Streaming works only in Chat.",
+ "title_case": false,
+ "type": "bool",
+ "_input_type": "BoolInput"
+ },
+ "system": {
+ "tool_mode": false,
+ "trace_as_input": true,
+ "trace_as_metadata": true,
+ "load_from_db": false,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "system",
+ "value": "",
+ "display_name": "System",
+ "advanced": true,
+ "input_types": [
+ "Message"
+ ],
+ "dynamic": false,
+ "info": "System to use for generating text.",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "MessageTextInput"
+ },
+ "system_message": {
+ "tool_mode": false,
+ "trace_as_input": true,
+ "multiline": true,
+ "trace_as_metadata": true,
+ "load_from_db": false,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "system_message",
+ "value": "",
+ "display_name": "System Message",
+ "advanced": false,
+ "input_types": [
+ "Message"
+ ],
+ "dynamic": false,
+ "info": "System message to pass to the model.",
+ "title_case": false,
+ "copy_field": false,
+ "type": "str",
+ "_input_type": "MultilineInput"
+ },
+ "tags": {
+ "tool_mode": false,
+ "trace_as_input": true,
+ "trace_as_metadata": true,
+ "load_from_db": false,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "tags",
+ "value": "",
+ "display_name": "Tags",
+ "advanced": true,
+ "input_types": [
+ "Message"
+ ],
+ "dynamic": false,
+ "info": "Comma-separated list of tags to add to the run trace.",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "MessageTextInput"
+ },
+ "temperature": {
+ "tool_mode": false,
+ "min_label": "",
+ "max_label": "",
+ "min_label_icon": "",
+ "max_label_icon": "",
+ "slider_buttons": false,
+ "slider_buttons_options": [],
+ "slider_input": false,
+ "range_spec": {
+ "step_type": "float",
+ "min": 0,
+ "max": 1,
+ "step": 0.01
},
- {
- "allows_loop": false,
- "cache": true,
- "display_name": "Language Model",
- "group_outputs": false,
- "method": "build_model",
- "name": "model_output",
- "options": null,
- "required_inputs": null,
- "tool_mode": true,
- "types": [
- "LanguageModel"
- ],
- "value": "__UNDEFINED__"
- }
- ],
- "pinned": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "temperature",
+ "value": 0.1,
+ "display_name": "Temperature",
+ "advanced": true,
+ "dynamic": false,
+ "info": "",
+ "title_case": false,
+ "type": "slider",
+ "_input_type": "SliderInput"
+ },
"template": {
- "_type": "Component",
- "base_url": {
- "_input_type": "MessageTextInput",
- "advanced": false,
- "display_name": "Base URL",
- "dynamic": false,
- "info": "Endpoint of the Ollama API.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": true,
- "name": "base_url",
- "placeholder": "",
- "real_time_refresh": true,
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": "OLLAMA_BASE_URL"
- },
- "code": {
- "advanced": true,
- "dynamic": true,
- "fileTypes": [],
- "file_path": "",
- "info": "",
- "list": false,
- "load_from_db": false,
- "multiline": true,
- "name": "code",
- "password": false,
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "type": "code",
- "value": "import asyncio\nfrom typing import Any\nfrom urllib.parse import urljoin\n\nimport httpx\nfrom langchain_ollama import ChatOllama\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.ollama_constants import URL_LIST\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.io import BoolInput, DictInput, DropdownInput, FloatInput, IntInput, MessageTextInput, SliderInput\nfrom langflow.logging import logger\n\nHTTP_STATUS_OK = 200\n\n\nclass ChatOllamaComponent(LCModelComponent):\n display_name = \"Ollama\"\n description = \"Generate text using Ollama Local LLMs.\"\n icon = \"Ollama\"\n name = \"OllamaModel\"\n\n # Define constants for JSON keys\n JSON_MODELS_KEY = \"models\"\n JSON_NAME_KEY = \"name\"\n JSON_CAPABILITIES_KEY = \"capabilities\"\n DESIRED_CAPABILITY = \"completion\"\n TOOL_CALLING_CAPABILITY = \"tools\"\n\n inputs = [\n MessageTextInput(\n name=\"base_url\",\n display_name=\"Base URL\",\n info=\"Endpoint of the Ollama API.\",\n value=\"\",\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=[],\n info=\"Refer to https://ollama.com/library for more models.\",\n refresh_button=True,\n real_time_refresh=True,\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n MessageTextInput(\n name=\"format\", display_name=\"Format\", info=\"Specify the format of the output (e.g., json).\", advanced=True\n ),\n DictInput(name=\"metadata\", display_name=\"Metadata\", info=\"Metadata to add to the run trace.\", advanced=True),\n DropdownInput(\n name=\"mirostat\",\n display_name=\"Mirostat\",\n options=[\"Disabled\", \"Mirostat\", \"Mirostat 2.0\"],\n info=\"Enable/disable Mirostat sampling for controlling perplexity.\",\n value=\"Disabled\",\n advanced=True,\n real_time_refresh=True,\n ),\n FloatInput(\n name=\"mirostat_eta\",\n display_name=\"Mirostat Eta\",\n info=\"Learning rate for Mirostat algorithm. (Default: 0.1)\",\n advanced=True,\n ),\n FloatInput(\n name=\"mirostat_tau\",\n display_name=\"Mirostat Tau\",\n info=\"Controls the balance between coherence and diversity of the output. (Default: 5.0)\",\n advanced=True,\n ),\n IntInput(\n name=\"num_ctx\",\n display_name=\"Context Window Size\",\n info=\"Size of the context window for generating tokens. (Default: 2048)\",\n advanced=True,\n ),\n IntInput(\n name=\"num_gpu\",\n display_name=\"Number of GPUs\",\n info=\"Number of GPUs to use for computation. (Default: 1 on macOS, 0 to disable)\",\n advanced=True,\n ),\n IntInput(\n name=\"num_thread\",\n display_name=\"Number of Threads\",\n info=\"Number of threads to use during computation. (Default: detected for optimal performance)\",\n advanced=True,\n ),\n IntInput(\n name=\"repeat_last_n\",\n display_name=\"Repeat Last N\",\n info=\"How far back the model looks to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)\",\n advanced=True,\n ),\n FloatInput(\n name=\"repeat_penalty\",\n display_name=\"Repeat Penalty\",\n info=\"Penalty for repetitions in generated text. (Default: 1.1)\",\n advanced=True,\n ),\n FloatInput(name=\"tfs_z\", display_name=\"TFS Z\", info=\"Tail free sampling value. 
(Default: 1)\", advanced=True),\n IntInput(name=\"timeout\", display_name=\"Timeout\", info=\"Timeout for the request stream.\", advanced=True),\n IntInput(\n name=\"top_k\", display_name=\"Top K\", info=\"Limits token selection to top K. (Default: 40)\", advanced=True\n ),\n FloatInput(name=\"top_p\", display_name=\"Top P\", info=\"Works together with top-k. (Default: 0.9)\", advanced=True),\n BoolInput(name=\"verbose\", display_name=\"Verbose\", info=\"Whether to print out response text.\", advanced=True),\n MessageTextInput(\n name=\"tags\",\n display_name=\"Tags\",\n info=\"Comma-separated list of tags to add to the run trace.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"stop_tokens\",\n display_name=\"Stop Tokens\",\n info=\"Comma-separated list of tokens to signal the model to stop generating text.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"system\", display_name=\"System\", info=\"System to use for generating text.\", advanced=True\n ),\n BoolInput(\n name=\"tool_model_enabled\",\n display_name=\"Tool Model Enabled\",\n info=\"Whether to enable tool calling in the model.\",\n value=True,\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"template\", display_name=\"Template\", info=\"Template to use for generating text.\", advanced=True\n ),\n *LCModelComponent._base_inputs,\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n # Mapping mirostat settings to their corresponding values\n mirostat_options = {\"Mirostat\": 1, \"Mirostat 2.0\": 2}\n\n # Default to 0 for 'Disabled'\n mirostat_value = mirostat_options.get(self.mirostat, 0)\n\n # Set mirostat_eta and mirostat_tau to None if mirostat is disabled\n if mirostat_value == 0:\n mirostat_eta = None\n mirostat_tau = None\n else:\n mirostat_eta = self.mirostat_eta\n mirostat_tau = self.mirostat_tau\n\n # Mapping system settings to their corresponding values\n llm_params = {\n \"base_url\": self.base_url,\n \"model\": self.model_name,\n \"mirostat\": mirostat_value,\n \"format\": self.format,\n \"metadata\": self.metadata,\n \"tags\": self.tags.split(\",\") if self.tags else None,\n \"mirostat_eta\": mirostat_eta,\n \"mirostat_tau\": mirostat_tau,\n \"num_ctx\": self.num_ctx or None,\n \"num_gpu\": self.num_gpu or None,\n \"num_thread\": self.num_thread or None,\n \"repeat_last_n\": self.repeat_last_n or None,\n \"repeat_penalty\": self.repeat_penalty or None,\n \"temperature\": self.temperature or None,\n \"stop\": self.stop_tokens.split(\",\") if self.stop_tokens else None,\n \"system\": self.system,\n \"tfs_z\": self.tfs_z or None,\n \"timeout\": self.timeout or None,\n \"top_k\": self.top_k or None,\n \"top_p\": self.top_p or None,\n \"verbose\": self.verbose,\n \"template\": self.template,\n }\n\n # Remove parameters with None values\n llm_params = {k: v for k, v in llm_params.items() if v is not None}\n\n try:\n output = ChatOllama(**llm_params)\n except Exception as e:\n msg = (\n \"Unable to connect to the Ollama API. 
\",\n \"Please verify the base URL, ensure the relevant Ollama model is pulled, and try again.\",\n )\n raise ValueError(msg) from e\n\n return output\n\n async def is_valid_ollama_url(self, url: str) -> bool:\n try:\n async with httpx.AsyncClient() as client:\n return (await client.get(urljoin(url, \"api/tags\"))).status_code == HTTP_STATUS_OK\n except httpx.RequestError:\n return False\n\n async def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None):\n if field_name == \"mirostat\":\n if field_value == \"Disabled\":\n build_config[\"mirostat_eta\"][\"advanced\"] = True\n build_config[\"mirostat_tau\"][\"advanced\"] = True\n build_config[\"mirostat_eta\"][\"value\"] = None\n build_config[\"mirostat_tau\"][\"value\"] = None\n\n else:\n build_config[\"mirostat_eta\"][\"advanced\"] = False\n build_config[\"mirostat_tau\"][\"advanced\"] = False\n\n if field_value == \"Mirostat 2.0\":\n build_config[\"mirostat_eta\"][\"value\"] = 0.2\n build_config[\"mirostat_tau\"][\"value\"] = 10\n else:\n build_config[\"mirostat_eta\"][\"value\"] = 0.1\n build_config[\"mirostat_tau\"][\"value\"] = 5\n\n if field_name in {\"base_url\", \"model_name\"}:\n if build_config[\"base_url\"].get(\"load_from_db\", False):\n base_url_value = await self.get_variables(build_config[\"base_url\"].get(\"value\", \"\"), \"base_url\")\n else:\n base_url_value = build_config[\"base_url\"].get(\"value\", \"\")\n\n if not await self.is_valid_ollama_url(base_url_value):\n # Check if any URL in the list is valid\n valid_url = \"\"\n check_urls = URL_LIST\n if self.base_url:\n check_urls = [self.base_url, *URL_LIST]\n for url in check_urls:\n if await self.is_valid_ollama_url(url):\n valid_url = url\n break\n if valid_url != \"\":\n build_config[\"base_url\"][\"value\"] = valid_url\n else:\n msg = \"No valid Ollama URL found.\"\n raise ValueError(msg)\n if field_name in {\"model_name\", \"base_url\", \"tool_model_enabled\"}:\n if await self.is_valid_ollama_url(self.base_url):\n tool_model_enabled = build_config[\"tool_model_enabled\"].get(\"value\", False) or self.tool_model_enabled\n build_config[\"model_name\"][\"options\"] = await self.get_models(\n self.base_url, tool_model_enabled=tool_model_enabled\n )\n elif await self.is_valid_ollama_url(build_config[\"base_url\"].get(\"value\", \"\")):\n tool_model_enabled = build_config[\"tool_model_enabled\"].get(\"value\", False) or self.tool_model_enabled\n build_config[\"model_name\"][\"options\"] = await self.get_models(\n build_config[\"base_url\"].get(\"value\", \"\"), tool_model_enabled=tool_model_enabled\n )\n else:\n build_config[\"model_name\"][\"options\"] = []\n if field_name == \"keep_alive_flag\":\n if field_value == \"Keep\":\n build_config[\"keep_alive\"][\"value\"] = \"-1\"\n build_config[\"keep_alive\"][\"advanced\"] = True\n elif field_value == \"Immediately\":\n build_config[\"keep_alive\"][\"value\"] = \"0\"\n build_config[\"keep_alive\"][\"advanced\"] = True\n else:\n build_config[\"keep_alive\"][\"advanced\"] = False\n\n return build_config\n\n async def get_models(self, base_url_value: str, *, tool_model_enabled: bool | None = None) -> list[str]:\n \"\"\"Fetches a list of models from the Ollama API that do not have the \"embedding\" capability.\n\n Args:\n base_url_value (str): The base URL of the Ollama API.\n tool_model_enabled (bool | None, optional): If True, filters the models further to include\n only those that support tool calling. 
Defaults to None.\n\n Returns:\n list[str]: A list of model names that do not have the \"embedding\" capability. If\n `tool_model_enabled` is True, only models supporting tool calling are included.\n\n Raises:\n ValueError: If there is an issue with the API request or response, or if the model\n names cannot be retrieved.\n \"\"\"\n try:\n # Normalize the base URL to avoid the repeated \"/\" at the end\n base_url = base_url_value.rstrip(\"/\") + \"/\"\n\n # Ollama REST API to return models\n tags_url = urljoin(base_url, \"api/tags\")\n\n # Ollama REST API to return model capabilities\n show_url = urljoin(base_url, \"api/show\")\n\n async with httpx.AsyncClient() as client:\n # Fetch available models\n tags_response = await client.get(tags_url)\n tags_response.raise_for_status()\n models = tags_response.json()\n if asyncio.iscoroutine(models):\n models = await models\n await logger.adebug(f\"Available models: {models}\")\n\n # Filter models that are NOT embedding models\n model_ids = []\n for model in models[self.JSON_MODELS_KEY]:\n model_name = model[self.JSON_NAME_KEY]\n await logger.adebug(f\"Checking model: {model_name}\")\n\n payload = {\"model\": model_name}\n show_response = await client.post(show_url, json=payload)\n show_response.raise_for_status()\n json_data = show_response.json()\n if asyncio.iscoroutine(json_data):\n json_data = await json_data\n capabilities = json_data.get(self.JSON_CAPABILITIES_KEY, [])\n await logger.adebug(f\"Model: {model_name}, Capabilities: {capabilities}\")\n\n if self.DESIRED_CAPABILITY in capabilities and (\n not tool_model_enabled or self.TOOL_CALLING_CAPABILITY in capabilities\n ):\n model_ids.append(model_name)\n\n except (httpx.RequestError, ValueError) as e:\n msg = \"Could not get model names from Ollama.\"\n raise ValueError(msg) from e\n\n return model_ids\n"
- },
- "format": {
- "_input_type": "MessageTextInput",
- "advanced": true,
- "display_name": "Format",
- "dynamic": false,
- "info": "Specify the format of the output (e.g., json).",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "format",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "input_value": {
- "_input_type": "MessageInput",
- "advanced": false,
- "display_name": "Input",
- "dynamic": false,
- "info": "",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "input_value",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "metadata": {
- "_input_type": "DictInput",
- "advanced": true,
- "display_name": "Metadata",
- "dynamic": false,
- "info": "Metadata to add to the run trace.",
- "list": false,
- "list_add_label": "Add More",
- "name": "metadata",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "type": "dict",
- "value": {}
- },
- "mirostat": {
- "_input_type": "DropdownInput",
- "advanced": true,
- "combobox": false,
- "dialog_inputs": {},
- "display_name": "Mirostat",
- "dynamic": false,
- "info": "Enable/disable Mirostat sampling for controlling perplexity.",
- "name": "mirostat",
- "options": [
- "Disabled",
- "Mirostat",
- "Mirostat 2.0"
- ],
- "options_metadata": [],
- "placeholder": "",
- "real_time_refresh": true,
- "required": false,
- "show": true,
- "title_case": false,
- "toggle": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "str",
- "value": "Disabled"
- },
- "mirostat_eta": {
- "_input_type": "FloatInput",
- "advanced": true,
- "display_name": "Mirostat Eta",
- "dynamic": false,
- "info": "Learning rate for Mirostat algorithm. (Default: 0.1)",
- "list": false,
- "list_add_label": "Add More",
- "name": "mirostat_eta",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "float",
- "value": ""
- },
- "mirostat_tau": {
- "_input_type": "FloatInput",
- "advanced": true,
- "display_name": "Mirostat Tau",
- "dynamic": false,
- "info": "Controls the balance between coherence and diversity of the output. (Default: 5.0)",
- "list": false,
- "list_add_label": "Add More",
- "name": "mirostat_tau",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "float",
- "value": ""
- },
- "model_name": {
- "_input_type": "DropdownInput",
- "advanced": false,
- "combobox": false,
- "dialog_inputs": {},
- "display_name": "Model Name",
- "dynamic": false,
- "info": "Refer to https://ollama.com/library for more models.",
- "name": "model_name",
- "options": [
- "qwen3:4b"
- ],
- "options_metadata": [],
- "placeholder": "",
- "real_time_refresh": true,
- "refresh_button": true,
- "required": false,
- "show": true,
- "title_case": false,
- "toggle": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "str",
- "value": "qwen3:4b"
- },
- "num_ctx": {
- "_input_type": "IntInput",
- "advanced": true,
- "display_name": "Context Window Size",
- "dynamic": false,
- "info": "Size of the context window for generating tokens. (Default: 2048)",
- "list": false,
- "list_add_label": "Add More",
- "name": "num_ctx",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "int",
- "value": ""
- },
- "num_gpu": {
- "_input_type": "IntInput",
- "advanced": true,
- "display_name": "Number of GPUs",
- "dynamic": false,
- "info": "Number of GPUs to use for computation. (Default: 1 on macOS, 0 to disable)",
- "list": false,
- "list_add_label": "Add More",
- "name": "num_gpu",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "int",
- "value": ""
- },
- "num_thread": {
- "_input_type": "IntInput",
- "advanced": true,
- "display_name": "Number of Threads",
- "dynamic": false,
- "info": "Number of threads to use during computation. (Default: detected for optimal performance)",
- "list": false,
- "list_add_label": "Add More",
- "name": "num_thread",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "int",
- "value": ""
- },
- "repeat_last_n": {
- "_input_type": "IntInput",
- "advanced": true,
- "display_name": "Repeat Last N",
- "dynamic": false,
- "info": "How far back the model looks to prevent repetition. (Default: 64, 0 = disabled, -1 = num_ctx)",
- "list": false,
- "list_add_label": "Add More",
- "name": "repeat_last_n",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "int",
- "value": ""
- },
- "repeat_penalty": {
- "_input_type": "FloatInput",
- "advanced": true,
- "display_name": "Repeat Penalty",
- "dynamic": false,
- "info": "Penalty for repetitions in generated text. (Default: 1.1)",
- "list": false,
- "list_add_label": "Add More",
- "name": "repeat_penalty",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "float",
- "value": ""
- },
- "stop_tokens": {
- "_input_type": "MessageTextInput",
- "advanced": true,
- "display_name": "Stop Tokens",
- "dynamic": false,
- "info": "Comma-separated list of tokens to signal the model to stop generating text.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "stop_tokens",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "stream": {
- "_input_type": "BoolInput",
- "advanced": true,
- "display_name": "Stream",
- "dynamic": false,
- "info": "Stream the response from the model. Streaming works only in Chat.",
- "list": false,
- "list_add_label": "Add More",
- "name": "stream",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "bool",
- "value": false
- },
- "system": {
- "_input_type": "MessageTextInput",
- "advanced": true,
- "display_name": "System",
- "dynamic": false,
- "info": "System to use for generating text.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "system",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "system_message": {
- "_input_type": "MultilineInput",
- "advanced": false,
- "copy_field": false,
- "display_name": "System Message",
- "dynamic": false,
- "info": "System message to pass to the model.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "multiline": true,
- "name": "system_message",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "tags": {
- "_input_type": "MessageTextInput",
- "advanced": true,
- "display_name": "Tags",
- "dynamic": false,
- "info": "Comma-separated list of tags to add to the run trace.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "tags",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "temperature": {
- "_input_type": "SliderInput",
- "advanced": true,
- "display_name": "Temperature",
- "dynamic": false,
- "info": "",
- "max_label": "",
- "max_label_icon": "",
- "min_label": "",
- "min_label_icon": "",
- "name": "temperature",
- "placeholder": "",
- "range_spec": {
- "max": 1,
- "min": 0,
- "step": 0.01,
- "step_type": "float"
- },
- "required": false,
- "show": true,
- "slider_buttons": false,
- "slider_buttons_options": [],
- "slider_input": false,
- "title_case": false,
- "tool_mode": false,
- "type": "slider",
- "value": 0.1
- },
- "template": {
- "_input_type": "MessageTextInput",
- "advanced": true,
- "display_name": "Template",
- "dynamic": false,
- "info": "Template to use for generating text.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "template",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "tfs_z": {
- "_input_type": "FloatInput",
- "advanced": true,
- "display_name": "TFS Z",
- "dynamic": false,
- "info": "Tail free sampling value. (Default: 1)",
- "list": false,
- "list_add_label": "Add More",
- "name": "tfs_z",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "float",
- "value": ""
- },
- "timeout": {
- "_input_type": "IntInput",
- "advanced": true,
- "display_name": "Timeout",
- "dynamic": false,
- "info": "Timeout for the request stream.",
- "list": false,
- "list_add_label": "Add More",
- "name": "timeout",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "int",
- "value": ""
- },
- "tool_model_enabled": {
- "_input_type": "BoolInput",
- "advanced": false,
- "display_name": "Tool Model Enabled",
- "dynamic": false,
- "info": "Whether to enable tool calling in the model.",
- "list": false,
- "list_add_label": "Add More",
- "name": "tool_model_enabled",
- "placeholder": "",
- "real_time_refresh": true,
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "bool",
- "value": true
- },
- "top_k": {
- "_input_type": "IntInput",
- "advanced": true,
- "display_name": "Top K",
- "dynamic": false,
- "info": "Limits token selection to top K. (Default: 40)",
- "list": false,
- "list_add_label": "Add More",
- "name": "top_k",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "int",
- "value": ""
- },
- "top_p": {
- "_input_type": "FloatInput",
- "advanced": true,
- "display_name": "Top P",
- "dynamic": false,
- "info": "Works together with top-k. (Default: 0.9)",
- "list": false,
- "list_add_label": "Add More",
- "name": "top_p",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "float",
- "value": ""
- },
- "verbose": {
- "_input_type": "BoolInput",
- "advanced": true,
- "display_name": "Verbose",
- "dynamic": false,
- "info": "Whether to print out response text.",
- "list": false,
- "list_add_label": "Add More",
- "name": "verbose",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "bool",
- "value": false
- }
+ "tool_mode": false,
+ "trace_as_input": true,
+ "trace_as_metadata": true,
+ "load_from_db": false,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "template",
+ "value": "",
+ "display_name": "Template",
+ "advanced": true,
+ "input_types": [
+ "Message"
+ ],
+ "dynamic": false,
+ "info": "Template to use for generating text.",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "MessageTextInput"
},
- "tool_mode": false
+ "tfs_z": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "tfs_z",
+ "value": "",
+ "display_name": "TFS Z",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Tail free sampling value. (Default: 1)",
+ "title_case": false,
+ "type": "float",
+ "_input_type": "FloatInput"
+ },
+ "timeout": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "timeout",
+ "value": "",
+ "display_name": "Timeout",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Timeout for the request stream.",
+ "title_case": false,
+ "type": "int",
+ "_input_type": "IntInput"
+ },
+ "tool_model_enabled": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "tool_model_enabled",
+ "value": true,
+ "display_name": "Tool Model Enabled",
+ "advanced": false,
+ "dynamic": false,
+ "info": "Whether to enable tool calling in the model.",
+ "real_time_refresh": true,
+ "title_case": false,
+ "type": "bool",
+ "_input_type": "BoolInput"
+ },
+ "top_k": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "top_k",
+ "value": "",
+ "display_name": "Top K",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Limits token selection to top K. (Default: 40)",
+ "title_case": false,
+ "type": "int",
+ "_input_type": "IntInput"
+ },
+ "top_p": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "top_p",
+ "value": "",
+ "display_name": "Top P",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Works together with top-k. (Default: 0.9)",
+ "title_case": false,
+ "type": "float",
+ "_input_type": "FloatInput"
+ },
+ "verbose": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "verbose",
+ "value": false,
+ "display_name": "Verbose",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Whether to print out response text.",
+ "title_case": false,
+ "type": "bool",
+ "_input_type": "BoolInput"
+ }
},
- "selected_output": "text_output",
- "showNode": true,
- "type": "OllamaModel"
+ "description": "Generate text using Ollama Local LLMs.",
+ "icon": "Ollama",
+ "base_classes": [
+ "LanguageModel",
+ "Message"
+ ],
+ "display_name": "Ollama",
+ "documentation": "",
+ "minimized": false,
+ "custom_fields": {},
+ "output_types": [],
+ "pinned": false,
+ "conditional_paths": [],
+ "frozen": false,
+ "outputs": [
+ {
+ "types": [
+ "Message"
+ ],
+ "selected": "Message",
+ "name": "text_output",
+ "display_name": "Model Response",
+ "method": "text_response",
+ "value": "__UNDEFINED__",
+ "cache": true,
+ "required_inputs": null,
+ "allows_loop": false,
+ "group_outputs": false,
+ "options": null,
+ "tool_mode": true
+ },
+ {
+ "types": [
+ "LanguageModel"
+ ],
+ "selected": "LanguageModel",
+ "name": "model_output",
+ "display_name": "Language Model",
+ "method": "build_model",
+ "value": "__UNDEFINED__",
+ "cache": true,
+ "required_inputs": null,
+ "allows_loop": false,
+ "group_outputs": false,
+ "options": null,
+ "tool_mode": true
+ }
+ ],
+ "field_order": [
+ "base_url",
+ "model_name",
+ "temperature",
+ "format",
+ "metadata",
+ "mirostat",
+ "mirostat_eta",
+ "mirostat_tau",
+ "num_ctx",
+ "num_gpu",
+ "num_thread",
+ "repeat_last_n",
+ "repeat_penalty",
+ "tfs_z",
+ "timeout",
+ "top_k",
+ "top_p",
+ "verbose",
+ "tags",
+ "stop_tokens",
+ "system",
+ "tool_model_enabled",
+ "template",
+ "input_value",
+ "system_message",
+ "stream"
+ ],
+ "beta": false,
+ "legacy": false,
+ "edited": false,
+ "metadata": {
+ "keywords": [
+ "model",
+ "llm",
+ "language model",
+ "large language model"
+ ],
+ "module": "lfx.components.ollama.ollama.ChatOllamaComponent",
+ "code_hash": "54de3b5da388",
+ "dependencies": {
+ "total_dependencies": 3,
+ "dependencies": [
+ {
+ "name": "httpx",
+ "version": "0.28.1"
+ },
+ {
+ "name": "langchain_ollama",
+ "version": "0.2.1"
+ },
+ {
+ "name": "lfx",
+ "version": null
+ }
+ ]
+ }
+ },
+ "tool_mode": false,
+ "last_updated": "2025-09-29T18:39:30.798Z",
+ "official": false
},
- "dragging": false,
- "id": "OllamaModel-XDGqZ",
- "measured": {
- "height": 494,
- "width": 320
- },
- "position": {
- "x": 248.08287272472313,
- "y": 216.98088326271431
- },
- "selected": false,
- "type": "genericNode"
- }
\ No newline at end of file
+ "showNode": true,
+ "type": "OllamaModel",
+ "id": "OllamaModel-8Re0J",
+ "selected_output": "text_output"
+ },
+ "id": "OllamaModel-8Re0J",
+ "position": {
+ "x": 0,
+ "y": 0
+ },
+ "type": "genericNode"
+}
\ No newline at end of file
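
The Ollama component code embedded in the flow above discovers usable models in two steps: it lists installed models from the server's `api/tags` endpoint, then calls `api/show` per model and keeps only those whose capabilities include `completion` (and `tools` when tool calling is enabled). A minimal standalone sketch of that filtering logic, assuming an Ollama server at the default `http://localhost:11434` (the host and function name here are illustrative, not part of the flow files):

```python
import asyncio
from urllib.parse import urljoin

import httpx

BASE_URL = "http://localhost:11434/"  # assumption: default local Ollama endpoint


async def list_completion_models(base_url: str, *, require_tools: bool = False) -> list[str]:
    # Mirror the component's filter: keep models advertising the "completion"
    # capability, and optionally only those that also advertise "tools".
    async with httpx.AsyncClient() as client:
        tags = (await client.get(urljoin(base_url, "api/tags"))).json()
        kept = []
        for model in tags.get("models", []):
            name = model["name"]
            show = (await client.post(urljoin(base_url, "api/show"), json={"model": name})).json()
            caps = show.get("capabilities", [])
            if "completion" in caps and (not require_tools or "tools" in caps):
                kept.append(name)
        return kept


if __name__ == "__main__":
    print(asyncio.run(list_completion_models(BASE_URL, require_tools=True)))
```

Probing `api/show` per model is what lets the component's model dropdown hide embedding-only models before the user picks one.
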
diff --git a/flows/components/watsonx_embedding.json b/flows/components/watsonx_embedding.json
index 850cfb07..1d76c046 100644
--- a/flows/components/watsonx_embedding.json
+++ b/flows/components/watsonx_embedding.json
@@ -1,246 +1,207 @@
{
- "data": {
- "id": "WatsonxEmbeddingsComponent-pJfXI",
+"data": {
"node": {
- "base_classes": [
- "Embeddings"
- ],
- "beta": false,
- "conditional_paths": [],
- "custom_fields": {},
- "description": "Generate embeddings using IBM watsonx.ai models.",
- "display_name": "IBM watsonx.ai Embeddings",
- "documentation": "",
- "edited": false,
- "field_order": [
- "url",
- "project_id",
- "api_key",
- "model_name",
- "truncate_input_tokens",
- "input_text"
- ],
- "frozen": false,
- "icon": "WatsonxAI",
- "last_updated": "2025-09-22T20:11:38.181Z",
- "legacy": false,
- "metadata": {
- "code_hash": "b6c6d50cc7ed",
- "dependencies": {
- "dependencies": [
- {
- "name": "requests",
- "version": "2.32.5"
- },
- {
- "name": "ibm_watsonx_ai",
- "version": "1.3.34"
- },
- {
- "name": "langchain_ibm",
- "version": "0.3.16"
- },
- {
- "name": "pydantic",
- "version": "2.10.6"
- },
- {
- "name": "langflow",
- "version": null
- }
- ],
- "total_dependencies": 5
- },
- "module": "langflow.components.ibm.watsonx_embeddings.WatsonxEmbeddingsComponent"
- },
- "minimized": false,
- "output_types": [],
- "outputs": [
- {
- "allows_loop": false,
- "cache": true,
- "display_name": "Embedding Model",
- "group_outputs": false,
- "method": "build_embeddings",
- "name": "embeddings",
- "options": null,
- "required_inputs": null,
- "selected": "Embeddings",
- "tool_mode": true,
- "types": [
- "Embeddings"
- ],
- "value": "__UNDEFINED__"
- }
- ],
- "pinned": false,
- "template": {
+ "template": {
"_type": "Component",
"api_key": {
- "_input_type": "SecretStrInput",
- "advanced": false,
- "display_name": "API Key",
- "dynamic": false,
- "info": "The API Key to use for the model.",
- "input_types": [],
- "load_from_db": true,
- "name": "api_key",
- "password": true,
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "type": "str",
- "value": "WATSONX_API_KEY"
+ "load_from_db": true,
+ "required": true,
+ "placeholder": "",
+ "show": true,
+ "name": "api_key",
+ "value": "WATSONX_API_KEY",
+ "display_name": "Watsonx API Key",
+ "advanced": false,
+ "input_types": [],
+ "dynamic": false,
+ "info": "The API Key to use for the model.",
+ "title_case": false,
+ "password": true,
+ "type": "str",
+ "_input_type": "SecretStrInput"
},
"code": {
- "advanced": true,
- "dynamic": true,
- "fileTypes": [],
- "file_path": "",
- "info": "",
- "list": false,
- "load_from_db": false,
- "multiline": true,
- "name": "code",
- "password": false,
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "type": "code",
- "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai import APIClient, Credentials\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_ibm import WatsonxEmbeddings\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.embeddings.model import LCEmbeddingsModel\nfrom langflow.field_typing import Embeddings\nfrom langflow.io import BoolInput, DropdownInput, IntInput, SecretStrInput, StrInput\nfrom langflow.logging.logger import logger\nfrom langflow.schema.dotdict import dotdict\n\n\nclass WatsonxEmbeddingsComponent(LCEmbeddingsModel):\n display_name = \"IBM watsonx.ai Embeddings\"\n description = \"Generate embeddings using IBM watsonx.ai models.\"\n icon = \"WatsonxAI\"\n name = \"WatsonxEmbeddingsComponent\"\n\n # models present in all the regions\n _default_models = [\n \"sentence-transformers/all-minilm-l12-v2\",\n \"ibm/slate-125m-english-rtrvr-v2\",\n \"ibm/slate-30m-english-rtrvr-v2\",\n \"intfloat/multilingual-e5-large\",\n ]\n\n inputs = [\n DropdownInput(\n name=\"url\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API.\",\n value=None,\n options=[\n \"https://us-south.ml.cloud.ibm.com\",\n \"https://eu-de.ml.cloud.ibm.com\",\n \"https://eu-gb.ml.cloud.ibm.com\",\n \"https://au-syd.ml.cloud.ibm.com\",\n \"https://jp-tok.ml.cloud.ibm.com\",\n \"https://ca-tor.ml.cloud.ibm.com\",\n ],\n real_time_refresh=True,\n ),\n StrInput(\n name=\"project_id\",\n display_name=\"watsonx project id\",\n info=\"The project ID or deployment space ID that is associated with the foundation model.\",\n required=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"API Key\",\n info=\"The API Key to use for the model.\",\n required=True,\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=[],\n value=None,\n dynamic=True,\n required=True,\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n ),\n ]\n\n @staticmethod\n def fetch_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WatsonxEmbeddingsComponent._default_models\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None):\n \"\"\"Update model options when URL or API key changes.\"\"\"\n logger.debug(\n \"Updating build config. 
Field name: %s, Field value: %s\",\n field_name,\n field_value,\n )\n\n if field_name == \"url\" and field_value:\n try:\n models = self.fetch_models(base_url=build_config.url.value)\n build_config.model_name.options = models\n if build_config.model_name.value:\n build_config.model_name.value = models[0]\n info_message = f\"Updated model options: {len(models)} models found in {build_config.url.value}\"\n logger.info(info_message)\n except Exception: # noqa: BLE001\n logger.exception(\"Error updating model options.\")\n\n def build_embeddings(self) -> Embeddings:\n credentials = Credentials(\n api_key=SecretStr(self.api_key).get_secret_value(),\n url=self.url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n return WatsonxEmbeddings(\n model_id=self.model_name,\n params=params,\n watsonx_client=api_client,\n project_id=self.project_id,\n )\n"
+ "type": "code",
+ "required": true,
+ "placeholder": "",
+ "list": false,
+ "show": true,
+ "multiline": true,
+ "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai import APIClient, Credentials\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_ibm import WatsonxEmbeddings\nfrom pydantic.v1 import SecretStr\n\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import BoolInput, DropdownInput, IntInput, SecretStrInput, StrInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\n\n\nclass WatsonxEmbeddingsComponent(LCEmbeddingsModel):\n display_name = \"IBM watsonx.ai Embeddings\"\n description = \"Generate embeddings using IBM watsonx.ai models.\"\n icon = \"WatsonxAI\"\n name = \"WatsonxEmbeddingsComponent\"\n\n # models present in all the regions\n _default_models = [\n \"sentence-transformers/all-minilm-l12-v2\",\n \"ibm/slate-125m-english-rtrvr-v2\",\n \"ibm/slate-30m-english-rtrvr-v2\",\n \"intfloat/multilingual-e5-large\",\n ]\n\n inputs = [\n DropdownInput(\n name=\"url\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API.\",\n value=None,\n options=[\n \"https://us-south.ml.cloud.ibm.com\",\n \"https://eu-de.ml.cloud.ibm.com\",\n \"https://eu-gb.ml.cloud.ibm.com\",\n \"https://au-syd.ml.cloud.ibm.com\",\n \"https://jp-tok.ml.cloud.ibm.com\",\n \"https://ca-tor.ml.cloud.ibm.com\",\n ],\n real_time_refresh=True,\n ),\n StrInput(\n name=\"project_id\",\n display_name=\"watsonx project id\",\n info=\"The project ID or deployment space ID that is associated with the foundation model.\",\n required=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Watsonx API Key\",\n info=\"The API Key to use for the model.\",\n required=True,\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=[],\n value=None,\n dynamic=True,\n required=True,\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n ),\n ]\n\n @staticmethod\n def fetch_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WatsonxEmbeddingsComponent._default_models\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None):\n \"\"\"Update model options when URL or API key changes.\"\"\"\n logger.debug(\n \"Updating build config. 
Field name: %s, Field value: %s\",\n field_name,\n field_value,\n )\n\n if field_name == \"url\" and field_value:\n try:\n models = self.fetch_models(base_url=build_config.url.value)\n build_config.model_name.options = models\n if build_config.model_name.value:\n build_config.model_name.value = models[0]\n info_message = f\"Updated model options: {len(models)} models found in {build_config.url.value}\"\n logger.info(info_message)\n except Exception: # noqa: BLE001\n logger.exception(\"Error updating model options.\")\n\n def build_embeddings(self) -> Embeddings:\n credentials = Credentials(\n api_key=SecretStr(self.api_key).get_secret_value(),\n url=self.url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n return WatsonxEmbeddings(\n model_id=self.model_name,\n params=params,\n watsonx_client=api_client,\n project_id=self.project_id,\n )\n",
+ "fileTypes": [],
+ "file_path": "",
+ "password": false,
+ "name": "code",
+ "advanced": true,
+ "dynamic": true,
+ "info": "",
+ "load_from_db": false,
+ "title_case": false
},
"input_text": {
- "_input_type": "BoolInput",
- "advanced": true,
- "display_name": "Include the original text in the output",
- "dynamic": false,
- "info": "",
- "list": false,
- "list_add_label": "Add More",
- "name": "input_text",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "bool",
- "value": true
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "input_text",
+ "value": true,
+ "display_name": "Include the original text in the output",
+ "advanced": true,
+ "dynamic": false,
+ "info": "",
+ "title_case": false,
+ "type": "bool",
+ "_input_type": "BoolInput"
},
"model_name": {
- "_input_type": "DropdownInput",
- "advanced": false,
- "combobox": false,
- "dialog_inputs": {},
- "display_name": "Model Name",
- "dynamic": true,
- "info": "",
- "name": "model_name",
- "options": [
- "ibm/granite-embedding-107m-multilingual",
- "ibm/granite-embedding-278m-multilingual",
- "ibm/slate-125m-english-rtrvr",
- "ibm/slate-125m-english-rtrvr-v2",
- "ibm/slate-30m-english-rtrvr",
- "ibm/slate-30m-english-rtrvr-v2",
- "intfloat/multilingual-e5-large",
- "sentence-transformers/all-minilm-l6-v2"
- ],
- "options_metadata": [],
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "toggle": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "str",
- "value": "ibm/granite-embedding-107m-multilingual"
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "options": [],
+ "options_metadata": [],
+ "combobox": false,
+ "dialog_inputs": {},
+ "toggle": false,
+ "required": true,
+ "placeholder": "",
+ "show": true,
+ "name": "model_name",
+ "display_name": "Model Name",
+ "advanced": false,
+ "dynamic": true,
+ "info": "",
+ "title_case": false,
+ "external_options": {},
+ "type": "str",
+ "_input_type": "DropdownInput"
},
"project_id": {
- "_input_type": "StrInput",
- "advanced": false,
- "display_name": "watsonx project id",
- "dynamic": false,
- "info": "The project ID or deployment space ID that is associated with the foundation model.",
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": true,
- "name": "project_id",
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "str",
- "value": "WATSONX_PROJECT_ID"
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "load_from_db": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": true,
+ "placeholder": "",
+ "show": true,
+ "name": "project_id",
+ "value": "WATSONX_PROJECT_ID",
+ "display_name": "watsonx project id",
+ "advanced": false,
+ "dynamic": false,
+ "info": "The project ID or deployment space ID that is associated with the foundation model.",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "StrInput"
},
"truncate_input_tokens": {
- "_input_type": "IntInput",
- "advanced": true,
- "display_name": "Truncate Input Tokens",
- "dynamic": false,
- "info": "",
- "list": false,
- "list_add_label": "Add More",
- "name": "truncate_input_tokens",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "int",
- "value": 200
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "truncate_input_tokens",
+ "value": 200,
+ "display_name": "Truncate Input Tokens",
+ "advanced": true,
+ "dynamic": false,
+ "info": "",
+ "title_case": false,
+ "type": "int",
+ "_input_type": "IntInput"
},
"url": {
- "_input_type": "DropdownInput",
- "advanced": false,
- "combobox": false,
- "dialog_inputs": {},
- "display_name": "watsonx API Endpoint",
- "dynamic": false,
- "info": "The base URL of the API.",
- "name": "url",
- "options": [
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "options": [
"https://us-south.ml.cloud.ibm.com",
"https://eu-de.ml.cloud.ibm.com",
"https://eu-gb.ml.cloud.ibm.com",
"https://au-syd.ml.cloud.ibm.com",
"https://jp-tok.ml.cloud.ibm.com",
"https://ca-tor.ml.cloud.ibm.com"
- ],
- "options_metadata": [],
- "placeholder": "",
- "real_time_refresh": true,
- "required": false,
- "show": true,
- "title_case": false,
- "toggle": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "str",
- "value": "https://us-south.ml.cloud.ibm.com"
+ ],
+ "options_metadata": [],
+ "combobox": false,
+ "dialog_inputs": {},
+ "toggle": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "url",
+ "display_name": "watsonx API Endpoint",
+ "advanced": false,
+ "dynamic": false,
+ "info": "The base URL of the API.",
+ "real_time_refresh": true,
+ "title_case": false,
+ "external_options": {},
+ "type": "str",
+ "_input_type": "DropdownInput"
}
- },
- "tool_mode": false
+ },
+ "description": "Generate embeddings using IBM watsonx.ai models.",
+ "icon": "WatsonxAI",
+ "base_classes": ["Embeddings"],
+ "display_name": "IBM watsonx.ai Embeddings",
+ "documentation": "",
+ "minimized": false,
+ "custom_fields": {},
+ "output_types": [],
+ "pinned": false,
+ "conditional_paths": [],
+ "frozen": false,
+ "outputs": [
+ {
+ "types": ["Embeddings"],
+ "selected": "Embeddings",
+ "name": "embeddings",
+ "display_name": "Embedding Model",
+ "method": "build_embeddings",
+ "value": "__UNDEFINED__",
+ "cache": true,
+ "allows_loop": false,
+ "group_outputs": false,
+ "tool_mode": true
+ }
+ ],
+ "field_order": [
+ "url",
+ "project_id",
+ "api_key",
+ "model_name",
+ "truncate_input_tokens",
+ "input_text"
+ ],
+ "beta": false,
+ "legacy": false,
+ "edited": false,
+ "metadata": {
+ "module": "lfx.components.ibm.watsonx_embeddings.WatsonxEmbeddingsComponent",
+ "code_hash": "ffded413ea90",
+ "dependencies": {
+ "total_dependencies": 5,
+ "dependencies": [
+ { "name": "requests", "version": "2.32.5" },
+ { "name": "ibm_watsonx_ai", "version": "1.3.34" },
+ { "name": "langchain_ibm", "version": "0.3.16" },
+ { "name": "pydantic", "version": "2.10.6" },
+ { "name": "lfx", "version": null }
+ ]
+ }
+ },
+ "tool_mode": false,
+ "official": false
},
"showNode": true,
- "type": "WatsonxEmbeddingsComponent"
- },
- "dragging": false,
- "id": "WatsonxEmbeddingsComponent-pJfXI",
- "measured": {
- "height": 467,
- "width": 320
- },
- "position": {
- "x": 364.4406919374723,
- "y": 282.29319267029086
- },
- "selected": false,
- "type": "genericNode"
+ "type": "WatsonxEmbeddingsComponent",
+ "id": "WatsonxEmbeddingsComponent-q67FN"
+},
+"id": "WatsonxEmbeddingsComponent-q67FN",
+"position": { "x": 0, "y": 0 },
+"type": "genericNode"
}
\ No newline at end of file
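
Both watsonx components populate their model dropdowns from the `/ml/v1/foundation_model_specs` endpoint, passing a pinned `version` date and a capability filter (`function_embedding` for the embeddings component, `function_text_chat` for the chat model below) and falling back to a hard-coded default list if the request fails. A minimal sketch of that request, assuming the `us-south` regional endpoint from the dropdown options (the function name is illustrative):

```python
import requests

# Assumption: the us-south endpoint; any regional URL from the dropdown works.
BASE_URL = "https://us-south.ml.cloud.ibm.com"


def fetch_embedding_models(base_url: str = BASE_URL) -> list[str]:
    # Same request the embeddings component issues to build its model list.
    response = requests.get(
        f"{base_url}/ml/v1/foundation_model_specs",
        params={"version": "2024-09-16", "filters": "function_embedding,!lifecycle_withdrawn:and"},
        timeout=10,
    )
    response.raise_for_status()
    return sorted(model["model_id"] for model in response.json().get("resources", []))


if __name__ == "__main__":
    print(fetch_embedding_models())
```
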
diff --git a/flows/components/watsonx_llm.json b/flows/components/watsonx_llm.json
index 99a4a936..61baac42 100644
--- a/flows/components/watsonx_llm.json
+++ b/flows/components/watsonx_llm.json
@@ -1,18 +1,456 @@
{
"data": {
- "id": "IBMwatsonxModel-jA4Nw",
"node": {
+ "template": {
+ "_type": "Component",
+ "api_key": {
+ "load_from_db": true,
+ "required": true,
+ "placeholder": "",
+ "show": true,
+ "name": "api_key",
+ "value": "WATSONX_API_KEY",
+ "display_name": "Watsonx API Key",
+ "advanced": false,
+ "input_types": [],
+ "dynamic": false,
+ "info": "The API Key to use for the model.",
+ "title_case": false,
+ "password": true,
+ "type": "str",
+ "_input_type": "SecretStrInput"
+ },
+ "code": {
+ "type": "code",
+ "required": true,
+ "placeholder": "",
+ "list": false,
+ "show": true,
+ "multiline": true,
+ "value": "import json\nfrom typing import Any\n\nimport requests\nfrom langchain_ibm import ChatWatsonx\nfrom pydantic.v1 import SecretStr\n\nfrom lfx.base.models.model import LCModelComponent\nfrom lfx.field_typing import LanguageModel\nfrom lfx.field_typing.range_spec import RangeSpec\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\n\n\nclass WatsonxAIComponent(LCModelComponent):\n display_name = \"IBM watsonx.ai\"\n description = \"Generate text using IBM watsonx.ai foundation models.\"\n icon = \"WatsonxAI\"\n name = \"IBMwatsonxModel\"\n beta = False\n\n _default_models = [\"ibm/granite-3-2b-instruct\", \"ibm/granite-3-8b-instruct\", \"ibm/granite-13b-instruct-v2\"]\n\n inputs = [\n *LCModelComponent.get_base_inputs(),\n DropdownInput(\n name=\"url\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API.\",\n value=None,\n options=[\n \"https://us-south.ml.cloud.ibm.com\",\n \"https://eu-de.ml.cloud.ibm.com\",\n \"https://eu-gb.ml.cloud.ibm.com\",\n \"https://au-syd.ml.cloud.ibm.com\",\n \"https://jp-tok.ml.cloud.ibm.com\",\n \"https://ca-tor.ml.cloud.ibm.com\",\n ],\n real_time_refresh=True,\n ),\n StrInput(\n name=\"project_id\",\n display_name=\"watsonx Project ID\",\n required=True,\n info=\"The project ID or deployment space ID that is associated with the foundation model.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Watsonx API Key\",\n info=\"The API Key to use for the model.\",\n required=True,\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=[],\n value=None,\n dynamic=True,\n required=True,\n ),\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate.\",\n range_spec=RangeSpec(min=1, max=4096),\n value=1000,\n ),\n StrInput(\n name=\"stop_sequence\",\n display_name=\"Stop Sequence\",\n advanced=True,\n info=\"Sequence where generation should stop.\",\n field_type=\"str\",\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n info=\"Controls randomness, higher values increase diversity.\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=2, step=0.01),\n advanced=True,\n ),\n SliderInput(\n name=\"top_p\",\n display_name=\"Top P\",\n info=\"The cumulative probability cutoff for token selection. 
\"\n \"Lower values mean sampling from a smaller, more top-weighted nucleus.\",\n value=0.9,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n SliderInput(\n name=\"frequency_penalty\",\n display_name=\"Frequency Penalty\",\n info=\"Penalty for frequency of token usage.\",\n value=0.5,\n range_spec=RangeSpec(min=-2.0, max=2.0, step=0.01),\n advanced=True,\n ),\n SliderInput(\n name=\"presence_penalty\",\n display_name=\"Presence Penalty\",\n info=\"Penalty for token presence in prior text.\",\n value=0.3,\n range_spec=RangeSpec(min=-2.0, max=2.0, step=0.01),\n advanced=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Random Seed\",\n advanced=True,\n info=\"The random seed for the model.\",\n value=8,\n ),\n BoolInput(\n name=\"logprobs\",\n display_name=\"Log Probabilities\",\n advanced=True,\n info=\"Whether to return log probabilities of the output tokens.\",\n value=True,\n ),\n IntInput(\n name=\"top_logprobs\",\n display_name=\"Top Log Probabilities\",\n advanced=True,\n info=\"Number of most likely tokens to return at each position.\",\n value=3,\n range_spec=RangeSpec(min=1, max=20),\n ),\n StrInput(\n name=\"logit_bias\",\n display_name=\"Logit Bias\",\n advanced=True,\n info='JSON string of token IDs to bias or suppress (e.g., {\"1003\": -100, \"1004\": 100}).',\n field_type=\"str\",\n ),\n ]\n\n @staticmethod\n def fetch_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\"version\": \"2024-09-16\", \"filters\": \"function_text_chat,!lifecycle_withdrawn\"}\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models. Using default models.\")\n return WatsonxAIComponent._default_models\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None):\n \"\"\"Update model options when URL or API key changes.\"\"\"\n logger.info(\"Updating build config. Field name: %s, Field value: %s\", field_name, field_value)\n\n if field_name == \"url\" and field_value:\n try:\n models = self.fetch_models(base_url=build_config.url.value)\n build_config.model_name.options = models\n if build_config.model_name.value:\n build_config.model_name.value = models[0]\n info_message = f\"Updated model options: {len(models)} models found in {build_config.url.value}\"\n logger.info(info_message)\n except Exception: # noqa: BLE001\n logger.exception(\"Error updating model options.\")\n\n def build_model(self) -> LanguageModel:\n # Parse logit_bias from JSON string if provided\n logit_bias = None\n if hasattr(self, \"logit_bias\") and self.logit_bias:\n try:\n logit_bias = json.loads(self.logit_bias)\n except json.JSONDecodeError:\n logger.warning(\"Invalid logit_bias JSON format. 
Using default instead.\")\n logit_bias = {\"1003\": -100, \"1004\": -100}\n\n chat_params = {\n \"max_tokens\": getattr(self, \"max_tokens\", None),\n \"temperature\": getattr(self, \"temperature\", None),\n \"top_p\": getattr(self, \"top_p\", None),\n \"frequency_penalty\": getattr(self, \"frequency_penalty\", None),\n \"presence_penalty\": getattr(self, \"presence_penalty\", None),\n \"seed\": getattr(self, \"seed\", None),\n \"stop\": [self.stop_sequence] if self.stop_sequence else [],\n \"n\": 1,\n \"logprobs\": getattr(self, \"logprobs\", True),\n \"top_logprobs\": getattr(self, \"top_logprobs\", None),\n \"time_limit\": 600000,\n \"logit_bias\": logit_bias,\n }\n\n return ChatWatsonx(\n apikey=SecretStr(self.api_key).get_secret_value(),\n url=self.url,\n project_id=self.project_id,\n model_id=self.model_name,\n params=chat_params,\n streaming=self.stream,\n )\n",
+ "fileTypes": [],
+ "file_path": "",
+ "password": false,
+ "name": "code",
+ "advanced": true,
+ "dynamic": true,
+ "info": "",
+ "load_from_db": false,
+ "title_case": false
+ },
+ "frequency_penalty": {
+ "tool_mode": false,
+ "min_label": "",
+ "max_label": "",
+ "min_label_icon": "",
+ "max_label_icon": "",
+ "slider_buttons": false,
+ "slider_buttons_options": [],
+ "slider_input": false,
+ "range_spec": {
+ "step_type": "float",
+ "min": -2,
+ "max": 2,
+ "step": 0.01
+ },
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "frequency_penalty",
+ "value": 0.5,
+ "display_name": "Frequency Penalty",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Penalty for frequency of token usage.",
+ "title_case": false,
+ "type": "slider",
+ "_input_type": "SliderInput"
+ },
+ "input_value": {
+ "trace_as_input": true,
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "load_from_db": false,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "input_value",
+ "value": "",
+ "display_name": "Input",
+ "advanced": false,
+ "input_types": [
+ "Message"
+ ],
+ "dynamic": false,
+ "info": "",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "MessageInput"
+ },
+ "logit_bias": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "load_from_db": false,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "logit_bias",
+ "value": "",
+ "display_name": "Logit Bias",
+ "advanced": true,
+ "dynamic": false,
+ "info": "JSON string of token IDs to bias or suppress (e.g., {\"1003\": -100, \"1004\": 100}).",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "StrInput"
+ },
+ "logprobs": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "logprobs",
+ "value": true,
+ "display_name": "Log Probabilities",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Whether to return log probabilities of the output tokens.",
+ "title_case": false,
+ "type": "bool",
+ "_input_type": "BoolInput"
+ },
+ "max_tokens": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "range_spec": {
+ "step_type": "float",
+ "min": 1,
+ "max": 4096,
+ "step": 0.1
+ },
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "max_tokens",
+ "value": 1000,
+ "display_name": "Max Tokens",
+ "advanced": true,
+ "dynamic": false,
+ "info": "The maximum number of tokens to generate.",
+ "title_case": false,
+ "type": "int",
+ "_input_type": "IntInput"
+ },
+ "model_name": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "options": [],
+ "options_metadata": [],
+ "combobox": false,
+ "dialog_inputs": {},
+ "toggle": false,
+ "required": true,
+ "placeholder": "",
+ "show": true,
+ "name": "model_name",
+ "display_name": "Model Name",
+ "advanced": false,
+ "dynamic": true,
+ "info": "",
+ "title_case": false,
+ "external_options": {},
+ "type": "str",
+ "_input_type": "DropdownInput"
+ },
+ "presence_penalty": {
+ "tool_mode": false,
+ "min_label": "",
+ "max_label": "",
+ "min_label_icon": "",
+ "max_label_icon": "",
+ "slider_buttons": false,
+ "slider_buttons_options": [],
+ "slider_input": false,
+ "range_spec": {
+ "step_type": "float",
+ "min": -2,
+ "max": 2,
+ "step": 0.01
+ },
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "presence_penalty",
+ "value": 0.3,
+ "display_name": "Presence Penalty",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Penalty for token presence in prior text.",
+ "title_case": false,
+ "type": "slider",
+ "_input_type": "SliderInput"
+ },
+ "project_id": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "load_from_db": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": true,
+ "placeholder": "",
+ "show": true,
+ "name": "project_id",
+ "value": "WATSONX_PROJECT_ID",
+ "display_name": "watsonx Project ID",
+ "advanced": false,
+ "dynamic": false,
+ "info": "The project ID or deployment space ID that is associated with the foundation model.",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "StrInput"
+ },
+ "seed": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "seed",
+ "value": 8,
+ "display_name": "Random Seed",
+ "advanced": true,
+ "dynamic": false,
+ "info": "The random seed for the model.",
+ "title_case": false,
+ "type": "int",
+ "_input_type": "IntInput"
+ },
+ "stop_sequence": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "load_from_db": false,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "stop_sequence",
+ "value": "",
+ "display_name": "Stop Sequence",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Sequence where generation should stop.",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "StrInput"
+ },
+ "stream": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "stream",
+ "value": false,
+ "display_name": "Stream",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Stream the response from the model. Streaming works only in Chat.",
+ "title_case": false,
+ "type": "bool",
+ "_input_type": "BoolInput"
+ },
+ "system_message": {
+ "tool_mode": false,
+ "trace_as_input": true,
+ "multiline": true,
+ "trace_as_metadata": true,
+ "load_from_db": false,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "system_message",
+ "value": "",
+ "display_name": "System Message",
+ "advanced": false,
+ "input_types": [
+ "Message"
+ ],
+ "dynamic": false,
+ "info": "System message to pass to the model.",
+ "title_case": false,
+ "copy_field": false,
+ "type": "str",
+ "_input_type": "MultilineInput"
+ },
+ "temperature": {
+ "tool_mode": false,
+ "min_label": "",
+ "max_label": "",
+ "min_label_icon": "",
+ "max_label_icon": "",
+ "slider_buttons": false,
+ "slider_buttons_options": [],
+ "slider_input": false,
+ "range_spec": {
+ "step_type": "float",
+ "min": 0,
+ "max": 2,
+ "step": 0.01
+ },
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "temperature",
+ "value": 0.1,
+ "display_name": "Temperature",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Controls randomness, higher values increase diversity.",
+ "title_case": false,
+ "type": "slider",
+ "_input_type": "SliderInput"
+ },
+ "top_logprobs": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "range_spec": {
+ "step_type": "float",
+ "min": 1,
+ "max": 20,
+ "step": 0.1
+ },
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "top_logprobs",
+ "value": 3,
+ "display_name": "Top Log Probabilities",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Number of most likely tokens to return at each position.",
+ "title_case": false,
+ "type": "int",
+ "_input_type": "IntInput"
+ },
+ "top_p": {
+ "tool_mode": false,
+ "min_label": "",
+ "max_label": "",
+ "min_label_icon": "",
+ "max_label_icon": "",
+ "slider_buttons": false,
+ "slider_buttons_options": [],
+ "slider_input": false,
+ "range_spec": {
+ "step_type": "float",
+ "min": 0,
+ "max": 1,
+ "step": 0.01
+ },
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "top_p",
+ "value": 0.9,
+ "display_name": "Top P",
+ "advanced": true,
+ "dynamic": false,
+ "info": "The cumulative probability cutoff for token selection. Lower values mean sampling from a smaller, more top-weighted nucleus.",
+ "title_case": false,
+ "type": "slider",
+ "_input_type": "SliderInput"
+ },
+ "url": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "options": [
+ "https://us-south.ml.cloud.ibm.com",
+ "https://eu-de.ml.cloud.ibm.com",
+ "https://eu-gb.ml.cloud.ibm.com",
+ "https://au-syd.ml.cloud.ibm.com",
+ "https://jp-tok.ml.cloud.ibm.com",
+ "https://ca-tor.ml.cloud.ibm.com"
+ ],
+ "options_metadata": [],
+ "combobox": false,
+ "dialog_inputs": {},
+ "toggle": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "url",
+ "display_name": "watsonx API Endpoint",
+ "advanced": false,
+ "dynamic": false,
+ "info": "The base URL of the API.",
+ "real_time_refresh": true,
+ "title_case": false,
+ "external_options": {},
+ "type": "str",
+ "_input_type": "DropdownInput"
+ }
+ },
+ "description": "Generate text using IBM watsonx.ai foundation models.",
+ "icon": "WatsonxAI",
"base_classes": [
"LanguageModel",
"Message"
],
- "beta": false,
- "conditional_paths": [],
- "custom_fields": {},
- "description": "Generate text using IBM watsonx.ai foundation models.",
"display_name": "IBM watsonx.ai",
"documentation": "",
- "edited": false,
+ "minimized": false,
+ "custom_fields": {},
+ "output_types": [],
+ "pinned": false,
+ "conditional_paths": [],
+ "frozen": false,
+ "outputs": [
+ {
+ "types": [
+ "Message"
+ ],
+ "name": "text_output",
+ "display_name": "Model Response",
+ "method": "text_response",
+ "value": "__UNDEFINED__",
+ "cache": true,
+ "allows_loop": false,
+ "group_outputs": false,
+ "tool_mode": true
+ },
+ {
+ "types": [
+ "LanguageModel"
+ ],
+ "selected": "LanguageModel",
+ "name": "model_output",
+ "display_name": "Language Model",
+ "method": "build_model",
+ "value": "__UNDEFINED__",
+ "cache": true,
+ "allows_loop": false,
+ "group_outputs": false,
+ "tool_mode": true
+ }
+ ],
"field_order": [
"input_value",
"system_message",
@@ -32,13 +470,20 @@
"top_logprobs",
"logit_bias"
],
- "frozen": false,
- "icon": "WatsonxAI",
- "last_updated": "2025-09-22T20:03:31.248Z",
+ "beta": false,
"legacy": false,
+ "edited": false,
"metadata": {
- "code_hash": "7767fd69a954",
+ "keywords": [
+ "model",
+ "llm",
+ "language model",
+ "large language model"
+ ],
+ "module": "lfx.components.ibm.watsonx.WatsonxAIComponent",
+ "code_hash": "85c24939214c",
"dependencies": {
+ "total_dependencies": 4,
"dependencies": [
{
"name": "requests",
@@ -53,498 +498,24 @@
"version": "2.10.6"
},
{
- "name": "langflow",
+ "name": "lfx",
"version": null
}
- ],
- "total_dependencies": 4
- },
- "keywords": [
- "model",
- "llm",
- "language model",
- "large language model"
- ],
- "module": "langflow.components.ibm.watsonx.WatsonxAIComponent"
- },
- "minimized": false,
- "output_types": [],
- "outputs": [
- {
- "allows_loop": false,
- "cache": true,
- "display_name": "Model Response",
- "group_outputs": false,
- "method": "text_response",
- "name": "text_output",
- "options": null,
- "required_inputs": null,
- "tool_mode": true,
- "types": [
- "Message"
- ],
- "value": "__UNDEFINED__"
- },
- {
- "allows_loop": false,
- "cache": true,
- "display_name": "Language Model",
- "group_outputs": false,
- "method": "build_model",
- "name": "model_output",
- "options": null,
- "required_inputs": null,
- "selected": "LanguageModel",
- "tool_mode": true,
- "types": [
- "LanguageModel"
- ],
- "value": "__UNDEFINED__"
- }
- ],
- "pinned": false,
- "template": {
- "_type": "Component",
- "api_key": {
- "_input_type": "SecretStrInput",
- "advanced": false,
- "display_name": "API Key",
- "dynamic": false,
- "info": "The API Key to use for the model.",
- "input_types": [],
- "load_from_db": true,
- "name": "api_key",
- "password": true,
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "type": "str",
- "value": "WATSONX_API_KEY"
- },
- "code": {
- "advanced": true,
- "dynamic": true,
- "fileTypes": [],
- "file_path": "",
- "info": "",
- "list": false,
- "load_from_db": false,
- "multiline": true,
- "name": "code",
- "password": false,
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "type": "code",
- "value": "import json\nfrom typing import Any\n\nimport requests\nfrom langchain_ibm import ChatWatsonx\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging.logger import logger\nfrom langflow.schema.dotdict import dotdict\n\n\nclass WatsonxAIComponent(LCModelComponent):\n display_name = \"IBM watsonx.ai\"\n description = \"Generate text using IBM watsonx.ai foundation models.\"\n icon = \"WatsonxAI\"\n name = \"IBMwatsonxModel\"\n beta = False\n\n _default_models = [\"ibm/granite-3-2b-instruct\", \"ibm/granite-3-8b-instruct\", \"ibm/granite-13b-instruct-v2\"]\n\n inputs = [\n *LCModelComponent._base_inputs,\n DropdownInput(\n name=\"url\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API.\",\n value=None,\n options=[\n \"https://us-south.ml.cloud.ibm.com\",\n \"https://eu-de.ml.cloud.ibm.com\",\n \"https://eu-gb.ml.cloud.ibm.com\",\n \"https://au-syd.ml.cloud.ibm.com\",\n \"https://jp-tok.ml.cloud.ibm.com\",\n \"https://ca-tor.ml.cloud.ibm.com\",\n ],\n real_time_refresh=True,\n ),\n StrInput(\n name=\"project_id\",\n display_name=\"watsonx Project ID\",\n required=True,\n info=\"The project ID or deployment space ID that is associated with the foundation model.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"API Key\",\n info=\"The API Key to use for the model.\",\n required=True,\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=[],\n value=None,\n dynamic=True,\n required=True,\n ),\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate.\",\n range_spec=RangeSpec(min=1, max=4096),\n value=1000,\n ),\n StrInput(\n name=\"stop_sequence\",\n display_name=\"Stop Sequence\",\n advanced=True,\n info=\"Sequence where generation should stop.\",\n field_type=\"str\",\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n info=\"Controls randomness, higher values increase diversity.\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=2, step=0.01),\n advanced=True,\n ),\n SliderInput(\n name=\"top_p\",\n display_name=\"Top P\",\n info=\"The cumulative probability cutoff for token selection. 
\"\n \"Lower values mean sampling from a smaller, more top-weighted nucleus.\",\n value=0.9,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n SliderInput(\n name=\"frequency_penalty\",\n display_name=\"Frequency Penalty\",\n info=\"Penalty for frequency of token usage.\",\n value=0.5,\n range_spec=RangeSpec(min=-2.0, max=2.0, step=0.01),\n advanced=True,\n ),\n SliderInput(\n name=\"presence_penalty\",\n display_name=\"Presence Penalty\",\n info=\"Penalty for token presence in prior text.\",\n value=0.3,\n range_spec=RangeSpec(min=-2.0, max=2.0, step=0.01),\n advanced=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Random Seed\",\n advanced=True,\n info=\"The random seed for the model.\",\n value=8,\n ),\n BoolInput(\n name=\"logprobs\",\n display_name=\"Log Probabilities\",\n advanced=True,\n info=\"Whether to return log probabilities of the output tokens.\",\n value=True,\n ),\n IntInput(\n name=\"top_logprobs\",\n display_name=\"Top Log Probabilities\",\n advanced=True,\n info=\"Number of most likely tokens to return at each position.\",\n value=3,\n range_spec=RangeSpec(min=1, max=20),\n ),\n StrInput(\n name=\"logit_bias\",\n display_name=\"Logit Bias\",\n advanced=True,\n info='JSON string of token IDs to bias or suppress (e.g., {\"1003\": -100, \"1004\": 100}).',\n field_type=\"str\",\n ),\n ]\n\n @staticmethod\n def fetch_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\"version\": \"2024-09-16\", \"filters\": \"function_text_chat,!lifecycle_withdrawn\"}\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models. Using default models.\")\n return WatsonxAIComponent._default_models\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None):\n \"\"\"Update model options when URL or API key changes.\"\"\"\n logger.info(\"Updating build config. Field name: %s, Field value: %s\", field_name, field_value)\n\n if field_name == \"url\" and field_value:\n try:\n models = self.fetch_models(base_url=build_config.url.value)\n build_config.model_name.options = models\n if build_config.model_name.value:\n build_config.model_name.value = models[0]\n info_message = f\"Updated model options: {len(models)} models found in {build_config.url.value}\"\n logger.info(info_message)\n except Exception: # noqa: BLE001\n logger.exception(\"Error updating model options.\")\n\n def build_model(self) -> LanguageModel:\n # Parse logit_bias from JSON string if provided\n logit_bias = None\n if hasattr(self, \"logit_bias\") and self.logit_bias:\n try:\n logit_bias = json.loads(self.logit_bias)\n except json.JSONDecodeError:\n logger.warning(\"Invalid logit_bias JSON format. 
Using default instead.\")\n logit_bias = {\"1003\": -100, \"1004\": -100}\n\n chat_params = {\n \"max_tokens\": getattr(self, \"max_tokens\", None),\n \"temperature\": getattr(self, \"temperature\", None),\n \"top_p\": getattr(self, \"top_p\", None),\n \"frequency_penalty\": getattr(self, \"frequency_penalty\", None),\n \"presence_penalty\": getattr(self, \"presence_penalty\", None),\n \"seed\": getattr(self, \"seed\", None),\n \"stop\": [self.stop_sequence] if self.stop_sequence else [],\n \"n\": 1,\n \"logprobs\": getattr(self, \"logprobs\", True),\n \"top_logprobs\": getattr(self, \"top_logprobs\", None),\n \"time_limit\": 600000,\n \"logit_bias\": logit_bias,\n }\n\n return ChatWatsonx(\n apikey=SecretStr(self.api_key).get_secret_value(),\n url=self.url,\n project_id=self.project_id,\n model_id=self.model_name,\n params=chat_params,\n streaming=self.stream,\n )\n"
- },
- "frequency_penalty": {
- "_input_type": "SliderInput",
- "advanced": true,
- "display_name": "Frequency Penalty",
- "dynamic": false,
- "info": "Penalty for frequency of token usage.",
- "max_label": "",
- "max_label_icon": "",
- "min_label": "",
- "min_label_icon": "",
- "name": "frequency_penalty",
- "placeholder": "",
- "range_spec": {
- "max": 2,
- "min": -2,
- "step": 0.01,
- "step_type": "float"
- },
- "required": false,
- "show": true,
- "slider_buttons": false,
- "slider_buttons_options": [],
- "slider_input": false,
- "title_case": false,
- "tool_mode": false,
- "type": "slider",
- "value": 0.5
- },
- "input_value": {
- "_input_type": "MessageInput",
- "advanced": false,
- "display_name": "Input",
- "dynamic": false,
- "info": "",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "input_value",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "logit_bias": {
- "_input_type": "StrInput",
- "advanced": true,
- "display_name": "Logit Bias",
- "dynamic": false,
- "info": "JSON string of token IDs to bias or suppress (e.g., {\"1003\": -100, \"1004\": 100}).",
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "logit_bias",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "logprobs": {
- "_input_type": "BoolInput",
- "advanced": true,
- "display_name": "Log Probabilities",
- "dynamic": false,
- "info": "Whether to return log probabilities of the output tokens.",
- "list": false,
- "list_add_label": "Add More",
- "name": "logprobs",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "bool",
- "value": true
- },
- "max_tokens": {
- "_input_type": "IntInput",
- "advanced": true,
- "display_name": "Max Tokens",
- "dynamic": false,
- "info": "The maximum number of tokens to generate.",
- "list": false,
- "list_add_label": "Add More",
- "name": "max_tokens",
- "placeholder": "",
- "range_spec": {
- "max": 4096,
- "min": 1,
- "step": 0.1,
- "step_type": "float"
- },
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "int",
- "value": 1000
- },
- "model_name": {
- "_input_type": "DropdownInput",
- "advanced": false,
- "combobox": false,
- "dialog_inputs": {},
- "display_name": "Model Name",
- "dynamic": true,
- "info": "",
- "name": "model_name",
- "options": [
- "ibm/granite-3-2-8b-instruct",
- "ibm/granite-3-2b-instruct",
- "ibm/granite-3-3-8b-instruct",
- "ibm/granite-3-8b-instruct",
- "ibm/granite-guardian-3-2b",
- "ibm/granite-guardian-3-8b",
- "ibm/granite-vision-3-2-2b",
- "meta-llama/llama-3-2-11b-vision-instruct",
- "meta-llama/llama-3-2-90b-vision-instruct",
- "meta-llama/llama-3-3-70b-instruct",
- "meta-llama/llama-3-405b-instruct",
- "meta-llama/llama-4-maverick-17b-128e-instruct-fp8",
- "meta-llama/llama-guard-3-11b-vision",
- "mistralai/mistral-large",
- "mistralai/mistral-medium-2505",
- "mistralai/mistral-small-3-1-24b-instruct-2503",
- "mistralai/pixtral-12b",
- "openai/gpt-oss-120b"
- ],
- "options_metadata": [],
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "toggle": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "str",
- "value": "ibm/granite-3-2-8b-instruct"
- },
- "presence_penalty": {
- "_input_type": "SliderInput",
- "advanced": true,
- "display_name": "Presence Penalty",
- "dynamic": false,
- "info": "Penalty for token presence in prior text.",
- "max_label": "",
- "max_label_icon": "",
- "min_label": "",
- "min_label_icon": "",
- "name": "presence_penalty",
- "placeholder": "",
- "range_spec": {
- "max": 2,
- "min": -2,
- "step": 0.01,
- "step_type": "float"
- },
- "required": false,
- "show": true,
- "slider_buttons": false,
- "slider_buttons_options": [],
- "slider_input": false,
- "title_case": false,
- "tool_mode": false,
- "type": "slider",
- "value": 0.3
- },
- "project_id": {
- "_input_type": "StrInput",
- "advanced": false,
- "display_name": "watsonx Project ID",
- "dynamic": false,
- "info": "The project ID or deployment space ID that is associated with the foundation model.",
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": true,
- "name": "project_id",
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "str",
- "value": "WATSONX_PROJECT_ID"
- },
- "seed": {
- "_input_type": "IntInput",
- "advanced": true,
- "display_name": "Random Seed",
- "dynamic": false,
- "info": "The random seed for the model.",
- "list": false,
- "list_add_label": "Add More",
- "name": "seed",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "int",
- "value": 8
- },
- "stop_sequence": {
- "_input_type": "StrInput",
- "advanced": true,
- "display_name": "Stop Sequence",
- "dynamic": false,
- "info": "Sequence where generation should stop.",
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "stop_sequence",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "stream": {
- "_input_type": "BoolInput",
- "advanced": true,
- "display_name": "Stream",
- "dynamic": false,
- "info": "Stream the response from the model. Streaming works only in Chat.",
- "list": false,
- "list_add_label": "Add More",
- "name": "stream",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "bool",
- "value": false
- },
- "system_message": {
- "_input_type": "MultilineInput",
- "advanced": false,
- "copy_field": false,
- "display_name": "System Message",
- "dynamic": false,
- "info": "System message to pass to the model.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "multiline": true,
- "name": "system_message",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "temperature": {
- "_input_type": "SliderInput",
- "advanced": true,
- "display_name": "Temperature",
- "dynamic": false,
- "info": "Controls randomness, higher values increase diversity.",
- "max_label": "",
- "max_label_icon": "",
- "min_label": "",
- "min_label_icon": "",
- "name": "temperature",
- "placeholder": "",
- "range_spec": {
- "max": 2,
- "min": 0,
- "step": 0.01,
- "step_type": "float"
- },
- "required": false,
- "show": true,
- "slider_buttons": false,
- "slider_buttons_options": [],
- "slider_input": false,
- "title_case": false,
- "tool_mode": false,
- "type": "slider",
- "value": 0.1
- },
- "top_logprobs": {
- "_input_type": "IntInput",
- "advanced": true,
- "display_name": "Top Log Probabilities",
- "dynamic": false,
- "info": "Number of most likely tokens to return at each position.",
- "list": false,
- "list_add_label": "Add More",
- "name": "top_logprobs",
- "placeholder": "",
- "range_spec": {
- "max": 20,
- "min": 1,
- "step": 0.1,
- "step_type": "float"
- },
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "int",
- "value": 3
- },
- "top_p": {
- "_input_type": "SliderInput",
- "advanced": true,
- "display_name": "Top P",
- "dynamic": false,
- "info": "The cumulative probability cutoff for token selection. Lower values mean sampling from a smaller, more top-weighted nucleus.",
- "max_label": "",
- "max_label_icon": "",
- "min_label": "",
- "min_label_icon": "",
- "name": "top_p",
- "placeholder": "",
- "range_spec": {
- "max": 1,
- "min": 0,
- "step": 0.01,
- "step_type": "float"
- },
- "required": false,
- "show": true,
- "slider_buttons": false,
- "slider_buttons_options": [],
- "slider_input": false,
- "title_case": false,
- "tool_mode": false,
- "type": "slider",
- "value": 0.9
- },
- "url": {
- "_input_type": "DropdownInput",
- "advanced": false,
- "combobox": false,
- "dialog_inputs": {},
- "display_name": "watsonx API Endpoint",
- "dynamic": false,
- "info": "The base URL of the API.",
- "name": "url",
- "options": [
- "https://us-south.ml.cloud.ibm.com",
- "https://eu-de.ml.cloud.ibm.com",
- "https://eu-gb.ml.cloud.ibm.com",
- "https://au-syd.ml.cloud.ibm.com",
- "https://jp-tok.ml.cloud.ibm.com",
- "https://ca-tor.ml.cloud.ibm.com"
- ],
- "options_metadata": [],
- "placeholder": "",
- "real_time_refresh": true,
- "required": false,
- "show": true,
- "title_case": false,
- "toggle": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "str",
- "value": "https://us-south.ml.cloud.ibm.com"
+ ]
}
},
- "tool_mode": false
+ "tool_mode": false,
+ "official": false
},
- "selected_output": "model_output",
"showNode": true,
- "type": "IBMwatsonxModel"
- },
- "dragging": false,
- "id": "IBMwatsonxModel-jA4Nw",
- "measured": {
- "height": 632,
- "width": 320
+ "type": "IBMwatsonxModel",
+ "id": "IBMwatsonxModel-qXZxc",
+ "selected_output": "model_output"
},
+ "id": "IBMwatsonxModel-qXZxc",
"position": {
- "x": 371.93566807042805,
- "y": 197.47711431325635
+ "x": 0,
+ "y": 0
},
- "selected": false,
"type": "genericNode"
}
\ No newline at end of file
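
Both watsonx component exports in this patch (the one above and `flows/components/watsonx_llm_text.json` below) carry the same substantive change: the embedded component source migrates from the `langflow` package namespace to the `lfx` namespace, which is also why the `module` metadata, the `code_hash`, and the `langflow`/`lfx` dependency entry change. A minimal sketch of the delta, using only the import paths and calls that appear in the `code` values of this diff:

```python
# Removed ("-") code value: legacy langflow namespace.
#   from langflow.base.models.model import LCModelComponent
#   from langflow.logging.logger import logger
#   inputs = [*LCModelComponent._base_inputs, ...]

# Added ("+") code value: lfx namespace; the private _base_inputs
# attribute is replaced by a public accessor.
from lfx.base.models.model import LCModelComponent
from lfx.log.logger import logger


class WatsonxAIComponent(LCModelComponent):
    inputs = [
        *LCModelComponent.get_base_inputs(),
        # ...provider-specific inputs are unchanged by the migration...
    ]
```

The rest of the churn (reordered keys, new node ids, positions reset to the origin, and the added `official: false` flag) appears to come from re-exporting the components rather than from functional edits.
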
diff --git a/flows/components/watsonx_llm_text.json b/flows/components/watsonx_llm_text.json
index a2966a48..f0f42455 100644
--- a/flows/components/watsonx_llm_text.json
+++ b/flows/components/watsonx_llm_text.json
@@ -1,551 +1,521 @@
{
- "data": {
- "id": "IBMwatsonxModel-18kmA",
- "node": {
- "base_classes": [
- "LanguageModel",
- "Message"
- ],
- "beta": false,
- "conditional_paths": [],
- "custom_fields": {},
- "description": "Generate text using IBM watsonx.ai foundation models.",
- "display_name": "IBM watsonx.ai",
- "documentation": "",
- "edited": false,
- "field_order": [
- "input_value",
- "system_message",
- "stream",
- "url",
- "project_id",
- "api_key",
- "model_name",
- "max_tokens",
- "stop_sequence",
- "temperature",
- "top_p",
- "frequency_penalty",
- "presence_penalty",
- "seed",
- "logprobs",
- "top_logprobs",
- "logit_bias"
- ],
- "frozen": false,
- "icon": "WatsonxAI",
- "last_updated": "2025-09-22T20:03:31.248Z",
- "legacy": false,
- "metadata": {
- "code_hash": "7767fd69a954",
- "dependencies": {
- "dependencies": [
- {
- "name": "requests",
- "version": "2.32.5"
- },
- {
- "name": "langchain_ibm",
- "version": "0.3.16"
- },
- {
- "name": "pydantic",
- "version": "2.10.6"
- },
- {
- "name": "langflow",
- "version": null
- }
- ],
- "total_dependencies": 4
+ "data": {
+ "node": {
+ "template": {
+ "_type": "Component",
+ "api_key": {
+ "load_from_db": true,
+ "required": true,
+ "placeholder": "",
+ "show": true,
+ "name": "api_key",
+ "value": "WATSONX_API_KEY",
+ "display_name": "Watsonx API Key",
+ "advanced": false,
+ "input_types": [],
+ "dynamic": false,
+ "info": "The API Key to use for the model.",
+ "title_case": false,
+ "password": true,
+ "type": "str",
+ "_input_type": "SecretStrInput"
+ },
+ "code": {
+ "type": "code",
+ "required": true,
+ "placeholder": "",
+ "list": false,
+ "show": true,
+ "multiline": true,
+ "value": "import json\nfrom typing import Any\n\nimport requests\nfrom langchain_ibm import ChatWatsonx\nfrom pydantic.v1 import SecretStr\n\nfrom lfx.base.models.model import LCModelComponent\nfrom lfx.field_typing import LanguageModel\nfrom lfx.field_typing.range_spec import RangeSpec\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\n\n\nclass WatsonxAIComponent(LCModelComponent):\n display_name = \"IBM watsonx.ai\"\n description = \"Generate text using IBM watsonx.ai foundation models.\"\n icon = \"WatsonxAI\"\n name = \"IBMwatsonxModel\"\n beta = False\n\n _default_models = [\"ibm/granite-3-2b-instruct\", \"ibm/granite-3-8b-instruct\", \"ibm/granite-13b-instruct-v2\"]\n\n inputs = [\n *LCModelComponent.get_base_inputs(),\n DropdownInput(\n name=\"url\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API.\",\n value=None,\n options=[\n \"https://us-south.ml.cloud.ibm.com\",\n \"https://eu-de.ml.cloud.ibm.com\",\n \"https://eu-gb.ml.cloud.ibm.com\",\n \"https://au-syd.ml.cloud.ibm.com\",\n \"https://jp-tok.ml.cloud.ibm.com\",\n \"https://ca-tor.ml.cloud.ibm.com\",\n ],\n real_time_refresh=True,\n ),\n StrInput(\n name=\"project_id\",\n display_name=\"watsonx Project ID\",\n required=True,\n info=\"The project ID or deployment space ID that is associated with the foundation model.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Watsonx API Key\",\n info=\"The API Key to use for the model.\",\n required=True,\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=[],\n value=None,\n dynamic=True,\n required=True,\n ),\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate.\",\n range_spec=RangeSpec(min=1, max=4096),\n value=1000,\n ),\n StrInput(\n name=\"stop_sequence\",\n display_name=\"Stop Sequence\",\n advanced=True,\n info=\"Sequence where generation should stop.\",\n field_type=\"str\",\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n info=\"Controls randomness, higher values increase diversity.\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=2, step=0.01),\n advanced=True,\n ),\n SliderInput(\n name=\"top_p\",\n display_name=\"Top P\",\n info=\"The cumulative probability cutoff for token selection. 
\"\n \"Lower values mean sampling from a smaller, more top-weighted nucleus.\",\n value=0.9,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n SliderInput(\n name=\"frequency_penalty\",\n display_name=\"Frequency Penalty\",\n info=\"Penalty for frequency of token usage.\",\n value=0.5,\n range_spec=RangeSpec(min=-2.0, max=2.0, step=0.01),\n advanced=True,\n ),\n SliderInput(\n name=\"presence_penalty\",\n display_name=\"Presence Penalty\",\n info=\"Penalty for token presence in prior text.\",\n value=0.3,\n range_spec=RangeSpec(min=-2.0, max=2.0, step=0.01),\n advanced=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Random Seed\",\n advanced=True,\n info=\"The random seed for the model.\",\n value=8,\n ),\n BoolInput(\n name=\"logprobs\",\n display_name=\"Log Probabilities\",\n advanced=True,\n info=\"Whether to return log probabilities of the output tokens.\",\n value=True,\n ),\n IntInput(\n name=\"top_logprobs\",\n display_name=\"Top Log Probabilities\",\n advanced=True,\n info=\"Number of most likely tokens to return at each position.\",\n value=3,\n range_spec=RangeSpec(min=1, max=20),\n ),\n StrInput(\n name=\"logit_bias\",\n display_name=\"Logit Bias\",\n advanced=True,\n info='JSON string of token IDs to bias or suppress (e.g., {\"1003\": -100, \"1004\": 100}).',\n field_type=\"str\",\n ),\n ]\n\n @staticmethod\n def fetch_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\"version\": \"2024-09-16\", \"filters\": \"function_text_chat,!lifecycle_withdrawn\"}\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models. Using default models.\")\n return WatsonxAIComponent._default_models\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None):\n \"\"\"Update model options when URL or API key changes.\"\"\"\n logger.info(\"Updating build config. Field name: %s, Field value: %s\", field_name, field_value)\n\n if field_name == \"url\" and field_value:\n try:\n models = self.fetch_models(base_url=build_config.url.value)\n build_config.model_name.options = models\n if build_config.model_name.value:\n build_config.model_name.value = models[0]\n info_message = f\"Updated model options: {len(models)} models found in {build_config.url.value}\"\n logger.info(info_message)\n except Exception: # noqa: BLE001\n logger.exception(\"Error updating model options.\")\n\n def build_model(self) -> LanguageModel:\n # Parse logit_bias from JSON string if provided\n logit_bias = None\n if hasattr(self, \"logit_bias\") and self.logit_bias:\n try:\n logit_bias = json.loads(self.logit_bias)\n except json.JSONDecodeError:\n logger.warning(\"Invalid logit_bias JSON format. 
Using default instead.\")\n logit_bias = {\"1003\": -100, \"1004\": -100}\n\n chat_params = {\n \"max_tokens\": getattr(self, \"max_tokens\", None),\n \"temperature\": getattr(self, \"temperature\", None),\n \"top_p\": getattr(self, \"top_p\", None),\n \"frequency_penalty\": getattr(self, \"frequency_penalty\", None),\n \"presence_penalty\": getattr(self, \"presence_penalty\", None),\n \"seed\": getattr(self, \"seed\", None),\n \"stop\": [self.stop_sequence] if self.stop_sequence else [],\n \"n\": 1,\n \"logprobs\": getattr(self, \"logprobs\", True),\n \"top_logprobs\": getattr(self, \"top_logprobs\", None),\n \"time_limit\": 600000,\n \"logit_bias\": logit_bias,\n }\n\n return ChatWatsonx(\n apikey=SecretStr(self.api_key).get_secret_value(),\n url=self.url,\n project_id=self.project_id,\n model_id=self.model_name,\n params=chat_params,\n streaming=self.stream,\n )\n",
+ "fileTypes": [],
+ "file_path": "",
+ "password": false,
+ "name": "code",
+ "advanced": true,
+ "dynamic": true,
+ "info": "",
+ "load_from_db": false,
+ "title_case": false
+ },
+ "frequency_penalty": {
+ "tool_mode": false,
+ "min_label": "",
+ "max_label": "",
+ "min_label_icon": "",
+ "max_label_icon": "",
+ "slider_buttons": false,
+ "slider_buttons_options": [],
+ "slider_input": false,
+ "range_spec": {
+ "step_type": "float",
+ "min": -2,
+ "max": 2,
+ "step": 0.01
},
- "keywords": [
- "model",
- "llm",
- "language model",
- "large language model"
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "frequency_penalty",
+ "value": 0.5,
+ "display_name": "Frequency Penalty",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Penalty for frequency of token usage.",
+ "title_case": false,
+ "type": "slider",
+ "_input_type": "SliderInput"
+ },
+ "input_value": {
+ "trace_as_input": true,
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "load_from_db": false,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "input_value",
+ "value": "",
+ "display_name": "Input",
+ "advanced": false,
+ "input_types": [
+ "Message"
],
- "module": "langflow.components.ibm.watsonx.WatsonxAIComponent"
+ "dynamic": false,
+ "info": "",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "MessageInput"
},
- "minimized": false,
- "output_types": [],
- "outputs": [
- {
- "allows_loop": false,
- "cache": true,
- "display_name": "Model Response",
- "group_outputs": false,
- "method": "text_response",
- "name": "text_output",
- "options": null,
- "required_inputs": null,
- "selected": "Message",
- "tool_mode": true,
- "types": [
- "Message"
- ],
- "value": "__UNDEFINED__"
- },
- {
- "allows_loop": false,
- "cache": true,
- "display_name": "Language Model",
- "group_outputs": false,
- "method": "build_model",
- "name": "model_output",
- "options": null,
- "required_inputs": null,
- "selected": "LanguageModel",
- "tool_mode": true,
- "types": [
- "LanguageModel"
- ],
- "value": "__UNDEFINED__"
- }
- ],
- "pinned": false,
- "template": {
- "_type": "Component",
- "api_key": {
- "_input_type": "SecretStrInput",
- "advanced": false,
- "display_name": "API Key",
- "dynamic": false,
- "info": "The API Key to use for the model.",
- "input_types": [],
- "load_from_db": true,
- "name": "api_key",
- "password": true,
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "type": "str",
- "value": "WATSONX_API_KEY"
- },
- "code": {
- "advanced": true,
- "dynamic": true,
- "fileTypes": [],
- "file_path": "",
- "info": "",
- "list": false,
- "load_from_db": false,
- "multiline": true,
- "name": "code",
- "password": false,
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "type": "code",
- "value": "import json\nfrom typing import Any\n\nimport requests\nfrom langchain_ibm import ChatWatsonx\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs.inputs import BoolInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\nfrom langflow.logging.logger import logger\nfrom langflow.schema.dotdict import dotdict\n\n\nclass WatsonxAIComponent(LCModelComponent):\n display_name = \"IBM watsonx.ai\"\n description = \"Generate text using IBM watsonx.ai foundation models.\"\n icon = \"WatsonxAI\"\n name = \"IBMwatsonxModel\"\n beta = False\n\n _default_models = [\"ibm/granite-3-2b-instruct\", \"ibm/granite-3-8b-instruct\", \"ibm/granite-13b-instruct-v2\"]\n\n inputs = [\n *LCModelComponent._base_inputs,\n DropdownInput(\n name=\"url\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API.\",\n value=None,\n options=[\n \"https://us-south.ml.cloud.ibm.com\",\n \"https://eu-de.ml.cloud.ibm.com\",\n \"https://eu-gb.ml.cloud.ibm.com\",\n \"https://au-syd.ml.cloud.ibm.com\",\n \"https://jp-tok.ml.cloud.ibm.com\",\n \"https://ca-tor.ml.cloud.ibm.com\",\n ],\n real_time_refresh=True,\n ),\n StrInput(\n name=\"project_id\",\n display_name=\"watsonx Project ID\",\n required=True,\n info=\"The project ID or deployment space ID that is associated with the foundation model.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"API Key\",\n info=\"The API Key to use for the model.\",\n required=True,\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n options=[],\n value=None,\n dynamic=True,\n required=True,\n ),\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate.\",\n range_spec=RangeSpec(min=1, max=4096),\n value=1000,\n ),\n StrInput(\n name=\"stop_sequence\",\n display_name=\"Stop Sequence\",\n advanced=True,\n info=\"Sequence where generation should stop.\",\n field_type=\"str\",\n ),\n SliderInput(\n name=\"temperature\",\n display_name=\"Temperature\",\n info=\"Controls randomness, higher values increase diversity.\",\n value=0.1,\n range_spec=RangeSpec(min=0, max=2, step=0.01),\n advanced=True,\n ),\n SliderInput(\n name=\"top_p\",\n display_name=\"Top P\",\n info=\"The cumulative probability cutoff for token selection. 
\"\n \"Lower values mean sampling from a smaller, more top-weighted nucleus.\",\n value=0.9,\n range_spec=RangeSpec(min=0, max=1, step=0.01),\n advanced=True,\n ),\n SliderInput(\n name=\"frequency_penalty\",\n display_name=\"Frequency Penalty\",\n info=\"Penalty for frequency of token usage.\",\n value=0.5,\n range_spec=RangeSpec(min=-2.0, max=2.0, step=0.01),\n advanced=True,\n ),\n SliderInput(\n name=\"presence_penalty\",\n display_name=\"Presence Penalty\",\n info=\"Penalty for token presence in prior text.\",\n value=0.3,\n range_spec=RangeSpec(min=-2.0, max=2.0, step=0.01),\n advanced=True,\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Random Seed\",\n advanced=True,\n info=\"The random seed for the model.\",\n value=8,\n ),\n BoolInput(\n name=\"logprobs\",\n display_name=\"Log Probabilities\",\n advanced=True,\n info=\"Whether to return log probabilities of the output tokens.\",\n value=True,\n ),\n IntInput(\n name=\"top_logprobs\",\n display_name=\"Top Log Probabilities\",\n advanced=True,\n info=\"Number of most likely tokens to return at each position.\",\n value=3,\n range_spec=RangeSpec(min=1, max=20),\n ),\n StrInput(\n name=\"logit_bias\",\n display_name=\"Logit Bias\",\n advanced=True,\n info='JSON string of token IDs to bias or suppress (e.g., {\"1003\": -100, \"1004\": 100}).',\n field_type=\"str\",\n ),\n ]\n\n @staticmethod\n def fetch_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\"version\": \"2024-09-16\", \"filters\": \"function_text_chat,!lifecycle_withdrawn\"}\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models. Using default models.\")\n return WatsonxAIComponent._default_models\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None):\n \"\"\"Update model options when URL or API key changes.\"\"\"\n logger.info(\"Updating build config. Field name: %s, Field value: %s\", field_name, field_value)\n\n if field_name == \"url\" and field_value:\n try:\n models = self.fetch_models(base_url=build_config.url.value)\n build_config.model_name.options = models\n if build_config.model_name.value:\n build_config.model_name.value = models[0]\n info_message = f\"Updated model options: {len(models)} models found in {build_config.url.value}\"\n logger.info(info_message)\n except Exception: # noqa: BLE001\n logger.exception(\"Error updating model options.\")\n\n def build_model(self) -> LanguageModel:\n # Parse logit_bias from JSON string if provided\n logit_bias = None\n if hasattr(self, \"logit_bias\") and self.logit_bias:\n try:\n logit_bias = json.loads(self.logit_bias)\n except json.JSONDecodeError:\n logger.warning(\"Invalid logit_bias JSON format. 
Using default instead.\")\n logit_bias = {\"1003\": -100, \"1004\": -100}\n\n chat_params = {\n \"max_tokens\": getattr(self, \"max_tokens\", None),\n \"temperature\": getattr(self, \"temperature\", None),\n \"top_p\": getattr(self, \"top_p\", None),\n \"frequency_penalty\": getattr(self, \"frequency_penalty\", None),\n \"presence_penalty\": getattr(self, \"presence_penalty\", None),\n \"seed\": getattr(self, \"seed\", None),\n \"stop\": [self.stop_sequence] if self.stop_sequence else [],\n \"n\": 1,\n \"logprobs\": getattr(self, \"logprobs\", True),\n \"top_logprobs\": getattr(self, \"top_logprobs\", None),\n \"time_limit\": 600000,\n \"logit_bias\": logit_bias,\n }\n\n return ChatWatsonx(\n apikey=SecretStr(self.api_key).get_secret_value(),\n url=self.url,\n project_id=self.project_id,\n model_id=self.model_name,\n params=chat_params,\n streaming=self.stream,\n )\n"
- },
- "frequency_penalty": {
- "_input_type": "SliderInput",
- "advanced": true,
- "display_name": "Frequency Penalty",
- "dynamic": false,
- "info": "Penalty for frequency of token usage.",
- "max_label": "",
- "max_label_icon": "",
- "min_label": "",
- "min_label_icon": "",
- "name": "frequency_penalty",
- "placeholder": "",
- "range_spec": {
- "max": 2,
- "min": -2,
- "step": 0.01,
- "step_type": "float"
- },
- "required": false,
- "show": true,
- "slider_buttons": false,
- "slider_buttons_options": [],
- "slider_input": false,
- "title_case": false,
- "tool_mode": false,
- "type": "slider",
- "value": 0.5
- },
- "input_value": {
- "_input_type": "MessageInput",
- "advanced": false,
- "display_name": "Input",
- "dynamic": false,
- "info": "",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "input_value",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "logit_bias": {
- "_input_type": "StrInput",
- "advanced": true,
- "display_name": "Logit Bias",
- "dynamic": false,
- "info": "JSON string of token IDs to bias or suppress (e.g., {\"1003\": -100, \"1004\": 100}).",
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "logit_bias",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "logprobs": {
- "_input_type": "BoolInput",
- "advanced": true,
- "display_name": "Log Probabilities",
- "dynamic": false,
- "info": "Whether to return log probabilities of the output tokens.",
- "list": false,
- "list_add_label": "Add More",
- "name": "logprobs",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "bool",
- "value": true
- },
- "max_tokens": {
- "_input_type": "IntInput",
- "advanced": true,
- "display_name": "Max Tokens",
- "dynamic": false,
- "info": "The maximum number of tokens to generate.",
- "list": false,
- "list_add_label": "Add More",
- "name": "max_tokens",
- "placeholder": "",
- "range_spec": {
- "max": 4096,
- "min": 1,
- "step": 0.1,
- "step_type": "float"
- },
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "int",
- "value": 1000
- },
- "model_name": {
- "_input_type": "DropdownInput",
- "advanced": false,
- "combobox": false,
- "dialog_inputs": {},
- "display_name": "Model Name",
- "dynamic": true,
- "info": "",
- "name": "model_name",
- "options": [
- "ibm/granite-3-2-8b-instruct",
- "ibm/granite-3-2b-instruct",
- "ibm/granite-3-3-8b-instruct",
- "ibm/granite-3-8b-instruct",
- "ibm/granite-guardian-3-2b",
- "ibm/granite-guardian-3-8b",
- "ibm/granite-vision-3-2-2b",
- "meta-llama/llama-3-2-11b-vision-instruct",
- "meta-llama/llama-3-2-90b-vision-instruct",
- "meta-llama/llama-3-3-70b-instruct",
- "meta-llama/llama-3-405b-instruct",
- "meta-llama/llama-4-maverick-17b-128e-instruct-fp8",
- "meta-llama/llama-guard-3-11b-vision",
- "mistralai/mistral-large",
- "mistralai/mistral-medium-2505",
- "mistralai/mistral-small-3-1-24b-instruct-2503",
- "mistralai/pixtral-12b",
- "openai/gpt-oss-120b"
- ],
- "options_metadata": [],
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "toggle": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "str",
- "value": "ibm/granite-3-2-8b-instruct"
- },
- "presence_penalty": {
- "_input_type": "SliderInput",
- "advanced": true,
- "display_name": "Presence Penalty",
- "dynamic": false,
- "info": "Penalty for token presence in prior text.",
- "max_label": "",
- "max_label_icon": "",
- "min_label": "",
- "min_label_icon": "",
- "name": "presence_penalty",
- "placeholder": "",
- "range_spec": {
- "max": 2,
- "min": -2,
- "step": 0.01,
- "step_type": "float"
- },
- "required": false,
- "show": true,
- "slider_buttons": false,
- "slider_buttons_options": [],
- "slider_input": false,
- "title_case": false,
- "tool_mode": false,
- "type": "slider",
- "value": 0.3
- },
- "project_id": {
- "_input_type": "StrInput",
- "advanced": false,
- "display_name": "watsonx Project ID",
- "dynamic": false,
- "info": "The project ID or deployment space ID that is associated with the foundation model.",
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": true,
- "name": "project_id",
- "placeholder": "",
- "required": true,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "str",
- "value": "WATSONX_PROJECT_ID"
- },
- "seed": {
- "_input_type": "IntInput",
- "advanced": true,
- "display_name": "Random Seed",
- "dynamic": false,
- "info": "The random seed for the model.",
- "list": false,
- "list_add_label": "Add More",
- "name": "seed",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "int",
- "value": 8
- },
- "stop_sequence": {
- "_input_type": "StrInput",
- "advanced": true,
- "display_name": "Stop Sequence",
- "dynamic": false,
- "info": "Sequence where generation should stop.",
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "name": "stop_sequence",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "stream": {
- "_input_type": "BoolInput",
- "advanced": true,
- "display_name": "Stream",
- "dynamic": false,
- "info": "Stream the response from the model. Streaming works only in Chat.",
- "list": false,
- "list_add_label": "Add More",
- "name": "stream",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "bool",
- "value": false
- },
- "system_message": {
- "_input_type": "MultilineInput",
- "advanced": false,
- "copy_field": false,
- "display_name": "System Message",
- "dynamic": false,
- "info": "System message to pass to the model.",
- "input_types": [
- "Message"
- ],
- "list": false,
- "list_add_label": "Add More",
- "load_from_db": false,
- "multiline": true,
- "name": "system_message",
- "placeholder": "",
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_input": true,
- "trace_as_metadata": true,
- "type": "str",
- "value": ""
- },
- "temperature": {
- "_input_type": "SliderInput",
- "advanced": true,
- "display_name": "Temperature",
- "dynamic": false,
- "info": "Controls randomness, higher values increase diversity.",
- "max_label": "",
- "max_label_icon": "",
- "min_label": "",
- "min_label_icon": "",
- "name": "temperature",
- "placeholder": "",
- "range_spec": {
- "max": 2,
- "min": 0,
- "step": 0.01,
- "step_type": "float"
- },
- "required": false,
- "show": true,
- "slider_buttons": false,
- "slider_buttons_options": [],
- "slider_input": false,
- "title_case": false,
- "tool_mode": false,
- "type": "slider",
- "value": 0.1
- },
- "top_logprobs": {
- "_input_type": "IntInput",
- "advanced": true,
- "display_name": "Top Log Probabilities",
- "dynamic": false,
- "info": "Number of most likely tokens to return at each position.",
- "list": false,
- "list_add_label": "Add More",
- "name": "top_logprobs",
- "placeholder": "",
- "range_spec": {
- "max": 20,
- "min": 1,
- "step": 0.1,
- "step_type": "float"
- },
- "required": false,
- "show": true,
- "title_case": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "int",
- "value": 3
- },
- "top_p": {
- "_input_type": "SliderInput",
- "advanced": true,
- "display_name": "Top P",
- "dynamic": false,
- "info": "The cumulative probability cutoff for token selection. Lower values mean sampling from a smaller, more top-weighted nucleus.",
- "max_label": "",
- "max_label_icon": "",
- "min_label": "",
- "min_label_icon": "",
- "name": "top_p",
- "placeholder": "",
- "range_spec": {
- "max": 1,
- "min": 0,
- "step": 0.01,
- "step_type": "float"
- },
- "required": false,
- "show": true,
- "slider_buttons": false,
- "slider_buttons_options": [],
- "slider_input": false,
- "title_case": false,
- "tool_mode": false,
- "type": "slider",
- "value": 0.9
- },
- "url": {
- "_input_type": "DropdownInput",
- "advanced": false,
- "combobox": false,
- "dialog_inputs": {},
- "display_name": "watsonx API Endpoint",
- "dynamic": false,
- "info": "The base URL of the API.",
- "name": "url",
- "options": [
- "https://us-south.ml.cloud.ibm.com",
- "https://eu-de.ml.cloud.ibm.com",
- "https://eu-gb.ml.cloud.ibm.com",
- "https://au-syd.ml.cloud.ibm.com",
- "https://jp-tok.ml.cloud.ibm.com",
- "https://ca-tor.ml.cloud.ibm.com"
- ],
- "options_metadata": [],
- "placeholder": "",
- "real_time_refresh": true,
- "required": false,
- "show": true,
- "title_case": false,
- "toggle": false,
- "tool_mode": false,
- "trace_as_metadata": true,
- "type": "str",
- "value": "https://us-south.ml.cloud.ibm.com"
- }
+ "logit_bias": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "load_from_db": false,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "logit_bias",
+ "value": "",
+ "display_name": "Logit Bias",
+ "advanced": true,
+ "dynamic": false,
+ "info": "JSON string of token IDs to bias or suppress (e.g., {\"1003\": -100, \"1004\": 100}).",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "StrInput"
},
- "tool_mode": false
+ "logprobs": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "logprobs",
+ "value": true,
+ "display_name": "Log Probabilities",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Whether to return log probabilities of the output tokens.",
+ "title_case": false,
+ "type": "bool",
+ "_input_type": "BoolInput"
+ },
+ "max_tokens": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "range_spec": {
+ "step_type": "float",
+ "min": 1,
+ "max": 4096,
+ "step": 0.1
+ },
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "max_tokens",
+ "value": 1000,
+ "display_name": "Max Tokens",
+ "advanced": true,
+ "dynamic": false,
+ "info": "The maximum number of tokens to generate.",
+ "title_case": false,
+ "type": "int",
+ "_input_type": "IntInput"
+ },
+ "model_name": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "options": [],
+ "options_metadata": [],
+ "combobox": false,
+ "dialog_inputs": {},
+ "toggle": false,
+ "required": true,
+ "placeholder": "",
+ "show": true,
+ "name": "model_name",
+ "display_name": "Model Name",
+ "advanced": false,
+ "dynamic": true,
+ "info": "",
+ "title_case": false,
+ "external_options": {},
+ "type": "str",
+ "_input_type": "DropdownInput"
+ },
+ "presence_penalty": {
+ "tool_mode": false,
+ "min_label": "",
+ "max_label": "",
+ "min_label_icon": "",
+ "max_label_icon": "",
+ "slider_buttons": false,
+ "slider_buttons_options": [],
+ "slider_input": false,
+ "range_spec": {
+ "step_type": "float",
+ "min": -2,
+ "max": 2,
+ "step": 0.01
+ },
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "presence_penalty",
+ "value": 0.3,
+ "display_name": "Presence Penalty",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Penalty for token presence in prior text.",
+ "title_case": false,
+ "type": "slider",
+ "_input_type": "SliderInput"
+ },
+ "project_id": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "load_from_db": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": true,
+ "placeholder": "",
+ "show": true,
+ "name": "project_id",
+ "value": "WATSONX_PROJECT_ID",
+ "display_name": "watsonx Project ID",
+ "advanced": false,
+ "dynamic": false,
+ "info": "The project ID or deployment space ID that is associated with the foundation model.",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "StrInput"
+ },
+ "seed": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "seed",
+ "value": 8,
+ "display_name": "Random Seed",
+ "advanced": true,
+ "dynamic": false,
+ "info": "The random seed for the model.",
+ "title_case": false,
+ "type": "int",
+ "_input_type": "IntInput"
+ },
+ "stop_sequence": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "load_from_db": false,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "stop_sequence",
+ "value": "",
+ "display_name": "Stop Sequence",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Sequence where generation should stop.",
+ "title_case": false,
+ "type": "str",
+ "_input_type": "StrInput"
+ },
+ "stream": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "stream",
+ "value": false,
+ "display_name": "Stream",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Stream the response from the model. Streaming works only in Chat.",
+ "title_case": false,
+ "type": "bool",
+ "_input_type": "BoolInput"
+ },
+ "system_message": {
+ "tool_mode": false,
+ "trace_as_input": true,
+ "multiline": true,
+ "trace_as_metadata": true,
+ "load_from_db": false,
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "system_message",
+ "value": "",
+ "display_name": "System Message",
+ "advanced": false,
+ "input_types": [
+ "Message"
+ ],
+ "dynamic": false,
+ "info": "System message to pass to the model.",
+ "title_case": false,
+ "copy_field": false,
+ "type": "str",
+ "_input_type": "MultilineInput"
+ },
+ "temperature": {
+ "tool_mode": false,
+ "min_label": "",
+ "max_label": "",
+ "min_label_icon": "",
+ "max_label_icon": "",
+ "slider_buttons": false,
+ "slider_buttons_options": [],
+ "slider_input": false,
+ "range_spec": {
+ "step_type": "float",
+ "min": 0,
+ "max": 2,
+ "step": 0.01
+ },
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "temperature",
+ "value": 0.1,
+ "display_name": "Temperature",
+ "advanced": true,
+ "dynamic": false,
+          "info": "Controls randomness; higher values increase diversity.",
+ "title_case": false,
+ "type": "slider",
+ "_input_type": "SliderInput"
+ },
+ "top_logprobs": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "range_spec": {
+ "step_type": "float",
+ "min": 1,
+ "max": 20,
+ "step": 0.1
+ },
+ "list": false,
+ "list_add_label": "Add More",
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "top_logprobs",
+ "value": 3,
+ "display_name": "Top Log Probabilities",
+ "advanced": true,
+ "dynamic": false,
+ "info": "Number of most likely tokens to return at each position.",
+ "title_case": false,
+ "type": "int",
+ "_input_type": "IntInput"
+ },
+ "top_p": {
+ "tool_mode": false,
+ "min_label": "",
+ "max_label": "",
+ "min_label_icon": "",
+ "max_label_icon": "",
+ "slider_buttons": false,
+ "slider_buttons_options": [],
+ "slider_input": false,
+ "range_spec": {
+ "step_type": "float",
+ "min": 0,
+ "max": 1,
+ "step": 0.01
+ },
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "top_p",
+ "value": 0.9,
+ "display_name": "Top P",
+ "advanced": true,
+ "dynamic": false,
+ "info": "The cumulative probability cutoff for token selection. Lower values mean sampling from a smaller, more top-weighted nucleus.",
+ "title_case": false,
+ "type": "slider",
+ "_input_type": "SliderInput"
+ },
+ "url": {
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "options": [
+ "https://us-south.ml.cloud.ibm.com",
+ "https://eu-de.ml.cloud.ibm.com",
+ "https://eu-gb.ml.cloud.ibm.com",
+ "https://au-syd.ml.cloud.ibm.com",
+ "https://jp-tok.ml.cloud.ibm.com",
+ "https://ca-tor.ml.cloud.ibm.com"
+ ],
+ "options_metadata": [],
+ "combobox": false,
+ "dialog_inputs": {},
+ "toggle": false,
+ "required": false,
+ "placeholder": "",
+ "show": true,
+ "name": "url",
+ "display_name": "watsonx API Endpoint",
+ "advanced": false,
+ "dynamic": false,
+ "info": "The base URL of the API.",
+ "real_time_refresh": true,
+ "title_case": false,
+ "external_options": {},
+ "type": "str",
+ "_input_type": "DropdownInput"
+ }
},
- "selected_output": "text_output",
- "showNode": true,
- "type": "IBMwatsonxModel"
+ "description": "Generate text using IBM watsonx.ai foundation models.",
+ "icon": "WatsonxAI",
+ "base_classes": [
+ "LanguageModel",
+ "Message"
+ ],
+ "display_name": "IBM watsonx.ai",
+ "documentation": "",
+ "minimized": false,
+ "custom_fields": {},
+ "output_types": [],
+ "pinned": false,
+ "conditional_paths": [],
+ "frozen": false,
+ "outputs": [
+ {
+ "types": [
+ "Message"
+ ],
+ "selected": "Message",
+ "name": "text_output",
+ "display_name": "Model Response",
+ "method": "text_response",
+ "value": "__UNDEFINED__",
+ "cache": true,
+ "allows_loop": false,
+ "group_outputs": false,
+ "tool_mode": true
+ },
+ {
+ "types": [
+ "LanguageModel"
+ ],
+ "name": "model_output",
+ "display_name": "Language Model",
+ "method": "build_model",
+ "value": "__UNDEFINED__",
+ "cache": true,
+ "allows_loop": false,
+ "group_outputs": false,
+ "tool_mode": true
+ }
+ ],
+ "field_order": [
+ "input_value",
+ "system_message",
+ "stream",
+ "url",
+ "project_id",
+ "api_key",
+ "model_name",
+ "max_tokens",
+ "stop_sequence",
+ "temperature",
+ "top_p",
+ "frequency_penalty",
+ "presence_penalty",
+ "seed",
+ "logprobs",
+ "top_logprobs",
+ "logit_bias"
+ ],
+ "beta": false,
+ "legacy": false,
+ "edited": false,
+ "metadata": {
+ "keywords": [
+ "model",
+ "llm",
+ "language model",
+ "large language model"
+ ],
+ "module": "lfx.components.ibm.watsonx.WatsonxAIComponent",
+ "code_hash": "85c24939214c",
+ "dependencies": {
+ "total_dependencies": 4,
+ "dependencies": [
+ {
+ "name": "requests",
+ "version": "2.32.5"
+ },
+ {
+ "name": "langchain_ibm",
+ "version": "0.3.16"
+ },
+ {
+ "name": "pydantic",
+ "version": "2.10.6"
+ },
+ {
+ "name": "lfx",
+ "version": null
+ }
+ ]
+ }
+ },
+ "tool_mode": false,
+ "official": false
},
- "dragging": false,
- "id": "IBMwatsonxModel-18kmA",
- "measured": {
- "height": 632,
- "width": 320
- },
- "position": {
- "x": 370.8989669694083,
- "y": 184
- },
- "selected": true,
- "type": "genericNode"
- }
\ No newline at end of file
+ "showNode": true,
+ "type": "IBMwatsonxModel",
+ "id": "IBMwatsonxModel-qXZxc",
+ "selected_output": "text_output"
+ },
+ "id": "IBMwatsonxModel-qXZxc",
+ "position": {
+ "x": 0,
+ "y": 0
+ },
+ "type": "genericNode"
+}
\ No newline at end of file
diff --git a/flows/ingestion_flow.json b/flows/ingestion_flow.json
index fb155916..12cf5b63 100644
--- a/flows/ingestion_flow.json
+++ b/flows/ingestion_flow.json
@@ -61,6 +61,8 @@
"targetHandle": "{œfieldNameœ:œdata_inputsœ,œidœ:œSplitText-QIKhgœ,œinputTypesœ:[œDataœ,œDataFrameœ,œMessageœ],œtypeœ:œotherœ}"
},
{
+ "animated": false,
+ "className": "",
"data": {
"sourceHandle": {
"dataType": "EmbeddingModel",
@@ -80,10 +82,155 @@
}
},
"id": "xy-edge__EmbeddingModel-eZ6bT{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-eZ6bTœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}-OpenSearchHybrid-Ve6bS{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchHybrid-Ve6bSœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}",
+ "selected": false,
"source": "EmbeddingModel-eZ6bT",
"sourceHandle": "{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-eZ6bTœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}",
"target": "OpenSearchHybrid-Ve6bS",
"targetHandle": "{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchHybrid-Ve6bSœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}"
+ },
+ {
+ "animated": false,
+ "className": "",
+ "data": {
+ "sourceHandle": {
+ "dataType": "SecretInput",
+ "id": "SecretInput-F34VJ",
+ "name": "text",
+ "output_types": [
+ "Message"
+ ]
+ },
+ "targetHandle": {
+ "fieldName": "dynamic_connector_type",
+ "id": "AdvancedDynamicFormBuilder-81Exw",
+ "inputTypes": [
+ "Text",
+ "Message"
+ ],
+ "type": "str"
+ }
+ },
+ "id": "xy-edge__SecretInput-F34VJ{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-F34VJœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-81Exw{œfieldNameœ:œdynamic_connector_typeœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}",
+ "selected": false,
+ "source": "SecretInput-F34VJ",
+ "sourceHandle": "{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-F34VJœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}",
+ "target": "AdvancedDynamicFormBuilder-81Exw",
+ "targetHandle": "{œfieldNameœ:œdynamic_connector_typeœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}"
+ },
+ {
+ "animated": false,
+ "className": "",
+ "data": {
+ "sourceHandle": {
+ "dataType": "SecretInput",
+ "id": "SecretInput-b2cab",
+ "name": "text",
+ "output_types": [
+ "Message"
+ ]
+ },
+ "targetHandle": {
+ "fieldName": "dynamic_owner",
+ "id": "AdvancedDynamicFormBuilder-81Exw",
+ "inputTypes": [
+ "Text",
+ "Message"
+ ],
+ "type": "str"
+ }
+ },
+ "id": "xy-edge__SecretInput-b2cab{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-b2cabœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-81Exw{œfieldNameœ:œdynamic_ownerœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}",
+ "selected": false,
+ "source": "SecretInput-b2cab",
+ "sourceHandle": "{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-b2cabœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}",
+ "target": "AdvancedDynamicFormBuilder-81Exw",
+ "targetHandle": "{œfieldNameœ:œdynamic_ownerœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}"
+ },
+ {
+ "animated": false,
+ "className": "",
+ "data": {
+ "sourceHandle": {
+ "dataType": "SecretInput",
+ "id": "SecretInput-ZVfuS",
+ "name": "text",
+ "output_types": [
+ "Message"
+ ]
+ },
+ "targetHandle": {
+ "fieldName": "dynamic_owner_email",
+ "id": "AdvancedDynamicFormBuilder-81Exw",
+ "inputTypes": [
+ "Text",
+ "Message"
+ ],
+ "type": "str"
+ }
+ },
+ "id": "xy-edge__SecretInput-ZVfuS{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-ZVfuSœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-81Exw{œfieldNameœ:œdynamic_owner_emailœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}",
+ "selected": false,
+ "source": "SecretInput-ZVfuS",
+ "sourceHandle": "{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-ZVfuSœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}",
+ "target": "AdvancedDynamicFormBuilder-81Exw",
+ "targetHandle": "{œfieldNameœ:œdynamic_owner_emailœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}"
+ },
+ {
+ "animated": false,
+ "className": "",
+ "data": {
+ "sourceHandle": {
+ "dataType": "SecretInput",
+ "id": "SecretInput-Iqtxd",
+ "name": "text",
+ "output_types": [
+ "Message"
+ ]
+ },
+ "targetHandle": {
+ "fieldName": "dynamic_owner_name",
+ "id": "AdvancedDynamicFormBuilder-81Exw",
+ "inputTypes": [
+ "Text",
+ "Message"
+ ],
+ "type": "str"
+ }
+ },
+ "id": "xy-edge__SecretInput-Iqtxd{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-Iqtxdœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-81Exw{œfieldNameœ:œdynamic_owner_nameœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}",
+ "selected": false,
+ "source": "SecretInput-Iqtxd",
+ "sourceHandle": "{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-Iqtxdœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}",
+ "target": "AdvancedDynamicFormBuilder-81Exw",
+ "targetHandle": "{œfieldNameœ:œdynamic_owner_nameœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}"
+ },
+ {
+ "animated": false,
+ "className": "not-running",
+ "data": {
+ "sourceHandle": {
+ "dataType": "AdvancedDynamicFormBuilder",
+ "id": "AdvancedDynamicFormBuilder-81Exw",
+ "name": "form_data",
+ "output_types": [
+ "Data"
+ ]
+ },
+ "targetHandle": {
+ "fieldName": "docs_metadata",
+ "id": "OpenSearchHybrid-Ve6bS",
+ "inputTypes": [
+ "Data"
+ ],
+ "type": "table"
+ }
+ },
+ "id": "xy-edge__AdvancedDynamicFormBuilder-81Exw{œdataTypeœ:œAdvancedDynamicFormBuilderœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œnameœ:œform_dataœ,œoutput_typesœ:[œDataœ]}-OpenSearchHybrid-Ve6bS{œfieldNameœ:œdocs_metadataœ,œidœ:œOpenSearchHybrid-Ve6bSœ,œinputTypesœ:[œDataœ],œtypeœ:œtableœ}",
+ "selected": false,
+ "source": "AdvancedDynamicFormBuilder-81Exw",
+ "sourceHandle": "{œdataTypeœ:œAdvancedDynamicFormBuilderœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œnameœ:œform_dataœ,œoutput_typesœ:[œDataœ]}",
+ "target": "OpenSearchHybrid-Ve6bS",
+ "targetHandle": "{œfieldNameœ:œdocs_metadataœ,œidœ:œOpenSearchHybrid-Ve6bSœ,œinputTypesœ:[œDataœ],œtypeœ:œtableœ}"
}
],
"nodes": [
@@ -114,9 +261,9 @@
"frozen": false,
"icon": "scissors-line-dashed",
"legacy": false,
- "lf_version": "1.5.0.post2",
+ "lf_version": "1.6.0",
"metadata": {
- "code_hash": "65a90e1f4fe6",
+ "code_hash": "f2867efda61f",
"dependencies": {
"dependencies": [
{
@@ -124,8 +271,8 @@
"version": "0.3.9"
},
{
- "name": "langflow",
- "version": "1.5.0.post2"
+ "name": "lfx",
+ "version": null
}
],
"total_dependencies": 2
@@ -208,7 +355,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "from langchain_text_splitters import CharacterTextSplitter\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.io import DropdownInput, HandleInput, IntInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.utils.util import unescape_string\n\n\nclass SplitTextComponent(Component):\n display_name: str = \"Split Text\"\n description: str = \"Split text into chunks based on specified criteria.\"\n documentation: str = \"https://docs.langflow.org/components-processing#split-text\"\n icon = \"scissors-line-dashed\"\n name = \"SplitText\"\n\n inputs = [\n HandleInput(\n name=\"data_inputs\",\n display_name=\"Input\",\n info=\"The data with texts to split in chunks.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n IntInput(\n name=\"chunk_overlap\",\n display_name=\"Chunk Overlap\",\n info=\"Number of characters to overlap between chunks.\",\n value=200,\n ),\n IntInput(\n name=\"chunk_size\",\n display_name=\"Chunk Size\",\n info=(\n \"The maximum length of each chunk. Text is first split by separator, \"\n \"then chunks are merged up to this size. \"\n \"Individual splits larger than this won't be further divided.\"\n ),\n value=1000,\n ),\n MessageTextInput(\n name=\"separator\",\n display_name=\"Separator\",\n info=(\n \"The character to split on. Use \\\\n for newline. \"\n \"Examples: \\\\n\\\\n for paragraphs, \\\\n for lines, . for sentences\"\n ),\n value=\"\\n\",\n ),\n MessageTextInput(\n name=\"text_key\",\n display_name=\"Text Key\",\n info=\"The key to use for the text column.\",\n value=\"text\",\n advanced=True,\n ),\n DropdownInput(\n name=\"keep_separator\",\n display_name=\"Keep Separator\",\n info=\"Whether to keep the separator in the output chunks and where to place it.\",\n options=[\"False\", \"True\", \"Start\", \"End\"],\n value=\"False\",\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Chunks\", name=\"dataframe\", method=\"split_text\"),\n ]\n\n def _docs_to_data(self, docs) -> list[Data]:\n data_list = [Data(text=doc.page_content, data=doc.metadata) for doc in docs]\n return data_list\n\n def _fix_separator(self, separator: str) -> str:\n \"\"\"Fix common separator issues and convert to proper format.\"\"\"\n if separator == \"/n\":\n return \"\\n\"\n if separator == \"/t\":\n return \"\\t\"\n return separator\n\n def split_text_base(self):\n separator = self._fix_separator(self.separator)\n separator = unescape_string(separator)\n\n if isinstance(self.data_inputs, DataFrame):\n if not len(self.data_inputs):\n msg = \"DataFrame is empty\"\n raise TypeError(msg)\n\n self.data_inputs.text_key = self.text_key\n try:\n documents = self.data_inputs.to_lc_documents()\n except Exception as e:\n msg = f\"Error converting DataFrame to documents: {e}\"\n raise TypeError(msg) from e\n elif isinstance(self.data_inputs, Message):\n self.data_inputs = [self.data_inputs.to_data()]\n return self.split_text_base()\n else:\n if not self.data_inputs:\n msg = \"No data inputs provided\"\n raise TypeError(msg)\n\n documents = []\n if isinstance(self.data_inputs, Data):\n self.data_inputs.text_key = self.text_key\n documents = [self.data_inputs.to_lc_document()]\n else:\n try:\n documents = [input_.to_lc_document() for input_ in self.data_inputs if isinstance(input_, Data)]\n if not documents:\n msg = f\"No valid Data inputs found in {type(self.data_inputs)}\"\n raise TypeError(msg)\n except AttributeError as e:\n msg = f\"Invalid input type in collection: {e}\"\n raise TypeError(msg) from e\n try:\n # Convert string 'False'/'True' to boolean\n keep_sep = self.keep_separator\n if isinstance(keep_sep, str):\n if keep_sep.lower() == \"false\":\n keep_sep = False\n elif keep_sep.lower() == \"true\":\n keep_sep = True\n # 'start' and 'end' are kept as strings\n self.log(documents)\n splitter = CharacterTextSplitter(\n chunk_overlap=self.chunk_overlap,\n chunk_size=self.chunk_size,\n separator=separator,\n keep_separator=keep_sep,\n )\n return splitter.split_documents(documents)\n except Exception as e:\n msg = f\"Error splitting text: {e}\"\n raise TypeError(msg) from e\n\n def split_text(self) -> DataFrame:\n return DataFrame(self._docs_to_data(self.split_text_base()))\n"
+ "value": "from langchain_text_splitters import CharacterTextSplitter\n\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.io import DropdownInput, HandleInput, IntInput, MessageTextInput, Output\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.utils.util import unescape_string\n\n\nclass SplitTextComponent(Component):\n display_name: str = \"Split Text\"\n description: str = \"Split text into chunks based on specified criteria.\"\n documentation: str = \"https://docs.langflow.org/components-processing#split-text\"\n icon = \"scissors-line-dashed\"\n name = \"SplitText\"\n\n inputs = [\n HandleInput(\n name=\"data_inputs\",\n display_name=\"Input\",\n info=\"The data with texts to split in chunks.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n IntInput(\n name=\"chunk_overlap\",\n display_name=\"Chunk Overlap\",\n info=\"Number of characters to overlap between chunks.\",\n value=200,\n ),\n IntInput(\n name=\"chunk_size\",\n display_name=\"Chunk Size\",\n info=(\n \"The maximum length of each chunk. Text is first split by separator, \"\n \"then chunks are merged up to this size. \"\n \"Individual splits larger than this won't be further divided.\"\n ),\n value=1000,\n ),\n MessageTextInput(\n name=\"separator\",\n display_name=\"Separator\",\n info=(\n \"The character to split on. Use \\\\n for newline. \"\n \"Examples: \\\\n\\\\n for paragraphs, \\\\n for lines, . for sentences\"\n ),\n value=\"\\n\",\n ),\n MessageTextInput(\n name=\"text_key\",\n display_name=\"Text Key\",\n info=\"The key to use for the text column.\",\n value=\"text\",\n advanced=True,\n ),\n DropdownInput(\n name=\"keep_separator\",\n display_name=\"Keep Separator\",\n info=\"Whether to keep the separator in the output chunks and where to place it.\",\n options=[\"False\", \"True\", \"Start\", \"End\"],\n value=\"False\",\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Chunks\", name=\"dataframe\", method=\"split_text\"),\n ]\n\n def _docs_to_data(self, docs) -> list[Data]:\n return [Data(text=doc.page_content, data=doc.metadata) for doc in docs]\n\n def _fix_separator(self, separator: str) -> str:\n \"\"\"Fix common separator issues and convert to proper format.\"\"\"\n if separator == \"/n\":\n return \"\\n\"\n if separator == \"/t\":\n return \"\\t\"\n return separator\n\n def split_text_base(self):\n separator = self._fix_separator(self.separator)\n separator = unescape_string(separator)\n\n if isinstance(self.data_inputs, DataFrame):\n if not len(self.data_inputs):\n msg = \"DataFrame is empty\"\n raise TypeError(msg)\n\n self.data_inputs.text_key = self.text_key\n try:\n documents = self.data_inputs.to_lc_documents()\n except Exception as e:\n msg = f\"Error converting DataFrame to documents: {e}\"\n raise TypeError(msg) from e\n elif isinstance(self.data_inputs, Message):\n self.data_inputs = [self.data_inputs.to_data()]\n return self.split_text_base()\n else:\n if not self.data_inputs:\n msg = \"No data inputs provided\"\n raise TypeError(msg)\n\n documents = []\n if isinstance(self.data_inputs, Data):\n self.data_inputs.text_key = self.text_key\n documents = [self.data_inputs.to_lc_document()]\n else:\n try:\n documents = [input_.to_lc_document() for input_ in self.data_inputs if isinstance(input_, Data)]\n if not documents:\n msg = f\"No valid Data inputs found in {type(self.data_inputs)}\"\n raise TypeError(msg)\n except AttributeError as e:\n msg = f\"Invalid input type in collection: {e}\"\n raise TypeError(msg) from e\n try:\n # Convert string 'False'/'True' to boolean\n keep_sep = self.keep_separator\n if isinstance(keep_sep, str):\n if keep_sep.lower() == \"false\":\n keep_sep = False\n elif keep_sep.lower() == \"true\":\n keep_sep = True\n # 'start' and 'end' are kept as strings\n\n splitter = CharacterTextSplitter(\n chunk_overlap=self.chunk_overlap,\n chunk_size=self.chunk_size,\n separator=separator,\n keep_separator=keep_sep,\n )\n return splitter.split_documents(documents)\n except Exception as e:\n msg = f\"Error splitting text: {e}\"\n raise TypeError(msg) from e\n\n def split_text(self) -> DataFrame:\n return DataFrame(self._docs_to_data(self.split_text_base()))\n"
},
"data_inputs": {
"_input_type": "HandleInput",
@@ -239,6 +386,7 @@
"dialog_inputs": {},
"display_name": "Keep Separator",
"dynamic": false,
+ "external_options": {},
"info": "Whether to keep the separator in the output chunks and where to place it.",
"name": "keep_separator",
"options": [
@@ -339,7 +487,7 @@
"beta": false,
"conditional_paths": [],
"custom_fields": {},
- "description": "Loads content from files with optional advanced document processing and export using Docling.",
+ "description": "Loads content from one or more files.",
"display_name": "File",
"documentation": "https://docs.langflow.org/components-data#file",
"edited": true,
@@ -363,23 +511,19 @@
],
"frozen": false,
"icon": "file-text",
- "last_updated": "2025-09-22T15:54:40.920Z",
+ "last_updated": "2025-09-26T14:37:42.811Z",
"legacy": false,
- "lf_version": "1.5.0.post2",
+ "lf_version": "1.6.0",
"metadata": {
- "code_hash": "086578fbbd54",
+ "code_hash": "9a1d497f4f91",
"dependencies": {
"dependencies": [
{
- "name": "langflow",
- "version": "1.5.0.post2"
- },
- {
- "name": "anyio",
- "version": "4.10.0"
+ "name": "lfx",
+ "version": null
}
],
- "total_dependencies": 2
+ "total_dependencies": 1
},
"module": "custom_components.file"
},
@@ -412,7 +556,7 @@
"advanced": false,
"display_name": "Advanced Parser",
"dynamic": false,
- "info": "Enable advanced document processing and export with Docling for PDFs, images, and office documents. Available only for single file processing.",
+                        "info": "Enable advanced document processing and export with Docling for PDFs, images, and office documents. Available only for single file processing. Note that advanced document processing can consume significant resources.",
"list": false,
"list_add_label": "Add More",
"name": "advanced_mode",
@@ -442,7 +586,7 @@
"show": true,
"title_case": false,
"type": "code",
- "value": "\"\"\"Enhanced file component with clearer structure and Docling isolation.\n\nNotes:\n-----\n- Functionality is preserved with minimal behavioral changes.\n- ALL Docling parsing/export runs in a separate OS process to prevent memory\n growth and native library state from impacting the main Langflow process.\n- Standard text/structured parsing continues to use existing BaseFileComponent\n utilities (and optional threading via `parallel_load_data`).\n\"\"\"\n\nfrom __future__ import annotations\n\nimport json\nimport subprocess\nimport sys\nimport textwrap\nfrom copy import deepcopy\nfrom typing import TYPE_CHECKING, Any\n\nfrom langflow.base.data.base_file import BaseFileComponent\nfrom langflow.base.data.utils import TEXT_FILE_TYPES, parallel_load_data, parse_text_file_to_data\nfrom langflow.io import (\n BoolInput,\n DropdownInput,\n FileInput,\n IntInput,\n MessageTextInput,\n Output,\n StrInput,\n)\nfrom langflow.schema.data import Data\nfrom langflow.schema.message import Message\nimport anyio\nfrom langflow.services.storage.utils import build_content_type_from_extension\nif TYPE_CHECKING:\n from langflow.schema import DataFrame\n\n\nclass FileComponent(BaseFileComponent):\n \"\"\"File component with optional Docling processing (isolated in a subprocess).\"\"\"\n\n display_name = \"File\"\n description = \"Loads content from files with optional advanced document processing and export using Docling.\"\n documentation: str = \"https://docs.langflow.org/components-data#file\"\n icon = \"file-text\"\n name = \"File\"\n\n # Docling-supported/compatible extensions; TEXT_FILE_TYPES are supported by the base loader.\n VALID_EXTENSIONS = [\n \"adoc\",\n \"asciidoc\",\n \"asc\",\n \"bmp\",\n \"csv\",\n \"dotx\",\n \"dotm\",\n \"docm\",\n \"docx\",\n \"htm\",\n \"html\",\n \"jpeg\",\n \"json\",\n \"md\",\n \"pdf\",\n \"png\",\n \"potx\",\n \"ppsx\",\n \"pptm\",\n \"potm\",\n \"ppsm\",\n \"pptx\",\n \"tiff\",\n \"txt\",\n \"xls\",\n \"xlsx\",\n \"xhtml\",\n \"xml\",\n \"webp\",\n *TEXT_FILE_TYPES,\n ]\n\n # Fixed export settings used when markdown export is requested.\n EXPORT_FORMAT = \"Markdown\"\n IMAGE_MODE = \"placeholder\"\n\n # ---- Inputs / Outputs (kept as close to original as possible) -------------------\n _base_inputs = deepcopy(BaseFileComponent._base_inputs)\n for input_item in _base_inputs:\n if isinstance(input_item, FileInput) and input_item.name == \"path\":\n input_item.real_time_refresh = True\n break\n\n inputs = [\n *_base_inputs,\n BoolInput(\n name=\"advanced_mode\",\n display_name=\"Advanced Parser\",\n value=False,\n real_time_refresh=True,\n info=(\n \"Enable advanced document processing and export with Docling for PDFs, images, and office documents. \"\n \"Available only for single file processing.\"\n ),\n show=False,\n ),\n DropdownInput(\n name=\"pipeline\",\n display_name=\"Pipeline\",\n info=\"Docling pipeline to use\",\n options=[\"standard\", \"vlm\"],\n value=\"standard\",\n advanced=True,\n ),\n DropdownInput(\n name=\"ocr_engine\",\n display_name=\"OCR Engine\",\n info=\"OCR engine to use. Only available when pipeline is set to 'standard'.\",\n options=[\"\", \"easyocr\"],\n value=\"\",\n show=False,\n advanced=True,\n ),\n StrInput(\n name=\"md_image_placeholder\",\n display_name=\"Image placeholder\",\n info=\"Specify the image placeholder for markdown exports.\",\n value=\"\",\n advanced=True,\n show=False,\n ),\n StrInput(\n name=\"md_page_break_placeholder\",\n display_name=\"Page break placeholder\",\n info=\"Add this placeholder between pages in the markdown output.\",\n value=\"\",\n advanced=True,\n show=False,\n ),\n MessageTextInput(\n name=\"doc_key\",\n display_name=\"Doc Key\",\n info=\"The key to use for the DoclingDocument column.\",\n value=\"doc\",\n advanced=True,\n show=False,\n ),\n # Deprecated input retained for backward-compatibility.\n BoolInput(\n name=\"use_multithreading\",\n display_name=\"[Deprecated] Use Multithreading\",\n advanced=True,\n value=True,\n info=\"Set 'Processing Concurrency' greater than 1 to enable multithreading.\",\n ),\n IntInput(\n name=\"concurrency_multithreading\",\n display_name=\"Processing Concurrency\",\n advanced=True,\n info=\"When multiple files are being processed, the number of files to process concurrently.\",\n value=1,\n ),\n BoolInput(\n name=\"markdown\",\n display_name=\"Markdown Export\",\n info=\"Export processed documents to Markdown format. Only available when advanced mode is enabled.\",\n value=False,\n show=False,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Raw Content\", name=\"message\", method=\"load_files_message\"),\n ]\n\n # ------------------------------ UI helpers --------------------------------------\n\n def _path_value(self, template: dict) -> list[str]:\n \"\"\"Return the list of currently selected file paths from the template.\"\"\"\n return template.get(\"path\", {}).get(\"file_path\", [])\n\n def update_build_config(\n self,\n build_config: dict[str, Any],\n field_value: Any,\n field_name: str | None = None,\n ) -> dict[str, Any]:\n \"\"\"Show/hide Advanced Parser and related fields based on selection context.\"\"\"\n if field_name == \"path\":\n paths = self._path_value(build_config)\n file_path = paths[0] if paths else \"\"\n file_count = len(field_value) if field_value else 0\n\n # Advanced mode only for single (non-tabular) file\n allow_advanced = file_count == 1 and not file_path.endswith((\".csv\", \".xlsx\", \".parquet\"))\n build_config[\"advanced_mode\"][\"show\"] = allow_advanced\n if not allow_advanced:\n build_config[\"advanced_mode\"][\"value\"] = False\n for f in (\"pipeline\", \"ocr_engine\", \"doc_key\", \"md_image_placeholder\", \"md_page_break_placeholder\"):\n if f in build_config:\n build_config[f][\"show\"] = False\n\n elif field_name == \"advanced_mode\":\n for f in (\"pipeline\", \"ocr_engine\", \"doc_key\", \"md_image_placeholder\", \"md_page_break_placeholder\"):\n if f in build_config:\n build_config[f][\"show\"] = bool(field_value)\n\n return build_config\n\n def update_outputs(self, frontend_node: dict[str, Any], field_name: str, field_value: Any) -> dict[str, Any]: # noqa: ARG002\n \"\"\"Dynamically show outputs based on file count/type and advanced mode.\"\"\"\n if field_name not in [\"path\", \"advanced_mode\"]:\n return frontend_node\n\n template = frontend_node.get(\"template\", {})\n paths = self._path_value(template)\n if not paths:\n return frontend_node\n\n frontend_node[\"outputs\"] = []\n if len(paths) == 1:\n file_path = paths[0] if field_name == \"path\" else frontend_node[\"template\"][\"path\"][\"file_path\"][0]\n if file_path.endswith((\".csv\", \".xlsx\", \".parquet\")):\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Content\", name=\"dataframe\", method=\"load_files_structured\"),\n )\n elif file_path.endswith(\".json\"):\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Content\", name=\"json\", method=\"load_files_json\"),\n )\n\n advanced_mode = frontend_node.get(\"template\", {}).get(\"advanced_mode\", {}).get(\"value\", False)\n if advanced_mode:\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Output\", name=\"advanced\", method=\"load_files_advanced\"),\n )\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Markdown\", name=\"markdown\", method=\"load_files_markdown\"),\n )\n frontend_node[\"outputs\"].append(\n Output(display_name=\"File Path\", name=\"path\", method=\"load_files_path\"),\n )\n else:\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Raw Content\", name=\"message\", method=\"load_files_message\"),\n )\n frontend_node[\"outputs\"].append(\n Output(display_name=\"File Path\", name=\"path\", method=\"load_files_path\"),\n )\n else:\n # Multiple files => DataFrame output; advanced parser disabled\n frontend_node[\"outputs\"].append(Output(display_name=\"Files\", name=\"dataframe\", method=\"load_files\"))\n\n return frontend_node\n\n # ------------------------------ Core processing ----------------------------------\n\n def _is_docling_compatible(self, file_path: str) -> bool:\n \"\"\"Lightweight extension gate for Docling-compatible types.\"\"\"\n docling_exts = (\n \".adoc\",\n \".asciidoc\",\n \".asc\",\n \".bmp\",\n \".csv\",\n \".dotx\",\n \".dotm\",\n \".docm\",\n \".docx\",\n \".htm\",\n \".html\",\n \".jpeg\",\n \".json\",\n \".md\",\n \".pdf\",\n \".png\",\n \".potx\",\n \".ppsx\",\n \".pptm\",\n \".potm\",\n \".ppsm\",\n \".pptx\",\n \".tiff\",\n \".txt\",\n \".xls\",\n \".xlsx\",\n \".xhtml\",\n \".xml\",\n \".webp\",\n )\n return file_path.lower().endswith(docling_exts)\n\n def _process_docling_in_subprocess(self, file_path: str) -> Data | None:\n \"\"\"Run Docling in a separate OS process and map the result to a Data object.\n\n We avoid multiprocessing pickling by launching `python -c \"