From 9ae3b61695c4e9c2e4691d7396fd0ebe58524499 Mon Sep 17 00:00:00 2001 From: Edwin Jose Date: Mon, 24 Nov 2025 11:36:17 -0500 Subject: [PATCH 01/43] Update base image to langflow-nightly:1.7.0.dev19 Dockerfile now uses the newer langflow-nightly:1.7.0.dev19 image instead of 1.7.0.dev5 to ensure the latest features and fixes are included. --- Dockerfile.langflow | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile.langflow b/Dockerfile.langflow index 5343585a..6119de84 100644 --- a/Dockerfile.langflow +++ b/Dockerfile.langflow @@ -1,4 +1,4 @@ -FROM langflowai/langflow-nightly:1.7.0.dev5 +FROM langflowai/langflow-nightly:1.7.0.dev19 EXPOSE 7860 From 71b67f418db1cf90556acfde72c64f12e2e263c8 Mon Sep 17 00:00:00 2001 From: Edwin Jose Date: Mon, 24 Nov 2025 16:40:36 -0500 Subject: [PATCH 02/43] Enhance embedding model component and Docker builds Switches Docker Compose services to local builds for backend, frontend, and langflow. Updates embedding model component to support IBM watsonx.ai features, including input token truncation and original text output, adds new dependencies, and improves configuration options in ingestion and agent flows. --- docker-compose.yml | 18 ++--- flows/ingestion_flow.json | 99 +++++++++++++++++++---- flows/openrag_agent.json | 162 +++++++++++++++++++++++++++++++------ flows/openrag_nudges.json | 97 ++++++++++++++++++---- flows/openrag_url_mcp.json | 95 ++++++++++++++++++---- 5 files changed, 390 insertions(+), 81 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 7ba0cea8..c3382bf3 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -45,9 +45,9 @@ services: openrag-backend: image: phact/openrag-backend:${OPENRAG_VERSION:-latest} - # build: - # context: . - # dockerfile: Dockerfile.backend + build: + context: . + dockerfile: Dockerfile.backend container_name: openrag-backend depends_on: - langflow @@ -89,9 +89,9 @@ services: openrag-frontend: image: phact/openrag-frontend:${OPENRAG_VERSION:-latest} - # build: - # context: . - # dockerfile: Dockerfile.frontend + build: + context: . + dockerfile: Dockerfile.frontend container_name: openrag-frontend depends_on: - openrag-backend @@ -104,9 +104,9 @@ services: volumes: - ./flows:/app/flows:U,z image: phact/openrag-langflow:${LANGFLOW_VERSION:-latest} - # build: - # context: . - # dockerfile: Dockerfile.langflow + build: + context: . 
+ dockerfile: Dockerfile.langflow container_name: langflow ports: - "7860:7860" diff --git a/flows/ingestion_flow.json b/flows/ingestion_flow.json index abf51e90..d517c1aa 100644 --- a/flows/ingestion_flow.json +++ b/flows/ingestion_flow.json @@ -319,6 +319,7 @@ }, { "animated": false, + "className": "", "data": { "sourceHandle": { "dataType": "EmbeddingModel", @@ -1231,7 +1232,7 @@ "x": 2218.9287723423276, "y": 1332.2598463956504 }, - "selected": true, + "selected": false, "type": "genericNode" }, { @@ -1255,7 +1256,7 @@ ], "frozen": false, "icon": "braces", - "last_updated": "2025-11-11T15:06:45.096Z", + "last_updated": "2025-11-24T18:01:42.358Z", "legacy": false, "lf_version": "1.6.3.dev0", "metadata": {}, @@ -1345,6 +1346,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -1375,6 +1377,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -1405,6 +1408,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -1435,6 +1439,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -2617,7 +2622,7 @@ ], "frozen": false, "icon": "table", - "last_updated": "2025-11-11T15:06:45.203Z", + "last_updated": "2025-11-24T18:01:42.468Z", "legacy": false, "lf_version": "1.6.3.dev0", "metadata": { @@ -3033,7 +3038,7 @@ ], "frozen": false, "icon": "table", - "last_updated": "2025-11-11T15:06:45.204Z", + "last_updated": "2025-11-24T18:01:42.469Z", "legacy": false, "lf_version": "1.6.3.dev0", "metadata": { @@ -3449,7 +3454,7 @@ ], "frozen": false, "icon": "table", - "last_updated": "2025-11-11T15:06:45.205Z", + "last_updated": "2025-11-24T18:01:42.469Z", "legacy": false, "lf_version": "1.6.3.dev0", "metadata": { @@ -3889,23 +3894,32 @@ "request_timeout", "max_retries", "show_progress_bar", - "model_kwargs" + "model_kwargs", + "truncate_input_tokens", + "input_text" ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-11T15:06:53.974Z", "legacy": false, "metadata": { - "code_hash": "bb03f97be707", + "code_hash": "c5e0a4535a27", "dependencies": { "dependencies": [ + { + "name": "requests", + "version": "2.32.5" + }, + { + "name": "ibm_watsonx_ai", + "version": "1.4.2" + }, { "name": "langchain_openai", "version": "0.3.23" }, { "name": "lfx", - "version": null + "version": "0.2.0.dev19" }, { "name": "langchain_ollama", @@ -3920,9 +3934,9 @@ "version": "0.3.19" } ], - "total_dependencies": 5 + "total_dependencies": 7 }, - "module": "lfx.components.models.embedding_model.EmbeddingModelComponent" + "module": "custom_components.embedding_model" }, "minimized": false, "output_types": [], @@ -3967,6 +3981,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -3985,6 +4000,7 @@ "required": true, "show": true, "title_case": false, + "track_in_telemetry": false, "type": "str", "value": "OPENAI_API_KEY" }, @@ -4015,6 +4031,7 @@ "toggle": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "str", "value": "https://us-south.ml.cloud.ibm.com" }, @@ -4033,6 +4050,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "int", "value": 1000 }, @@ -4052,7 +4070,7 @@ "show": true, "title_case": false, 
"type": "code", - "value": "from typing import Any\n\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import IBM_WATSONX_URLS, WATSONX_EMBEDDING_MODEL_NAMES\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n ]\n\n def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n return OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. 
\"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n return OllamaEmbeddings(\n model=model,\n base_url=transformed_base_url or \"http://localhost:11434\",\n **model_kwargs,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n params = {\n \"model_id\": model,\n \"url\": base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\",\n \"apikey\": api_key,\n }\n\n params[\"project_id\"] = project_id\n\n return WatsonxEmbeddings(**params)\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = WATSONX_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = WATSONX_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider 
== \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). 
Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n return OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n if provider == 
\"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n return OllamaEmbeddings(\n model=model,\n base_url=transformed_base_url or \"http://localhost:11434\",\n **model_kwargs,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n from ibm_watsonx_ai import APIClient, Credentials\n\n credentials = Credentials(\n api_key=self.api_key,\n url=base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\",\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n return WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n 
build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" }, "dimensions": { "_input_type": "IntInput", @@ -4069,9 +4087,29 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "int", "value": "" }, + "input_text": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Include the original text in the output", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": 
"input_text", + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, "max_retries": { "_input_type": "IntInput", "advanced": true, @@ -4087,6 +4125,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "int", "value": 3 }, @@ -4107,12 +4146,15 @@ ], "options_metadata": [], "placeholder": "", + "real_time_refresh": true, + "refresh_button": true, "required": false, "show": true, "title_case": false, "toggle": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "str", "value": "text-embedding-3-small" }, @@ -4131,6 +4173,7 @@ "title_case": false, "tool_mode": false, "trace_as_input": true, + "track_in_telemetry": false, "type": "dict", "value": {} }, @@ -4155,6 +4198,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -4178,6 +4222,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -4215,6 +4260,7 @@ "toggle": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "str", "value": "OpenAI" }, @@ -4233,6 +4279,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "float", "value": "" }, @@ -4251,8 +4298,28 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "bool", "value": false + }, + "truncate_input_tokens": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Truncate Input Tokens", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "truncate_input_tokens", + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 200 } }, "tool_mode": false @@ -4275,16 +4342,16 @@ } ], "viewport": { - "x": -35.55006677196684, - "y": -678.0353419327157, - "zoom": 0.5646686365360081 + "x": 354.22064006192994, + "y": -436.1821097171422, + "zoom": 0.45965778621327413 } }, "description": "Load your data for chat context with Retrieval Augmented Generation.", "endpoint_name": null, "id": "5488df7c-b93f-4f87-a446-b67028bc0813", "is_component": false, - "last_tested_version": "1.7.0", + "last_tested_version": "1.7.0.dev19", "name": "OpenSearch Ingestion Flow", "tags": [ "openai", diff --git a/flows/openrag_agent.json b/flows/openrag_agent.json index 9640c6a8..af0ed600 100644 --- a/flows/openrag_agent.json +++ b/flows/openrag_agent.json @@ -173,6 +173,7 @@ }, { "animated": false, + "className": "", "data": { "sourceHandle": { "dataType": "CalculatorComponent", @@ -242,7 +243,7 @@ ], "frozen": false, "icon": "OpenSearch", - "last_updated": "2025-11-11T21:40:35.507Z", + "last_updated": "2025-11-24T18:02:41.464Z", "legacy": false, "lf_version": "1.6.0", "metadata": { @@ -713,6 +714,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "tools", "value": [ { @@ -992,7 +994,7 @@ "frozen": false, "icon": "Mcp", "key": "mcp_lf-starter_project", - "last_updated": "2025-11-11T21:40:35.508Z", + "last_updated": "2025-11-24T18:02:41.465Z", "legacy": false, "mcpServerName": "lf-starter_project", "metadata": { @@ -1141,6 
+1143,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "tools", "value": [ { @@ -1821,23 +1824,32 @@ "request_timeout", "max_retries", "show_progress_bar", - "model_kwargs" + "model_kwargs", + "truncate_input_tokens", + "input_text" ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-11T21:40:35.510Z", "legacy": false, "metadata": { - "code_hash": "bb03f97be707", + "code_hash": "c5e0a4535a27", "dependencies": { "dependencies": [ + { + "name": "requests", + "version": "2.32.5" + }, + { + "name": "ibm_watsonx_ai", + "version": "1.4.2" + }, { "name": "langchain_openai", "version": "0.3.23" }, { "name": "lfx", - "version": null + "version": "0.2.0.dev19" }, { "name": "langchain_ollama", @@ -1852,9 +1864,9 @@ "version": "0.3.19" } ], - "total_dependencies": 5 + "total_dependencies": 7 }, - "module": "lfx.components.models.embedding_model.EmbeddingModelComponent" + "module": "custom_components.embedding_model" }, "minimized": false, "output_types": [], @@ -1899,6 +1911,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -1917,6 +1930,7 @@ "required": true, "show": true, "title_case": false, + "track_in_telemetry": false, "type": "str", "value": "OPENAI_API_KEY" }, @@ -1947,6 +1961,7 @@ "toggle": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "str", "value": "https://us-south.ml.cloud.ibm.com" }, @@ -1965,6 +1980,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "int", "value": 1000 }, @@ -1984,7 +2000,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import IBM_WATSONX_URLS, WATSONX_EMBEDDING_MODEL_NAMES\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. 
Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n ]\n\n def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n return OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. 
The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n return OllamaEmbeddings(\n model=model,\n base_url=transformed_base_url or \"http://localhost:11434\",\n **model_kwargs,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n params = {\n \"model_id\": model,\n \"url\": base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\",\n \"apikey\": api_key,\n }\n\n params[\"project_id\"] = project_id\n\n return WatsonxEmbeddings(**params)\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = WATSONX_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = WATSONX_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n\n 
elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). 
Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n return OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n if provider == 
\"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n return OllamaEmbeddings(\n model=model,\n base_url=transformed_base_url or \"http://localhost:11434\",\n **model_kwargs,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n from ibm_watsonx_ai import APIClient, Credentials\n\n credentials = Credentials(\n api_key=self.api_key,\n url=base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\",\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n return WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n 
build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" }, "dimensions": { "_input_type": "IntInput", @@ -2001,9 +2017,29 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "int", "value": "" }, + "input_text": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Include the original text in the output", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": 
"input_text", + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, "max_retries": { "_input_type": "IntInput", "advanced": true, @@ -2019,6 +2055,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "int", "value": 3 }, @@ -2033,16 +2070,21 @@ "info": "Select the embedding model to use", "name": "model", "options": [ - "text-embedding-3-small" + "text-embedding-3-small", + "text-embedding-3-large", + "text-embedding-ada-002" ], "options_metadata": [], "placeholder": "", + "real_time_refresh": true, + "refresh_button": true, "required": false, "show": true, "title_case": false, "toggle": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "str", "value": "text-embedding-3-small" }, @@ -2061,6 +2103,7 @@ "title_case": false, "tool_mode": false, "trace_as_input": true, + "track_in_telemetry": false, "type": "dict", "value": {} }, @@ -2085,6 +2128,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -2108,6 +2152,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -2145,6 +2190,7 @@ "toggle": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "str", "value": "OpenAI" }, @@ -2163,6 +2209,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "float", "value": "" }, @@ -2181,8 +2228,28 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "bool", "value": false + }, + "truncate_input_tokens": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Truncate Input Tokens", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "truncate_input_tokens", + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 200 } }, "tool_mode": false @@ -2193,7 +2260,7 @@ "dragging": false, "id": "EmbeddingModel-oPi95", "measured": { - "height": 451, + "height": 369, "width": 320 }, "position": { @@ -2247,10 +2314,9 @@ ], "frozen": false, "icon": "bot", - "last_updated": "2025-11-11T21:40:35.510Z", "legacy": false, "metadata": { - "code_hash": "adf733969280", + "code_hash": "d64b11c24a1c", "dependencies": { "dependencies": [ { @@ -2263,12 +2329,12 @@ }, { "name": "lfx", - "version": null + "version": "0.2.0.dev19" } ], "total_dependencies": 3 }, - "module": "lfx.components.agents.agent.AgentComponent" + "module": "custom_components.agent" }, "minimized": false, "output_types": [], @@ -2308,6 +2374,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "bool", "value": true }, @@ -2333,6 +2400,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "A helpful assistant with access to the following tools:" }, @@ -2390,6 +2458,7 @@ "toggle": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "str", "value": "OpenAI" }, @@ -2408,6 +2477,7 @@ "required": false, "show": true, "title_case": false, + 
"track_in_telemetry": false, "type": "str", "value": "OPENAI_API_KEY" }, @@ -2427,6 +2497,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -2446,7 +2517,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import json\nimport re\n\nfrom langchain_core.tools import StructuredTool, Tool\nfrom pydantic import ValidationError\n\nfrom lfx.base.agents.agent import LCToolsAgentComponent\nfrom lfx.base.agents.events import ExceptionWithMessageError\nfrom lfx.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODEL_PROVIDERS_LIST,\n MODELS_METADATA,\n)\nfrom lfx.base.models.model_utils import get_model_name\nfrom lfx.components.helpers.current_date import CurrentDateComponent\nfrom lfx.components.helpers.memory import MemoryComponent\nfrom lfx.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom lfx.custom.custom_component.component import get_component_toolkit\nfrom lfx.custom.utils import update_component_build_config\nfrom lfx.helpers.base_model import build_model_from_schema\nfrom lfx.inputs.inputs import BoolInput, SecretStrInput, StrInput\nfrom lfx.io import DropdownInput, IntInput, MessageTextInput, MultilineInput, Output, TableInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.data import Data\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.schema.message import Message\nfrom lfx.schema.table import EditMode\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n documentation: str = \"https://docs.langflow.org/agents\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n # Filter out json_mode from OpenAI inputs since we handle structured output differently\n if \"OpenAI\" in MODEL_PROVIDERS_DICT:\n openai_inputs_filtered = [\n input_field\n for input_field in MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"]\n if not (hasattr(input_field, \"name\") and input_field.name == \"json_mode\")\n ]\n else:\n openai_inputs_filtered = []\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*MODEL_PROVIDERS_LIST],\n value=\"OpenAI\",\n real_time_refresh=True,\n refresh_button=False,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in MODEL_PROVIDERS_LIST if key in MODELS_METADATA],\n external_options={\n \"fields\": {\n \"data\": {\n \"node\": {\n \"name\": \"connect_other_models\",\n \"display_name\": \"Connect other models\",\n \"icon\": \"CornerDownLeft\",\n }\n }\n },\n },\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"API Key\",\n info=\"The API key to use for the model.\",\n required=True,\n ),\n StrInput(\n name=\"base_url\",\n display_name=\"Base URL\",\n info=\"The base URL of the API.\",\n required=True,\n show=False,\n ),\n StrInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"The project ID of the model.\",\n required=True,\n show=False,\n ),\n IntInput(\n name=\"max_output_tokens\",\n display_name=\"Max Output Tokens\",\n info=\"The maximum number of tokens 
to generate.\",\n show=False,\n ),\n *openai_inputs_filtered,\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n MessageTextInput(\n name=\"context_id\",\n display_name=\"Context ID\",\n info=\"The context ID of the chat. Adds an extra layer to the local memory.\",\n value=\"\",\n advanced=True,\n ),\n IntInput(\n name=\"n_messages\",\n display_name=\"Number of Chat History Messages\",\n value=100,\n info=\"Number of chat history messages to retrieve.\",\n advanced=True,\n show=True,\n ),\n MultilineInput(\n name=\"format_instructions\",\n display_name=\"Output Format Instructions\",\n info=\"Generic Template for structured output formatting. Valid only with Structured response.\",\n value=(\n \"You are an AI that extracts structured JSON objects from unstructured text. \"\n \"Use a predefined schema with expected types (str, int, float, bool, dict). \"\n \"Extract ALL relevant instances that match the schema - if multiple patterns exist, capture them all. \"\n \"Fill missing or ambiguous values with defaults: null for missing values. \"\n \"Remove exact duplicates but keep variations that have different field values. \"\n \"Always return valid JSON in the expected format, never throw errors. \"\n \"If multiple objects can be extracted, return them all in the structured format.\"\n ),\n advanced=True,\n ),\n TableInput(\n name=\"output_schema\",\n display_name=\"Output Schema\",\n info=(\n \"Schema Validation: Define the structure and data types for structured output. \"\n \"No validation if no output schema.\"\n ),\n advanced=True,\n required=False,\n value=[],\n table_schema=[\n {\n \"name\": \"name\",\n \"display_name\": \"Name\",\n \"type\": \"str\",\n \"description\": \"Specify the name of the output field.\",\n \"default\": \"field\",\n \"edit_mode\": EditMode.INLINE,\n },\n {\n \"name\": \"description\",\n \"display_name\": \"Description\",\n \"type\": \"str\",\n \"description\": \"Describe the purpose of the output field.\",\n \"default\": \"description of field\",\n \"edit_mode\": EditMode.POPOVER,\n },\n {\n \"name\": \"type\",\n \"display_name\": \"Type\",\n \"type\": \"str\",\n \"edit_mode\": EditMode.INLINE,\n \"description\": (\"Indicate the data type of the output field (e.g., str, int, float, bool, dict).\"),\n \"options\": [\"str\", \"int\", \"float\", \"bool\", \"dict\"],\n \"default\": \"str\",\n },\n {\n \"name\": \"multiple\",\n \"display_name\": \"As List\",\n \"type\": \"boolean\",\n \"description\": \"Set to True if this output field should be a list of the specified type.\",\n \"default\": \"False\",\n \"edit_mode\": EditMode.INLINE,\n },\n ],\n ),\n *LCToolsAgentComponent.get_base_inputs(),\n # removed memory inputs from agent component\n # *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [\n Output(name=\"response\", display_name=\"Response\", method=\"message_response\"),\n ]\n\n async def get_agent_requirements(self):\n \"\"\"Get the agent requirements for the agent.\"\"\"\n llm_model, display_name = await self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. 
Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n await logger.adebug(f\"Retrieved {len(self.chat_history)} chat history messages\")\n if isinstance(self.chat_history, Message):\n self.chat_history = [self.chat_history]\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Set shared callbacks for tracing the tools used by the agent\n self.set_tools_callbacks(self.tools, self._get_shared_callbacks())\n\n return llm_model, self.chat_history, self.tools\n\n async def message_response(self) -> Message:\n try:\n llm_model, self.chat_history, self.tools = await self.get_agent_requirements()\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools or [],\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n result = await self.run_agent(agent)\n\n # Store result for potential JSON output\n self._agent_result = result\n\n except (ValueError, TypeError, KeyError) as e:\n await logger.aerror(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n await logger.aerror(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n # Avoid catching blind Exception; let truly unexpected exceptions propagate\n except Exception as e:\n await logger.aerror(f\"Unexpected error: {e!s}\")\n raise\n else:\n return result\n\n def _preprocess_schema(self, schema):\n \"\"\"Preprocess schema to ensure correct data types for build_model_from_schema.\"\"\"\n processed_schema = []\n for field in schema:\n processed_field = {\n \"name\": str(field.get(\"name\", \"field\")),\n \"type\": str(field.get(\"type\", \"str\")),\n \"description\": str(field.get(\"description\", \"\")),\n \"multiple\": field.get(\"multiple\", False),\n }\n # Ensure multiple is handled correctly\n if isinstance(processed_field[\"multiple\"], str):\n processed_field[\"multiple\"] = processed_field[\"multiple\"].lower() in [\n \"true\",\n \"1\",\n \"t\",\n \"y\",\n \"yes\",\n ]\n processed_schema.append(processed_field)\n return processed_schema\n\n async def build_structured_output_base(self, content: str):\n \"\"\"Build structured output with optional BaseModel validation.\"\"\"\n json_pattern = r\"\\{.*\\}\"\n schema_error_msg = \"Try setting an output schema\"\n\n # Try to parse content as JSON first\n json_data = None\n try:\n json_data = json.loads(content)\n except json.JSONDecodeError:\n json_match = re.search(json_pattern, content, re.DOTALL)\n if json_match:\n try:\n json_data = json.loads(json_match.group())\n except json.JSONDecodeError:\n return {\"content\": content, \"error\": schema_error_msg}\n else:\n return {\"content\": content, \"error\": schema_error_msg}\n\n # If no output schema provided, return parsed JSON without validation\n if not hasattr(self, \"output_schema\") or not self.output_schema or len(self.output_schema) == 0:\n return json_data\n\n # Use BaseModel validation with schema\n try:\n processed_schema = self._preprocess_schema(self.output_schema)\n 
output_model = build_model_from_schema(processed_schema)\n\n # Validate against the schema\n if isinstance(json_data, list):\n # Multiple objects\n validated_objects = []\n for item in json_data:\n try:\n validated_obj = output_model.model_validate(item)\n validated_objects.append(validated_obj.model_dump())\n except ValidationError as e:\n await logger.aerror(f\"Validation error for item: {e}\")\n # Include invalid items with error info\n validated_objects.append({\"data\": item, \"validation_error\": str(e)})\n return validated_objects\n\n # Single object\n try:\n validated_obj = output_model.model_validate(json_data)\n return [validated_obj.model_dump()] # Return as list for consistency\n except ValidationError as e:\n await logger.aerror(f\"Validation error: {e}\")\n return [{\"data\": json_data, \"validation_error\": str(e)}]\n\n except (TypeError, ValueError) as e:\n await logger.aerror(f\"Error building structured output: {e}\")\n # Fallback to parsed JSON without validation\n return json_data\n\n async def json_response(self) -> Data:\n \"\"\"Convert agent response to structured JSON Data output with schema validation.\"\"\"\n # Always use structured chat agent for JSON response mode for better JSON formatting\n try:\n system_components = []\n\n # 1. Agent Instructions (system_prompt)\n agent_instructions = getattr(self, \"system_prompt\", \"\") or \"\"\n if agent_instructions:\n system_components.append(f\"{agent_instructions}\")\n\n # 2. Format Instructions\n format_instructions = getattr(self, \"format_instructions\", \"\") or \"\"\n if format_instructions:\n system_components.append(f\"Format instructions: {format_instructions}\")\n\n # 3. Schema Information from BaseModel\n if hasattr(self, \"output_schema\") and self.output_schema and len(self.output_schema) > 0:\n try:\n processed_schema = self._preprocess_schema(self.output_schema)\n output_model = build_model_from_schema(processed_schema)\n schema_dict = output_model.model_json_schema()\n schema_info = (\n \"You are given some text that may include format instructions, \"\n \"explanations, or other content alongside a JSON schema.\\n\\n\"\n \"Your task:\\n\"\n \"- Extract only the JSON schema.\\n\"\n \"- Return it as valid JSON.\\n\"\n \"- Do not include format instructions, explanations, or extra text.\\n\\n\"\n \"Input:\\n\"\n f\"{json.dumps(schema_dict, indent=2)}\\n\\n\"\n \"Output (only JSON schema):\"\n )\n system_components.append(schema_info)\n except (ValidationError, ValueError, TypeError, KeyError) as e:\n await logger.aerror(f\"Could not build schema for prompt: {e}\", exc_info=True)\n\n # Combine all components\n combined_instructions = \"\\n\\n\".join(system_components) if system_components else \"\"\n llm_model, self.chat_history, self.tools = await self.get_agent_requirements()\n self.set(\n llm=llm_model,\n tools=self.tools or [],\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=combined_instructions,\n )\n\n # Create and run structured chat agent\n try:\n structured_agent = self.create_agent_runnable()\n except (NotImplementedError, ValueError, TypeError) as e:\n await logger.aerror(f\"Error with structured chat agent: {e}\")\n raise\n try:\n result = await self.run_agent(structured_agent)\n except (\n ExceptionWithMessageError,\n ValueError,\n TypeError,\n RuntimeError,\n ) as e:\n await logger.aerror(f\"Error with structured agent result: {e}\")\n raise\n # Extract content from structured agent result\n if hasattr(result, \"content\"):\n content = result.content\n elif 
hasattr(result, \"text\"):\n content = result.text\n else:\n content = str(result)\n\n except (\n ExceptionWithMessageError,\n ValueError,\n TypeError,\n NotImplementedError,\n AttributeError,\n ) as e:\n await logger.aerror(f\"Error with structured chat agent: {e}\")\n # Fallback to regular agent\n content_str = \"No content returned from agent\"\n return Data(data={\"content\": content_str, \"error\": str(e)})\n\n # Process with structured output validation\n try:\n structured_output = await self.build_structured_output_base(content)\n\n # Handle different output formats\n if isinstance(structured_output, list) and structured_output:\n if len(structured_output) == 1:\n return Data(data=structured_output[0])\n return Data(data={\"results\": structured_output})\n if isinstance(structured_output, dict):\n return Data(data=structured_output)\n return Data(data={\"content\": content})\n\n except (ValueError, TypeError) as e:\n await logger.aerror(f\"Error in structured output processing: {e}\")\n return Data(data={\"content\": content, \"error\": str(e)})\n\n async def get_memory_data(self):\n # TODO: This is a temporary fix to avoid message duplication. We should develop a function for this.\n messages = (\n await MemoryComponent(**self.get_base_args())\n .set(\n session_id=self.graph.session_id,\n context_id=self.context_id,\n order=\"Ascending\",\n n_messages=self.n_messages,\n )\n .retrieve_messages()\n )\n return [\n message for message in messages if getattr(message, \"id\", None) != getattr(self.input_value, \"id\", None)\n ]\n\n async def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except (AttributeError, ValueError, TypeError, RuntimeError) as e:\n await logger.aerror(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n # Filter out json_mode and only use attributes that exist on this component\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n if build_config is not None and field in build_config:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in 
build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n build_config[\"agent_llm\"][\"display_name\"] = \"Model Provider\"\n elif field_value == \"connect_other_models\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*MODEL_PROVIDERS_LIST],\n real_time_refresh=True,\n refresh_button=False,\n input_types=[\"LanguageModel\"],\n placeholder=\"Awaiting model input.\",\n options_metadata=[MODELS_METADATA[key] for key in MODEL_PROVIDERS_LIST if key in MODELS_METADATA],\n external_options={\n \"fields\": {\n \"data\": {\n \"node\": {\n \"name\": \"connect_other_models\",\n \"display_name\": \"Connect other models\",\n \"icon\": \"CornerDownLeft\",\n },\n }\n },\n },\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = 
provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\",\n tool_description=description,\n # here we do not use the shared callbacks as we are exposing the agent as a tool\n callbacks=self.get_langchain_callbacks(),\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n\n return tools\n" + "value": "import json\nimport re\n\nfrom langchain_core.tools import StructuredTool, Tool\nfrom pydantic import ValidationError\n\nfrom lfx.base.agents.agent import LCToolsAgentComponent\nfrom lfx.base.agents.events import ExceptionWithMessageError\nfrom lfx.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODEL_PROVIDERS_LIST,\n MODELS_METADATA,\n)\nfrom lfx.base.models.model_utils import get_model_name\nfrom lfx.components.helpers import CurrentDateComponent\nfrom lfx.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom lfx.components.models_and_agents.memory import MemoryComponent\nfrom lfx.custom.custom_component.component import get_component_toolkit\nfrom lfx.custom.utils import update_component_build_config\nfrom lfx.helpers.base_model import build_model_from_schema\nfrom lfx.inputs.inputs import BoolInput, SecretStrInput, StrInput\nfrom lfx.io import DropdownInput, IntInput, MessageTextInput, MultilineInput, Output, TableInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.data import Data\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.schema.message import Message\nfrom lfx.schema.table import EditMode\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n documentation: str = \"https://docs.langflow.org/agents\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n # Filter out json_mode from OpenAI inputs since we handle structured output differently\n if \"OpenAI\" in MODEL_PROVIDERS_DICT:\n openai_inputs_filtered = [\n input_field\n for input_field in MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"]\n if not (hasattr(input_field, \"name\") and input_field.name == \"json_mode\")\n ]\n else:\n openai_inputs_filtered = []\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n 
display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*MODEL_PROVIDERS_LIST],\n value=\"OpenAI\",\n real_time_refresh=True,\n refresh_button=False,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in MODEL_PROVIDERS_LIST if key in MODELS_METADATA],\n external_options={\n \"fields\": {\n \"data\": {\n \"node\": {\n \"name\": \"connect_other_models\",\n \"display_name\": \"Connect other models\",\n \"icon\": \"CornerDownLeft\",\n }\n }\n },\n },\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"API Key\",\n info=\"The API key to use for the model.\",\n required=True,\n ),\n StrInput(\n name=\"base_url\",\n display_name=\"Base URL\",\n info=\"The base URL of the API.\",\n required=True,\n show=False,\n ),\n StrInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"The project ID of the model.\",\n required=True,\n show=False,\n ),\n IntInput(\n name=\"max_output_tokens\",\n display_name=\"Max Output Tokens\",\n info=\"The maximum number of tokens to generate.\",\n show=False,\n ),\n *openai_inputs_filtered,\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n MessageTextInput(\n name=\"context_id\",\n display_name=\"Context ID\",\n info=\"The context ID of the chat. Adds an extra layer to the local memory.\",\n value=\"\",\n advanced=True,\n ),\n IntInput(\n name=\"n_messages\",\n display_name=\"Number of Chat History Messages\",\n value=100,\n info=\"Number of chat history messages to retrieve.\",\n advanced=True,\n show=True,\n ),\n MultilineInput(\n name=\"format_instructions\",\n display_name=\"Output Format Instructions\",\n info=\"Generic Template for structured output formatting. Valid only with Structured response.\",\n value=(\n \"You are an AI that extracts structured JSON objects from unstructured text. \"\n \"Use a predefined schema with expected types (str, int, float, bool, dict). \"\n \"Extract ALL relevant instances that match the schema - if multiple patterns exist, capture them all. \"\n \"Fill missing or ambiguous values with defaults: null for missing values. \"\n \"Remove exact duplicates but keep variations that have different field values. \"\n \"Always return valid JSON in the expected format, never throw errors. \"\n \"If multiple objects can be extracted, return them all in the structured format.\"\n ),\n advanced=True,\n ),\n TableInput(\n name=\"output_schema\",\n display_name=\"Output Schema\",\n info=(\n \"Schema Validation: Define the structure and data types for structured output. 
\"\n \"No validation if no output schema.\"\n ),\n advanced=True,\n required=False,\n value=[],\n table_schema=[\n {\n \"name\": \"name\",\n \"display_name\": \"Name\",\n \"type\": \"str\",\n \"description\": \"Specify the name of the output field.\",\n \"default\": \"field\",\n \"edit_mode\": EditMode.INLINE,\n },\n {\n \"name\": \"description\",\n \"display_name\": \"Description\",\n \"type\": \"str\",\n \"description\": \"Describe the purpose of the output field.\",\n \"default\": \"description of field\",\n \"edit_mode\": EditMode.POPOVER,\n },\n {\n \"name\": \"type\",\n \"display_name\": \"Type\",\n \"type\": \"str\",\n \"edit_mode\": EditMode.INLINE,\n \"description\": (\"Indicate the data type of the output field (e.g., str, int, float, bool, dict).\"),\n \"options\": [\"str\", \"int\", \"float\", \"bool\", \"dict\"],\n \"default\": \"str\",\n },\n {\n \"name\": \"multiple\",\n \"display_name\": \"As List\",\n \"type\": \"boolean\",\n \"description\": \"Set to True if this output field should be a list of the specified type.\",\n \"default\": \"False\",\n \"edit_mode\": EditMode.INLINE,\n },\n ],\n ),\n *LCToolsAgentComponent.get_base_inputs(),\n # removed memory inputs from agent component\n # *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [\n Output(name=\"response\", display_name=\"Response\", method=\"message_response\"),\n ]\n\n async def get_agent_requirements(self):\n \"\"\"Get the agent requirements for the agent.\"\"\"\n llm_model, display_name = await self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n await logger.adebug(f\"Retrieved {len(self.chat_history)} chat history messages\")\n if isinstance(self.chat_history, Message):\n self.chat_history = [self.chat_history]\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Set shared callbacks for tracing the tools used by the agent\n self.set_tools_callbacks(self.tools, self._get_shared_callbacks())\n\n return llm_model, self.chat_history, self.tools\n\n async def message_response(self) -> Message:\n try:\n llm_model, self.chat_history, self.tools = await self.get_agent_requirements()\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools or [],\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n result = await self.run_agent(agent)\n\n # Store result for potential JSON output\n self._agent_result = result\n\n except (ValueError, TypeError, KeyError) as e:\n await logger.aerror(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n await logger.aerror(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n # Avoid catching blind Exception; let truly unexpected exceptions propagate\n except 
Exception as e:\n await logger.aerror(f\"Unexpected error: {e!s}\")\n raise\n else:\n return result\n\n def _preprocess_schema(self, schema):\n \"\"\"Preprocess schema to ensure correct data types for build_model_from_schema.\"\"\"\n processed_schema = []\n for field in schema:\n processed_field = {\n \"name\": str(field.get(\"name\", \"field\")),\n \"type\": str(field.get(\"type\", \"str\")),\n \"description\": str(field.get(\"description\", \"\")),\n \"multiple\": field.get(\"multiple\", False),\n }\n # Ensure multiple is handled correctly\n if isinstance(processed_field[\"multiple\"], str):\n processed_field[\"multiple\"] = processed_field[\"multiple\"].lower() in [\n \"true\",\n \"1\",\n \"t\",\n \"y\",\n \"yes\",\n ]\n processed_schema.append(processed_field)\n return processed_schema\n\n async def build_structured_output_base(self, content: str):\n \"\"\"Build structured output with optional BaseModel validation.\"\"\"\n json_pattern = r\"\\{.*\\}\"\n schema_error_msg = \"Try setting an output schema\"\n\n # Try to parse content as JSON first\n json_data = None\n try:\n json_data = json.loads(content)\n except json.JSONDecodeError:\n json_match = re.search(json_pattern, content, re.DOTALL)\n if json_match:\n try:\n json_data = json.loads(json_match.group())\n except json.JSONDecodeError:\n return {\"content\": content, \"error\": schema_error_msg}\n else:\n return {\"content\": content, \"error\": schema_error_msg}\n\n # If no output schema provided, return parsed JSON without validation\n if not hasattr(self, \"output_schema\") or not self.output_schema or len(self.output_schema) == 0:\n return json_data\n\n # Use BaseModel validation with schema\n try:\n processed_schema = self._preprocess_schema(self.output_schema)\n output_model = build_model_from_schema(processed_schema)\n\n # Validate against the schema\n if isinstance(json_data, list):\n # Multiple objects\n validated_objects = []\n for item in json_data:\n try:\n validated_obj = output_model.model_validate(item)\n validated_objects.append(validated_obj.model_dump())\n except ValidationError as e:\n await logger.aerror(f\"Validation error for item: {e}\")\n # Include invalid items with error info\n validated_objects.append({\"data\": item, \"validation_error\": str(e)})\n return validated_objects\n\n # Single object\n try:\n validated_obj = output_model.model_validate(json_data)\n return [validated_obj.model_dump()] # Return as list for consistency\n except ValidationError as e:\n await logger.aerror(f\"Validation error: {e}\")\n return [{\"data\": json_data, \"validation_error\": str(e)}]\n\n except (TypeError, ValueError) as e:\n await logger.aerror(f\"Error building structured output: {e}\")\n # Fallback to parsed JSON without validation\n return json_data\n\n async def json_response(self) -> Data:\n \"\"\"Convert agent response to structured JSON Data output with schema validation.\"\"\"\n # Always use structured chat agent for JSON response mode for better JSON formatting\n try:\n system_components = []\n\n # 1. Agent Instructions (system_prompt)\n agent_instructions = getattr(self, \"system_prompt\", \"\") or \"\"\n if agent_instructions:\n system_components.append(f\"{agent_instructions}\")\n\n # 2. Format Instructions\n format_instructions = getattr(self, \"format_instructions\", \"\") or \"\"\n if format_instructions:\n system_components.append(f\"Format instructions: {format_instructions}\")\n\n # 3. 
Schema Information from BaseModel\n if hasattr(self, \"output_schema\") and self.output_schema and len(self.output_schema) > 0:\n try:\n processed_schema = self._preprocess_schema(self.output_schema)\n output_model = build_model_from_schema(processed_schema)\n schema_dict = output_model.model_json_schema()\n schema_info = (\n \"You are given some text that may include format instructions, \"\n \"explanations, or other content alongside a JSON schema.\\n\\n\"\n \"Your task:\\n\"\n \"- Extract only the JSON schema.\\n\"\n \"- Return it as valid JSON.\\n\"\n \"- Do not include format instructions, explanations, or extra text.\\n\\n\"\n \"Input:\\n\"\n f\"{json.dumps(schema_dict, indent=2)}\\n\\n\"\n \"Output (only JSON schema):\"\n )\n system_components.append(schema_info)\n except (ValidationError, ValueError, TypeError, KeyError) as e:\n await logger.aerror(f\"Could not build schema for prompt: {e}\", exc_info=True)\n\n # Combine all components\n combined_instructions = \"\\n\\n\".join(system_components) if system_components else \"\"\n llm_model, self.chat_history, self.tools = await self.get_agent_requirements()\n self.set(\n llm=llm_model,\n tools=self.tools or [],\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=combined_instructions,\n )\n\n # Create and run structured chat agent\n try:\n structured_agent = self.create_agent_runnable()\n except (NotImplementedError, ValueError, TypeError) as e:\n await logger.aerror(f\"Error with structured chat agent: {e}\")\n raise\n try:\n result = await self.run_agent(structured_agent)\n except (\n ExceptionWithMessageError,\n ValueError,\n TypeError,\n RuntimeError,\n ) as e:\n await logger.aerror(f\"Error with structured agent result: {e}\")\n raise\n # Extract content from structured agent result\n if hasattr(result, \"content\"):\n content = result.content\n elif hasattr(result, \"text\"):\n content = result.text\n else:\n content = str(result)\n\n except (\n ExceptionWithMessageError,\n ValueError,\n TypeError,\n NotImplementedError,\n AttributeError,\n ) as e:\n await logger.aerror(f\"Error with structured chat agent: {e}\")\n # Fallback to regular agent\n content_str = \"No content returned from agent\"\n return Data(data={\"content\": content_str, \"error\": str(e)})\n\n # Process with structured output validation\n try:\n structured_output = await self.build_structured_output_base(content)\n\n # Handle different output formats\n if isinstance(structured_output, list) and structured_output:\n if len(structured_output) == 1:\n return Data(data=structured_output[0])\n return Data(data={\"results\": structured_output})\n if isinstance(structured_output, dict):\n return Data(data=structured_output)\n return Data(data={\"content\": content})\n\n except (ValueError, TypeError) as e:\n await logger.aerror(f\"Error in structured output processing: {e}\")\n return Data(data={\"content\": content, \"error\": str(e)})\n\n async def get_memory_data(self):\n # TODO: This is a temporary fix to avoid message duplication. 
We should develop a function for this.\n messages = (\n await MemoryComponent(**self.get_base_args())\n .set(\n session_id=self.graph.session_id,\n context_id=self.context_id,\n order=\"Ascending\",\n n_messages=self.n_messages,\n )\n .retrieve_messages()\n )\n return [\n message for message in messages if getattr(message, \"id\", None) != getattr(self.input_value, \"id\", None)\n ]\n\n async def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except (AttributeError, ValueError, TypeError, RuntimeError) as e:\n await logger.aerror(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n # Filter out json_mode and only use attributes that exist on this component\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n if build_config is not None and field in build_config:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in 
MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n build_config[\"agent_llm\"][\"display_name\"] = \"Model Provider\"\n elif field_value == \"connect_other_models\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*MODEL_PROVIDERS_LIST],\n real_time_refresh=True,\n refresh_button=False,\n input_types=[\"LanguageModel\"],\n placeholder=\"Awaiting model input.\",\n options_metadata=[MODELS_METADATA[key] for key in MODEL_PROVIDERS_LIST if key in MODELS_METADATA],\n external_options={\n \"fields\": {\n \"data\": {\n \"node\": {\n \"name\": \"connect_other_models\",\n \"display_name\": \"Connect other models\",\n \"icon\": \"CornerDownLeft\",\n },\n }\n },\n },\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # Remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description is deprecated and will be removed\n description = f\"{agent_description}{tools_names}\"\n\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\",\n tool_description=description,\n # Here we do not use the shared callbacks as we are exposing the agent as a tool\n 
callbacks=self.get_langchain_callbacks(),\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n\n return tools\n" }, "context_id": { "_input_type": "MessageTextInput", @@ -2468,6 +2539,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -2493,6 +2565,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "You are an AI that extracts structured JSON objects from unstructured text. Use a predefined schema with expected types (str, int, float, bool, dict). Extract ALL relevant instances that match the schema - if multiple patterns exist, capture them all. Fill missing or ambiguous values with defaults: null for missing values. Remove exact duplicates but keep variations that have different field values. Always return valid JSON in the expected format, never throw errors. If multiple objects can be extracted, return them all in the structured format." }, @@ -2511,6 +2584,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "bool", "value": true }, @@ -2534,6 +2608,7 @@ "tool_mode": true, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -2552,6 +2627,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "int", "value": 15 }, @@ -2570,6 +2646,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "int", "value": "" }, @@ -2588,6 +2665,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "int", "value": 5 }, @@ -2612,6 +2690,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "int", "value": "" }, @@ -2630,6 +2709,7 @@ "title_case": false, "tool_mode": false, "trace_as_input": true, + "track_in_telemetry": false, "type": "dict", "value": {} }, @@ -2642,9 +2722,29 @@ "dynamic": false, "external_options": {}, "info": "To see the model names, first choose a provider. 
Then, enter your API key and click the refresh button next to the model name.", + "load_from_db": false, "name": "model_name", "options": [ - "gpt-4o" + "gpt-4o-mini", + "gpt-4o", + "gpt-4.1", + "gpt-4.1-mini", + "gpt-4.1-nano", + "gpt-4-turbo", + "gpt-4-turbo-preview", + "gpt-4", + "gpt-3.5-turbo", + "gpt-5.1", + "gpt-5", + "gpt-5-mini", + "gpt-5-nano", + "gpt-5-chat-latest", + "o1", + "o3-mini", + "o3", + "o3-pro", + "o4-mini", + "o4-mini-high" ], "options_metadata": [], "placeholder": "", @@ -2655,6 +2755,7 @@ "toggle": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "str", "value": "gpt-4o" }, @@ -2673,6 +2774,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "int", "value": 100 }, @@ -2692,6 +2794,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -2752,6 +2855,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "trigger_icon": "Table", "trigger_text": "Open table", "type": "table", @@ -2773,6 +2877,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -2791,6 +2896,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "int", "value": 1 }, @@ -2816,6 +2922,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "You are the OpenRAG Agent. You answer questions using retrieval, reasoning, and tool use.\nYou have access to several tools. Your job is to determine **which tool to use and when**.\n### Available Tools\n- OpenSearch Retrieval Tool:\n Use this to search the indexed knowledge base. Use when the user asks about product details, internal concepts, processes, architecture, documentation, roadmaps, or anything that may be stored in the index.\n- Conversation History:\n Use this to maintain continuity when the user is referring to previous turns. \n Do not treat history as a factual source.\n- Conversation File Context:\n Use this when the user asks about a document they uploaded or refers directly to its contents.\n- URL Ingestion Tool:\n Use this **only** when the user explicitly asks you to read, summarize, or analyze the content of a URL.\n Do not ingest URLs automatically.\n- Calculator / Expression Evaluation Tool:\n Use this when the user asks to compare numbers, compute estimates, calculate totals, analyze pricing, or answer any question requiring mathematics or quantitative reasoning.\n If the answer requires arithmetic, call the calculator tool rather than calculating internally.\n### Retrieval Decision Rules\nUse OpenSearch **whenever**:\n1. The question may be answered from internal or indexed data.\n2. The user references team names, product names, release plans, configurations, requirements, or official information.\n3. 
The user needs a factual, grounded answer.\nDo **not** use retrieval if:\n- The question is purely creative (e.g., storytelling, analogies) or personal preference.\n- The user simply wants text reformatted or rewritten from what is already present in the conversation.\nWhen uncertain → **Retrieve.** Retrieval is low risk and improves grounding.\n### URL Ingestion Rules\nOnly ingest URLs when the user explicitly says:\n- \"Read this link\"\n- \"Summarize this webpage\"\n- \"What does this site say?\"\n- \"Ingest this URL\"\nIf unclear → ask a clarifying question.\n### Calculator Usage Rules\nUse the calculator when:\n- Performing arithmetic\n- Estimating totals\n- Comparing values\n- Modeling cost, time, effort, scale, or projections\nDo not perform math internally. **Call the calculator tool instead.**\n### Answer Construction Rules\n1. When asked: \"What is OpenRAG\", answer the following:\n\"OpenRAG is an open-source package for building agentic RAG systems. It supports integration with a wide range of orchestration tools, vector databases, and LLM providers. OpenRAG connects and amplifies three popular, proven open-source projects into one powerful platform:\n**Langflow** – Langflow is a powerful tool to build and deploy AI agents and MCP servers. [Read more](https://www.langflow.org/)\n**OpenSearch** – OpenSearch is an open source, search and observability suite that brings order to unstructured data at scale. [Read more](https://opensearch.org/)\n**Docling** – Docling simplifies document processing with advanced PDF understanding, OCR support, and seamless AI integrations. Parse PDFs, DOCX, PPTX, images & more. [Read more](https://www.docling.ai/)\"\n2. Synthesize retrieved or ingested content in your own words.\n3. Support factual claims with citations in the format:\n (Source: )\n4. If no supporting evidence is found:\n Say: \"No relevant supporting sources were found for that request.\"\n5. Never invent facts or hallucinate details.\n6. Be concise, direct, and confident. \n7. Do not reveal internal chain-of-thought." 
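
The truncate_input_tokens and input_text inputs added to the embedding component in this patch correspond to embed-request parameters of the watsonx.ai text embeddings API. A minimal sketch of the mapping, assuming langchain-ibm and ibm-watsonx-ai are installed; the model id, credentials, and project id are placeholders, and the component's actual wiring may differ:

    from ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames
    from langchain_ibm import WatsonxEmbeddings

    # truncate_input_tokens -> cap each input at N tokens before embedding;
    # input_text -> ask watsonx.ai to echo the original text in each result.
    embed_params = {
        EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: 200,
        EmbedTextParamsMetaNames.RETURN_OPTIONS: {"input_text": True},
    }

    embeddings = WatsonxEmbeddings(
        model_id="ibm/granite-embedding-107m-multilingual",  # placeholder model id
        url="https://us-south.ml.cloud.ibm.com",
        apikey="WATSONX_API_KEY",  # placeholder credential
        project_id="WATSONX_PROJECT_ID",  # placeholder project
        params=embed_params,
    )

    vectors = embeddings.embed_documents(["OpenRAG connects Langflow, OpenSearch, and Docling."])

The 200-token truncation mirrors the default value shipped in the flow JSON above, where input_text likewise defaults to true.
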
}, @@ -2844,6 +2951,7 @@ "slider_input": false, "title_case": false, "tool_mode": false, + "track_in_telemetry": false, "type": "slider", "value": 0.1 }, @@ -2862,6 +2970,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "int", "value": 700 }, @@ -2882,6 +2991,7 @@ "show": true, "title_case": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "other", "value": "" }, @@ -2900,6 +3010,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "bool", "value": true } @@ -2919,7 +3030,7 @@ "x": 1629.578423203229, "y": 451.946400444934 }, - "selected": true, + "selected": false, "type": "genericNode" }, { @@ -2941,7 +3052,7 @@ ], "frozen": false, "icon": "calculator", - "last_updated": "2025-11-11T21:40:50.133Z", + "last_updated": "2025-11-24T18:02:41.468Z", "legacy": false, "metadata": { "code_hash": "5fcfa26be77d", @@ -3037,6 +3148,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "tools", "value": [ { @@ -3074,24 +3186,24 @@ "width": 320 }, "position": { - "x": 720.5458956206228, - "y": 1220.8583072055585 + "x": 740.6385299138288, + "y": 1287.3185590984713 }, "selected": false, "type": "genericNode" } ], "viewport": { - "x": -37.553133726465376, - "y": 28.26764925979927, - "zoom": 0.6144649479685779 + "x": 91.72060325190853, + "y": -32.331644762411656, + "zoom": 0.6206869895512208 } }, "description": "OpenRAG OpenSearch Agent", "endpoint_name": null, "id": "1098eea1-6649-4e1d-aed1-b77249fb8dd0", "is_component": false, - "last_tested_version": "1.7.0", + "last_tested_version": "1.7.0.dev19", "name": "OpenRAG OpenSearch Agent", "tags": [ "assistants", diff --git a/flows/openrag_nudges.json b/flows/openrag_nudges.json index 5bbfef8d..702353a3 100644 --- a/flows/openrag_nudges.json +++ b/flows/openrag_nudges.json @@ -174,6 +174,7 @@ }, { "animated": false, + "className": "", "data": { "sourceHandle": { "dataType": "TextInput", @@ -1784,23 +1785,32 @@ "request_timeout", "max_retries", "show_progress_bar", - "model_kwargs" + "model_kwargs", + "truncate_input_tokens", + "input_text" ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-11T21:49:56.409Z", "legacy": false, "metadata": { - "code_hash": "bb03f97be707", + "code_hash": "c5e0a4535a27", "dependencies": { "dependencies": [ + { + "name": "requests", + "version": "2.32.5" + }, + { + "name": "ibm_watsonx_ai", + "version": "1.4.2" + }, { "name": "langchain_openai", "version": "0.3.23" }, { "name": "lfx", - "version": null + "version": "0.2.0.dev19" }, { "name": "langchain_ollama", @@ -1815,9 +1825,9 @@ "version": "0.3.19" } ], - "total_dependencies": 5 + "total_dependencies": 7 }, - "module": "lfx.components.models.embedding_model.EmbeddingModelComponent" + "module": "custom_components.embedding_model" }, "minimized": false, "output_types": [], @@ -1844,7 +1854,7 @@ "_type": "Component", "api_base": { "_input_type": "MessageTextInput", - "advanced": false, + "advanced": true, "display_name": "API Base URL", "dynamic": false, "info": "Base URL for the API. 
Leave empty for default.", @@ -1862,6 +1872,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -1880,6 +1891,7 @@ "required": true, "show": true, "title_case": false, + "track_in_telemetry": false, "type": "str", "value": "OPENAI_API_KEY" }, @@ -1910,6 +1922,7 @@ "toggle": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "str", "value": "https://us-south.ml.cloud.ibm.com" }, @@ -1928,6 +1941,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "int", "value": 1000 }, @@ -1947,7 +1961,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import IBM_WATSONX_URLS, WATSONX_EMBEDDING_MODEL_NAMES\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). 
Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n ]\n\n def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n return OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. 
\"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n return OllamaEmbeddings(\n model=model,\n base_url=transformed_base_url or \"http://localhost:11434\",\n **model_kwargs,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n params = {\n \"model_id\": model,\n \"url\": base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\",\n \"apikey\": api_key,\n }\n\n params[\"project_id\"] = project_id\n\n return WatsonxEmbeddings(**params)\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = WATSONX_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = WATSONX_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider 
== \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). 
Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n return OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n if provider == 
\"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n return OllamaEmbeddings(\n model=model,\n base_url=transformed_base_url or \"http://localhost:11434\",\n **model_kwargs,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n from ibm_watsonx_ai import APIClient, Credentials\n\n credentials = Credentials(\n api_key=self.api_key,\n url=base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\",\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n return WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n 
build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" }, "dimensions": { "_input_type": "IntInput", @@ -1964,9 +1978,29 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "int", "value": "" }, + "input_text": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Include the original text in the output", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": 
"input_text", + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, "max_retries": { "_input_type": "IntInput", "advanced": true, @@ -1982,6 +2016,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "int", "value": 3 }, @@ -1996,16 +2031,21 @@ "info": "Select the embedding model to use", "name": "model", "options": [ - "text-embedding-3-small" + "text-embedding-3-small", + "text-embedding-3-large", + "text-embedding-ada-002" ], "options_metadata": [], "placeholder": "", + "real_time_refresh": true, + "refresh_button": true, "required": false, "show": true, "title_case": false, "toggle": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "str", "value": "text-embedding-3-small" }, @@ -2024,6 +2064,7 @@ "title_case": false, "tool_mode": false, "trace_as_input": true, + "track_in_telemetry": false, "type": "dict", "value": {} }, @@ -2048,6 +2089,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -2071,6 +2113,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -2108,6 +2151,7 @@ "toggle": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "str", "value": "OpenAI" }, @@ -2126,6 +2170,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "float", "value": "" }, @@ -2144,8 +2189,28 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "bool", "value": false + }, + "truncate_input_tokens": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Truncate Input Tokens", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "truncate_input_tokens", + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 200 } }, "tool_mode": false @@ -2156,7 +2221,7 @@ "dragging": false, "id": "EmbeddingModel-26o1e", "measured": { - "height": 451, + "height": 369, "width": 320 }, "position": { @@ -2195,7 +2260,7 @@ ], "frozen": false, "icon": "brain-circuit", - "last_updated": "2025-11-11T21:51:53.901Z", + "last_updated": "2025-11-24T18:03:45.208Z", "legacy": false, "metadata": { "code_hash": "694ffc4b17b8", @@ -2589,7 +2654,7 @@ "x": 2151.7120459180214, "y": 419.8653051925172 }, - "selected": true, + "selected": false, "type": "genericNode" }, { @@ -2707,16 +2772,16 @@ } ], "viewport": { - "x": -98.41517694131858, - "y": 122.09140842027057, - "zoom": 0.5043780525896302 + "x": -206.89994136968403, + "y": 74.41941219497068, + "zoom": 0.6126010241500153 } }, "description": "OpenRAG OpenSearch Nudges generator, based on the OpenSearch documents and the chat history.", "endpoint_name": null, "id": "ebc01d31-1976-46ce-a385-b0240327226c", "is_component": false, - "last_tested_version": "1.7.0", + "last_tested_version": "1.7.0.dev19", "name": "OpenRAG OpenSearch Nudges", "tags": [ "assistants", diff --git a/flows/openrag_url_mcp.json b/flows/openrag_url_mcp.json index 278ee2c8..0333faf1 100644 --- a/flows/openrag_url_mcp.json +++ b/flows/openrag_url_mcp.json @@ -231,6 
+231,8 @@ "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-0XHyoœ,œinputTypesœ:[œDataœ,œDataFrameœ,œMessageœ],œtypeœ:œotherœ}" }, { + "animated": false, + "className": "", "data": { "sourceHandle": { "dataType": "EmbeddingModel", @@ -250,6 +252,7 @@ } }, "id": "xy-edge__EmbeddingModel-XjV5v{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-XjV5vœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}-OpenSearchHybrid-Ve6bS{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchHybrid-Ve6bSœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}", + "selected": false, "source": "EmbeddingModel-XjV5v", "sourceHandle": "{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-XjV5vœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}", "target": "OpenSearchHybrid-Ve6bS", @@ -1553,7 +1556,7 @@ ], "frozen": false, "icon": "table", - "last_updated": "2025-11-11T15:07:55.352Z", + "last_updated": "2025-11-24T17:58:32.464Z", "legacy": false, "lf_version": "1.6.0", "metadata": { @@ -1969,7 +1972,7 @@ ], "frozen": false, "icon": "table", - "last_updated": "2025-11-11T15:07:55.353Z", + "last_updated": "2025-11-24T17:58:32.465Z", "legacy": false, "lf_version": "1.6.0", "metadata": { @@ -2385,7 +2388,7 @@ ], "frozen": false, "icon": "table", - "last_updated": "2025-11-11T15:07:55.353Z", + "last_updated": "2025-11-24T17:58:32.465Z", "legacy": false, "metadata": { "code_hash": "b4d6b19b6eef", @@ -3355,23 +3358,32 @@ "request_timeout", "max_retries", "show_progress_bar", - "model_kwargs" + "model_kwargs", + "truncate_input_tokens", + "input_text" ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-11T15:08:05.989Z", "legacy": false, "metadata": { - "code_hash": "bb03f97be707", + "code_hash": "c5e0a4535a27", "dependencies": { "dependencies": [ + { + "name": "requests", + "version": "2.32.5" + }, + { + "name": "ibm_watsonx_ai", + "version": "1.4.2" + }, { "name": "langchain_openai", "version": "0.3.23" }, { "name": "lfx", - "version": null + "version": "0.2.0.dev19" }, { "name": "langchain_ollama", @@ -3386,9 +3398,9 @@ "version": "0.3.19" } ], - "total_dependencies": 5 + "total_dependencies": 7 }, - "module": "lfx.components.models.embedding_model.EmbeddingModelComponent" + "module": "custom_components.embedding_model" }, "minimized": false, "output_types": [], @@ -3433,6 +3445,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -3451,6 +3464,7 @@ "required": true, "show": true, "title_case": false, + "track_in_telemetry": false, "type": "str", "value": "OPENAI_API_KEY" }, @@ -3481,6 +3495,7 @@ "toggle": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "str", "value": "https://us-south.ml.cloud.ibm.com" }, @@ -3499,6 +3514,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "int", "value": 1000 }, @@ -3518,7 +3534,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import IBM_WATSONX_URLS, WATSONX_EMBEDDING_MODEL_NAMES\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom 
lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n ]\n\n def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n return OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. 
\"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n return OllamaEmbeddings(\n model=model,\n base_url=transformed_base_url or \"http://localhost:11434\",\n **model_kwargs,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n params = {\n \"model_id\": model,\n \"url\": base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\",\n \"apikey\": api_key,\n }\n\n params[\"project_id\"] = project_id\n\n return WatsonxEmbeddings(**params)\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = WATSONX_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = WATSONX_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider 
== \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). 
Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n return OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n if provider == 
\"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n return OllamaEmbeddings(\n model=model,\n base_url=transformed_base_url or \"http://localhost:11434\",\n **model_kwargs,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n from ibm_watsonx_ai import APIClient, Credentials\n\n credentials = Credentials(\n api_key=self.api_key,\n url=base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\",\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n return WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n 
build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" }, "dimensions": { "_input_type": "IntInput", @@ -3535,9 +3551,29 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "int", "value": "" }, + "input_text": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Include the original text in the output", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": 
"input_text", + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, "max_retries": { "_input_type": "IntInput", "advanced": true, @@ -3553,6 +3589,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "int", "value": 3 }, @@ -3573,12 +3610,15 @@ ], "options_metadata": [], "placeholder": "", + "real_time_refresh": true, + "refresh_button": true, "required": false, "show": true, "title_case": false, "toggle": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "str", "value": "text-embedding-3-small" }, @@ -3597,6 +3637,7 @@ "title_case": false, "tool_mode": false, "trace_as_input": true, + "track_in_telemetry": false, "type": "dict", "value": {} }, @@ -3621,6 +3662,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -3644,6 +3686,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -3681,6 +3724,7 @@ "toggle": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "str", "value": "OpenAI" }, @@ -3699,6 +3743,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "float", "value": "" }, @@ -3717,8 +3762,28 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "bool", "value": false + }, + "truncate_input_tokens": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Truncate Input Tokens", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "truncate_input_tokens", + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 200 } }, "tool_mode": false @@ -3736,22 +3801,22 @@ "x": 2066.3681917820168, "y": 2053.0594731518368 }, - "selected": false, + "selected": true, "type": "genericNode" } ], "viewport": { - "x": -376.03081395180425, - "y": -548.1133759270245, - "zoom": 0.5197673958703216 + "x": -425.92312760235086, + "y": -699.5243463797829, + "zoom": 0.5890166507565666 } }, "description": "This flow is to ingest the URL to open search.", "endpoint_name": null, "id": "72c3d17c-2dac-4a73-b48a-6518473d7830", "is_component": false, - "last_tested_version": "1.7.0", "mcp_enabled": true, + "last_tested_version": "1.7.0.dev19", "name": "OpenSearch URL Ingestion Flow", "tags": [ "openai", From 6495cd96e98d0b5ae7ebbc87055fb282eceb39b6 Mon Sep 17 00:00:00 2001 From: phact Date: Mon, 24 Nov 2025 18:04:13 -0500 Subject: [PATCH 03/43] switch to langflowai/ in dockerhub --- .github/workflows/build-multiarch.yml | 64 +++++++++++++-------------- Makefile | 10 ++--- docker-compose-cpu.yml | 8 ++-- docker-compose.yml | 8 ++-- 4 files changed, 45 insertions(+), 45 deletions(-) diff --git a/.github/workflows/build-multiarch.yml b/.github/workflows/build-multiarch.yml index b8ee1a28..f516b62d 100644 --- a/.github/workflows/build-multiarch.yml +++ b/.github/workflows/build-multiarch.yml @@ -53,13 +53,13 @@ jobs: # backend - image: backend file: ./Dockerfile.backend - tag: phact/openrag-backend + tag: langflowai/openrag-backend platform: linux/amd64 arch: amd64 runs-on: 
From 6495cd96e98d0b5ae7ebbc87055fb282eceb39b6 Mon Sep 17 00:00:00 2001
From: phact
Date: Mon, 24 Nov 2025 18:04:13 -0500
Subject: [PATCH 03/43] switch to langflowai/ in dockerhub

---
 .github/workflows/build-multiarch.yml | 64 +++++++++++++--------------
 Makefile                              | 10 ++---
 docker-compose-cpu.yml                |  8 ++--
 docker-compose.yml                    |  8 ++--
 4 files changed, 45 insertions(+), 45 deletions(-)

diff --git a/.github/workflows/build-multiarch.yml b/.github/workflows/build-multiarch.yml
index b8ee1a28..f516b62d 100644
--- a/.github/workflows/build-multiarch.yml
+++ b/.github/workflows/build-multiarch.yml
@@ -53,13 +53,13 @@ jobs:
           # backend
           - image: backend
             file: ./Dockerfile.backend
-            tag: phact/openrag-backend
+            tag: langflowai/openrag-backend
             platform: linux/amd64
             arch: amd64
             runs-on: ubuntu-latest-16-cores
           - image: backend
             file: ./Dockerfile.backend
-            tag: phact/openrag-backend
+            tag: langflowai/openrag-backend
             platform: linux/arm64
             arch: arm64
             runs-on: [self-hosted, linux, ARM64, langflow-ai-arm64-2]
@@ -67,13 +67,13 @@
           # frontend
           - image: frontend
             file: ./Dockerfile.frontend
-            tag: phact/openrag-frontend
+            tag: langflowai/openrag-frontend
             platform: linux/amd64
             arch: amd64
             runs-on: ubuntu-latest-16-cores
           - image: frontend
             file: ./Dockerfile.frontend
-            tag: phact/openrag-frontend
+            tag: langflowai/openrag-frontend
             platform: linux/arm64
             arch: arm64
             runs-on: [self-hosted, linux, ARM64, langflow-ai-arm64-2]
@@ -81,13 +81,13 @@
           # langflow
           - image: langflow
             file: ./Dockerfile.langflow
-            tag: phact/openrag-langflow
+            tag: langflowai/openrag-langflow
             platform: linux/amd64
             arch: amd64
             runs-on: ubuntu-latest-16-cores
           - image: langflow
             file: ./Dockerfile.langflow
-            tag: phact/openrag-langflow
+            tag: langflowai/openrag-langflow
             platform: linux/arm64
             arch: arm64
             runs-on: self-hosted
@@ -95,13 +95,13 @@
           # opensearch
           - image: opensearch
             file: ./Dockerfile
-            tag: phact/openrag-opensearch
+            tag: langflowai/openrag-opensearch
             platform: linux/amd64
             arch: amd64
             runs-on: ubuntu-latest-16-cores
           - image: opensearch
             file: ./Dockerfile
-            tag: phact/openrag-opensearch
+            tag: langflowai/openrag-opensearch
             platform: linux/arm64
             arch: arm64
             runs-on: [self-hosted, linux, ARM64, langflow-ai-arm64-2]
@@ -165,40 +165,40 @@ jobs:
          VERSION=${{ steps.version.outputs.version }}

          # Create versioned tags
-         docker buildx imagetools create -t phact/openrag-backend:$VERSION \
-           phact/openrag-backend:$VERSION-amd64 \
-           phact/openrag-backend:$VERSION-arm64
+         docker buildx imagetools create -t langflowai/openrag-backend:$VERSION \
+           langflowai/openrag-backend:$VERSION-amd64 \
+           langflowai/openrag-backend:$VERSION-arm64

-         docker buildx imagetools create -t phact/openrag-frontend:$VERSION \
-           phact/openrag-frontend:$VERSION-amd64 \
-           phact/openrag-frontend:$VERSION-arm64
+         docker buildx imagetools create -t langflowai/openrag-frontend:$VERSION \
+           langflowai/openrag-frontend:$VERSION-amd64 \
+           langflowai/openrag-frontend:$VERSION-arm64

-         docker buildx imagetools create -t phact/openrag-langflow:$VERSION \
-           phact/openrag-langflow:$VERSION-amd64 \
-           phact/openrag-langflow:$VERSION-arm64
+         docker buildx imagetools create -t langflowai/openrag-langflow:$VERSION \
+           langflowai/openrag-langflow:$VERSION-amd64 \
+           langflowai/openrag-langflow:$VERSION-arm64

-         docker buildx imagetools create -t phact/openrag-opensearch:$VERSION \
-           phact/openrag-opensearch:$VERSION-amd64 \
-           phact/openrag-opensearch:$VERSION-arm64
+         docker buildx imagetools create -t langflowai/openrag-opensearch:$VERSION \
+           langflowai/openrag-opensearch:$VERSION-amd64 \
+           langflowai/openrag-opensearch:$VERSION-arm64

          # Only update latest tags if version is numeric
          if [[ "$VERSION" =~ ^[0-9.-]+$ ]]; then
            echo "Updating latest tags for production release: $VERSION"

-           docker buildx imagetools create -t phact/openrag-backend:latest \
-             phact/openrag-backend:$VERSION-amd64 \
-             phact/openrag-backend:$VERSION-arm64
+           docker buildx imagetools create -t langflowai/openrag-backend:latest \
+             langflowai/openrag-backend:$VERSION-amd64 \
+             langflowai/openrag-backend:$VERSION-arm64

-           docker buildx imagetools create -t phact/openrag-frontend:latest \
-             phact/openrag-frontend:$VERSION-amd64 \
-             phact/openrag-frontend:$VERSION-arm64
+           docker buildx imagetools create -t langflowai/openrag-frontend:latest \
+             langflowai/openrag-frontend:$VERSION-amd64 \
langflowai/openrag-frontend:$VERSION-arm64 - docker buildx imagetools create -t phact/openrag-langflow:latest \ - phact/openrag-langflow:$VERSION-amd64 \ - phact/openrag-langflow:$VERSION-arm64 + docker buildx imagetools create -t langflowai/openrag-langflow:latest \ + langflowai/openrag-langflow:$VERSION-amd64 \ + langflowai/openrag-langflow:$VERSION-arm64 - docker buildx imagetools create -t phact/openrag-opensearch:latest \ - phact/openrag-opensearch:$VERSION-amd64 \ - phact/openrag-opensearch:$VERSION-arm64 + docker buildx imagetools create -t langflowai/openrag-opensearch:latest \ + langflowai/openrag-opensearch:$VERSION-amd64 \ + langflowai/openrag-opensearch:$VERSION-arm64 else echo "Skipping latest tags - version: $VERSION (not numeric)" fi diff --git a/Makefile b/Makefile index f47bba28..b5804f77 100644 --- a/Makefile +++ b/Makefile @@ -210,7 +210,7 @@ test-ci: echo "Pulling latest images..."; \ docker compose -f docker-compose-cpu.yml pull; \ echo "Building OpenSearch image override..."; \ - docker build --no-cache -t phact/openrag-opensearch:latest -f Dockerfile .; \ + docker build --no-cache -t langflowai/openrag-opensearch:latest -f Dockerfile .; \ echo "Starting infra (OpenSearch + Dashboards + Langflow) with CPU containers"; \ docker compose -f docker-compose-cpu.yml up -d opensearch dashboards langflow; \ echo "Starting docling-serve..."; \ @@ -288,10 +288,10 @@ test-ci-local: echo "Cleaning up old containers and volumes..."; \ docker compose -f docker-compose-cpu.yml down -v 2>/dev/null || true; \ echo "Building all images locally..."; \ - docker build -t phact/openrag-opensearch:latest -f Dockerfile .; \ - docker build -t phact/openrag-backend:latest -f Dockerfile.backend .; \ - docker build -t phact/openrag-frontend:latest -f Dockerfile.frontend .; \ - docker build -t phact/openrag-langflow:latest -f Dockerfile.langflow .; \ + docker build -t langflowai/openrag-opensearch:latest -f Dockerfile .; \ + docker build -t langflowai/openrag-backend:latest -f Dockerfile.backend .; \ + docker build -t langflowai/openrag-frontend:latest -f Dockerfile.frontend .; \ + docker build -t langflowai/openrag-langflow:latest -f Dockerfile.langflow .; \ echo "Starting infra (OpenSearch + Dashboards + Langflow) with CPU containers"; \ docker compose -f docker-compose-cpu.yml up -d opensearch dashboards langflow; \ echo "Starting docling-serve..."; \ diff --git a/docker-compose-cpu.yml b/docker-compose-cpu.yml index 50e118b7..c5cd15f3 100644 --- a/docker-compose-cpu.yml +++ b/docker-compose-cpu.yml @@ -1,6 +1,6 @@ services: opensearch: - image: phact/openrag-opensearch:${OPENRAG_VERSION:-latest} + image: langflowai/openrag-opensearch:${OPENRAG_VERSION:-latest} #build: # context: . # dockerfile: Dockerfile @@ -44,7 +44,7 @@ services: - "5601:5601" openrag-backend: - image: phact/openrag-backend:${OPENRAG_VERSION:-latest} + image: langflowai/openrag-backend:${OPENRAG_VERSION:-latest} # build: # context: . # dockerfile: Dockerfile.backend @@ -86,7 +86,7 @@ services: - ./flows:/app/flows:U,z openrag-frontend: - image: phact/openrag-frontend:${OPENRAG_VERSION:-latest} + image: langflowai/openrag-frontend:${OPENRAG_VERSION:-latest} # build: # context: . # dockerfile: Dockerfile.frontend @@ -101,7 +101,7 @@ services: langflow: volumes: - ./flows:/app/flows:U,z - image: phact/openrag-langflow:${LANGFLOW_VERSION:-latest} + image: langflowai/openrag-langflow:${LANGFLOW_VERSION:-latest} # build: # context: . 
# dockerfile: Dockerfile.langflow diff --git a/docker-compose.yml b/docker-compose.yml index 7ba0cea8..fbba580f 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,6 +1,6 @@ services: opensearch: - image: phact/openrag-opensearch:${OPENRAG_VERSION:-latest} + image: langflowai/openrag-opensearch:${OPENRAG_VERSION:-latest} #build: #context: . #dockerfile: Dockerfile @@ -44,7 +44,7 @@ services: - "5601:5601" openrag-backend: - image: phact/openrag-backend:${OPENRAG_VERSION:-latest} + image: langflowai/openrag-backend:${OPENRAG_VERSION:-latest} # build: # context: . # dockerfile: Dockerfile.backend @@ -88,7 +88,7 @@ services: gpus: all openrag-frontend: - image: phact/openrag-frontend:${OPENRAG_VERSION:-latest} + image: langflowai/openrag-frontend:${OPENRAG_VERSION:-latest} # build: # context: . # dockerfile: Dockerfile.frontend @@ -103,7 +103,7 @@ services: langflow: volumes: - ./flows:/app/flows:U,z - image: phact/openrag-langflow:${LANGFLOW_VERSION:-latest} + image: langflowai/openrag-langflow:${LANGFLOW_VERSION:-latest} # build: # context: . # dockerfile: Dockerfile.langflow From 88b90a17b2a7aeebf7d7fdbf9e0d3ec9438cf1bf Mon Sep 17 00:00:00 2001 From: phact Date: Mon, 24 Nov 2025 18:10:25 -0500 Subject: [PATCH 04/43] v0.1.39-rc1 --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 09ce2e9b..8065286e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "openrag" -version = "0.1.38" +version = "0.1.39-rc1" description = "Add your description here" readme = "README.md" requires-python = ">=3.13" From f370afbd221c779e08a097b9d999dc28552c53de Mon Sep 17 00:00:00 2001 From: phact Date: Mon, 24 Nov 2025 21:27:19 -0500 Subject: [PATCH 05/43] rc2 --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 8065286e..b4003082 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "openrag" -version = "0.1.39-rc1" +version = "0.1.39-rc2" description = "Add your description here" readme = "README.md" requires-python = ">=3.13" From 45b4db8afc68f92caf20de4a7a7e9a95e9006877 Mon Sep 17 00:00:00 2001 From: Mike Fortman Date: Tue, 25 Nov 2025 12:00:58 -0600 Subject: [PATCH 06/43] update limits so knowledge filters file list isn't limited to one --- frontend/app/api/queries/useGetSearchQuery.ts | 14 +++++++++----- frontend/lib/constants.ts | 7 +++++++ 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/frontend/app/api/queries/useGetSearchQuery.ts b/frontend/app/api/queries/useGetSearchQuery.ts index e5407832..1f2cceb2 100644 --- a/frontend/app/api/queries/useGetSearchQuery.ts +++ b/frontend/app/api/queries/useGetSearchQuery.ts @@ -4,6 +4,7 @@ import { useQueryClient, } from "@tanstack/react-query"; import type { ParsedQueryData } from "@/contexts/knowledge-filter-context"; +import { SEARCH_CONSTANTS } from "@/lib/constants"; export interface SearchPayload { query: string; @@ -70,13 +71,16 @@ export const useGetSearchQuery = ( async function getFiles(): Promise { try { + // For wildcard queries, use a high limit to get all files + // Otherwise use the limit from queryData or default to 100 + const isWildcardQuery = effectiveQuery.trim() === "*" || effectiveQuery.trim() === ""; + const searchLimit = isWildcardQuery + ? 
SEARCH_CONSTANTS.WILDCARD_QUERY_LIMIT + : (queryData?.limit || 100); + const searchPayload: SearchPayload = { query: effectiveQuery, - limit: - queryData?.limit || - (effectiveQuery.trim() === "*" || effectiveQuery.trim() === "" - ? 10000 - : 10), // Maximum allowed limit for wildcard searches + limit: searchLimit, scoreThreshold: queryData?.scoreThreshold || 0, }; if (queryData?.filters) { diff --git a/frontend/lib/constants.ts b/frontend/lib/constants.ts index 402ace74..cc5d2bdb 100644 --- a/frontend/lib/constants.ts +++ b/frontend/lib/constants.ts @@ -25,6 +25,13 @@ export const UI_CONSTANTS = { MAX_SYSTEM_PROMPT_CHARS: 4000, } as const; +/** + * Search Constants + */ +export const SEARCH_CONSTANTS = { + WILDCARD_QUERY_LIMIT: 10000, // Maximum allowed limit for wildcard searches +} as const; + export const ANIMATION_DURATION = 0.4; export const SIDEBAR_WIDTH = 280; export const HEADER_HEIGHT = 54; From 608fc798b065b1f73b696d5f56449ff3f819a6c7 Mon Sep 17 00:00:00 2001 From: Lucas Oliveira Date: Tue, 25 Nov 2025 18:09:47 -0300 Subject: [PATCH 07/43] Deleted docker compose cpu, added gpu override --- docker-compose-cpu.yml | 141 ----------------------------------------- docker-compose.gpu.yml | 7 ++ docker-compose.yml | 6 +- 3 files changed, 9 insertions(+), 145 deletions(-) delete mode 100644 docker-compose-cpu.yml create mode 100644 docker-compose.gpu.yml diff --git a/docker-compose-cpu.yml b/docker-compose-cpu.yml deleted file mode 100644 index 50e118b7..00000000 --- a/docker-compose-cpu.yml +++ /dev/null @@ -1,141 +0,0 @@ -services: - opensearch: - image: phact/openrag-opensearch:${OPENRAG_VERSION:-latest} - #build: - # context: . - # dockerfile: Dockerfile - container_name: os - depends_on: - - openrag-backend - environment: - - discovery.type=single-node - - OPENSEARCH_INITIAL_ADMIN_PASSWORD=${OPENSEARCH_PASSWORD} - # Run security setup in background after OpenSearch starts - command: > - bash -c " - # Ensure data directory has correct permissions - sudo chown -R opensearch:opensearch /usr/share/opensearch/data || true - - # Start OpenSearch in background - /usr/share/opensearch/opensearch-docker-entrypoint.sh opensearch & - - # Wait a bit for OpenSearch to start, then apply security config - sleep 10 && /usr/share/opensearch/setup-security.sh & - - # Wait for background processes - wait - " - ports: - - "9200:9200" - - "9600:9600" - volumes: - - ${OPENSEARCH_DATA_PATH:-./opensearch-data}:/usr/share/opensearch/data:Z - - dashboards: - image: opensearchproject/opensearch-dashboards:3.0.0 - container_name: osdash - depends_on: - - opensearch - environment: - OPENSEARCH_HOSTS: '["https://opensearch:9200"]' - OPENSEARCH_USERNAME: "admin" - OPENSEARCH_PASSWORD: ${OPENSEARCH_PASSWORD} - ports: - - "5601:5601" - - openrag-backend: - image: phact/openrag-backend:${OPENRAG_VERSION:-latest} - # build: - # context: . 
- # dockerfile: Dockerfile.backend - container_name: openrag-backend - depends_on: - - langflow - environment: - - OPENSEARCH_HOST=opensearch - - LANGFLOW_URL=http://langflow:7860 - - LANGFLOW_PUBLIC_URL=${LANGFLOW_PUBLIC_URL} - - LANGFLOW_AUTO_LOGIN=${LANGFLOW_AUTO_LOGIN} - - LANGFLOW_SECRET_KEY=${LANGFLOW_SECRET_KEY} - - LANGFLOW_SUPERUSER=${LANGFLOW_SUPERUSER} - - LANGFLOW_SUPERUSER_PASSWORD=${LANGFLOW_SUPERUSER_PASSWORD} - - LANGFLOW_CHAT_FLOW_ID=${LANGFLOW_CHAT_FLOW_ID} - - LANGFLOW_INGEST_FLOW_ID=${LANGFLOW_INGEST_FLOW_ID} - - LANGFLOW_URL_INGEST_FLOW_ID=${LANGFLOW_URL_INGEST_FLOW_ID} - - DISABLE_INGEST_WITH_LANGFLOW=${DISABLE_INGEST_WITH_LANGFLOW:-false} - - NUDGES_FLOW_ID=${NUDGES_FLOW_ID} - - OPENSEARCH_PORT=9200 - - OPENSEARCH_USERNAME=admin - - OPENSEARCH_PASSWORD=${OPENSEARCH_PASSWORD} - - OPENAI_API_KEY=${OPENAI_API_KEY} - - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY} - - WATSONX_API_KEY=${WATSONX_API_KEY} - - WATSONX_ENDPOINT=${WATSONX_ENDPOINT} - - WATSONX_PROJECT_ID=${WATSONX_PROJECT_ID} - - OLLAMA_ENDPOINT=${OLLAMA_ENDPOINT} - - GOOGLE_OAUTH_CLIENT_ID=${GOOGLE_OAUTH_CLIENT_ID} - - GOOGLE_OAUTH_CLIENT_SECRET=${GOOGLE_OAUTH_CLIENT_SECRET} - - MICROSOFT_GRAPH_OAUTH_CLIENT_ID=${MICROSOFT_GRAPH_OAUTH_CLIENT_ID} - - MICROSOFT_GRAPH_OAUTH_CLIENT_SECRET=${MICROSOFT_GRAPH_OAUTH_CLIENT_SECRET} - - WEBHOOK_BASE_URL=${WEBHOOK_BASE_URL} - - AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} - - AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} - volumes: - - ./documents:/app/documents:Z - - ./keys:/app/keys:Z - - ./flows:/app/flows:U,z - - openrag-frontend: - image: phact/openrag-frontend:${OPENRAG_VERSION:-latest} - # build: - # context: . - # dockerfile: Dockerfile.frontend - container_name: openrag-frontend - depends_on: - - openrag-backend - environment: - - OPENRAG_BACKEND_HOST=openrag-backend - ports: - - "3000:3000" - - langflow: - volumes: - - ./flows:/app/flows:U,z - image: phact/openrag-langflow:${LANGFLOW_VERSION:-latest} - # build: - # context: . 
- # dockerfile: Dockerfile.langflow - container_name: langflow - ports: - - "7860:7860" - environment: - - LANGFLOW_DEACTIVATE_TRACING=true - - OPENAI_API_KEY=${OPENAI_API_KEY} - - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY} - - WATSONX_API_KEY=${WATSONX_API_KEY} - - WATSONX_ENDPOINT=${WATSONX_ENDPOINT} - - WATSONX_PROJECT_ID=${WATSONX_PROJECT_ID} - - OLLAMA_BASE_URL=${OLLAMA_ENDPOINT} - - LANGFLOW_LOAD_FLOWS_PATH=/app/flows - - LANGFLOW_SECRET_KEY=${LANGFLOW_SECRET_KEY} - - JWT=None - - OWNER=None - - OWNER_NAME=None - - OWNER_EMAIL=None - - CONNECTOR_TYPE=system - - CONNECTOR_TYPE_URL=url - - OPENRAG-QUERY-FILTER="{}" - - OPENSEARCH_PASSWORD=${OPENSEARCH_PASSWORD} - - FILENAME=None - - MIMETYPE=None - - FILESIZE=0 - - LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT=JWT,OPENRAG-QUERY-FILTER,OPENSEARCH_PASSWORD,OWNER,OWNER_NAME,OWNER_EMAIL,CONNECTOR_TYPE,FILENAME,MIMETYPE,FILESIZE - - LANGFLOW_LOG_LEVEL=DEBUG - - LANGFLOW_AUTO_LOGIN=${LANGFLOW_AUTO_LOGIN} - - LANGFLOW_SUPERUSER=${LANGFLOW_SUPERUSER} - - LANGFLOW_SUPERUSER_PASSWORD=${LANGFLOW_SUPERUSER_PASSWORD} - - LANGFLOW_NEW_USER_IS_ACTIVE=${LANGFLOW_NEW_USER_IS_ACTIVE} - - LANGFLOW_ENABLE_SUPERUSER_CLI=${LANGFLOW_ENABLE_SUPERUSER_CLI} - # - DEFAULT_FOLDER_NAME=OpenRAG - - HIDE_GETTING_STARTED_PROGRESS=true - diff --git a/docker-compose.gpu.yml b/docker-compose.gpu.yml new file mode 100644 index 00000000..4496c4ac --- /dev/null +++ b/docker-compose.gpu.yml @@ -0,0 +1,7 @@ +services: + openrag-backend: + environment: + - NVIDIA_DRIVER_CAPABILITIES=compute,utility + - NVIDIA_VISIBLE_DEVICES=all + gpus: all + diff --git a/docker-compose.yml b/docker-compose.yml index 7ba0cea8..2627aa14 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -56,6 +56,7 @@ services: - LANGFLOW_URL=http://langflow:7860 - LANGFLOW_PUBLIC_URL=${LANGFLOW_PUBLIC_URL} - LANGFLOW_AUTO_LOGIN=${LANGFLOW_AUTO_LOGIN} + - LANGFLOW_SECRET_KEY=${LANGFLOW_SECRET_KEY} - LANGFLOW_SUPERUSER=${LANGFLOW_SUPERUSER} - LANGFLOW_SUPERUSER_PASSWORD=${LANGFLOW_SUPERUSER_PASSWORD} - LANGFLOW_CHAT_FLOW_ID=${LANGFLOW_CHAT_FLOW_ID} @@ -72,8 +73,6 @@ services: - WATSONX_ENDPOINT=${WATSONX_ENDPOINT} - WATSONX_PROJECT_ID=${WATSONX_PROJECT_ID} - OLLAMA_ENDPOINT=${OLLAMA_ENDPOINT} - - NVIDIA_DRIVER_CAPABILITIES=compute,utility - - NVIDIA_VISIBLE_DEVICES=all - GOOGLE_OAUTH_CLIENT_ID=${GOOGLE_OAUTH_CLIENT_ID} - GOOGLE_OAUTH_CLIENT_SECRET=${GOOGLE_OAUTH_CLIENT_SECRET} - MICROSOFT_GRAPH_OAUTH_CLIENT_ID=${MICROSOFT_GRAPH_OAUTH_CLIENT_ID} @@ -85,7 +84,6 @@ services: - ./documents:/app/documents:Z - ./keys:/app/keys:Z - ./flows:/app/flows:U,z - gpus: all openrag-frontend: image: phact/openrag-frontend:${OPENRAG_VERSION:-latest} @@ -127,10 +125,10 @@ services: - CONNECTOR_TYPE=system - CONNECTOR_TYPE_URL=url - OPENRAG-QUERY-FILTER="{}" + - OPENSEARCH_PASSWORD=${OPENSEARCH_PASSWORD} - FILENAME=None - MIMETYPE=None - FILESIZE=0 - - OPENSEARCH_PASSWORD=${OPENSEARCH_PASSWORD} - LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT=JWT,OPENRAG-QUERY-FILTER,OPENSEARCH_PASSWORD,OWNER,OWNER_NAME,OWNER_EMAIL,CONNECTOR_TYPE,FILENAME,MIMETYPE,FILESIZE - LANGFLOW_LOG_LEVEL=DEBUG - LANGFLOW_AUTO_LOGIN=${LANGFLOW_AUTO_LOGIN} From 28cb9fc26b80367cd00d85b8f16cdec38d202e21 Mon Sep 17 00:00:00 2001 From: Lucas Oliveira Date: Tue, 25 Nov 2025 18:10:02 -0300 Subject: [PATCH 08/43] Changed TUI implementation to support new docker compose override --- src/tui/main.py | 2 +- src/tui/managers/container_manager.py | 132 ++++++++++++++++---------- src/tui/screens/monitor.py | 12 +-- 3 files changed, 91 insertions(+), 55 deletions(-) diff 
--git a/src/tui/main.py b/src/tui/main.py index 19468473..d27db184 100644 --- a/src/tui/main.py +++ b/src/tui/main.py @@ -485,7 +485,7 @@ def copy_compose_files(*, force: bool = False) -> None: logger.debug(f"Could not access compose assets: {e}") return - for filename in ("docker-compose.yml", "docker-compose-cpu.yml"): + for filename in ("docker-compose.yml", "docker-compose.gpu.yml"): destination = Path(filename) if destination.exists() and not force: continue diff --git a/src/tui/managers/container_manager.py b/src/tui/managers/container_manager.py index 2953d550..53fc385e 100644 --- a/src/tui/managers/container_manager.py +++ b/src/tui/managers/container_manager.py @@ -56,15 +56,15 @@ class ContainerManager: self.platform_detector = PlatformDetector() self.runtime_info = self.platform_detector.detect_runtime() self.compose_file = compose_file or self._find_compose_file("docker-compose.yml") - self.cpu_compose_file = self._find_compose_file("docker-compose-cpu.yml") + self.gpu_compose_file = self._find_compose_file("docker-compose.gpu.yml") self.services_cache: Dict[str, ServiceInfo] = {} self.last_status_update = 0 - # Auto-select CPU compose if no GPU available + # Auto-select GPU override if GPU is available try: has_gpu, _ = detect_gpu_devices() - self.use_cpu_compose = not has_gpu + self.use_gpu_compose = has_gpu except Exception: - self.use_cpu_compose = True + self.use_gpu_compose = False # Expected services based on compose files self.expected_services = [ @@ -143,9 +143,15 @@ class ContainerManager: return False, "", "No container runtime available" if cpu_mode is None: - cpu_mode = self.use_cpu_compose - compose_file = self.cpu_compose_file if cpu_mode else self.compose_file - cmd = self.runtime_info.compose_command + ["-f", str(compose_file)] + args + use_gpu = self.use_gpu_compose + else: + use_gpu = not cpu_mode + + # Build compose command with override pattern + cmd = self.runtime_info.compose_command + ["-f", str(self.compose_file)] + if use_gpu and self.gpu_compose_file.exists(): + cmd.extend(["-f", str(self.gpu_compose_file)]) + cmd.extend(args) try: process = await asyncio.create_subprocess_exec( @@ -179,9 +185,15 @@ class ContainerManager: return if cpu_mode is None: - cpu_mode = self.use_cpu_compose - compose_file = self.cpu_compose_file if cpu_mode else self.compose_file - cmd = self.runtime_info.compose_command + ["-f", str(compose_file)] + args + use_gpu = self.use_gpu_compose + else: + use_gpu = not cpu_mode + + # Build compose command with override pattern + cmd = self.runtime_info.compose_command + ["-f", str(self.compose_file)] + if use_gpu and self.gpu_compose_file.exists(): + cmd.extend(["-f", str(self.gpu_compose_file)]) + cmd.extend(args) try: process = await asyncio.create_subprocess_exec( @@ -242,9 +254,15 @@ class ContainerManager: return if cpu_mode is None: - cpu_mode = self.use_cpu_compose - compose_file = self.cpu_compose_file if cpu_mode else self.compose_file - cmd = self.runtime_info.compose_command + ["-f", str(compose_file)] + args + use_gpu = self.use_gpu_compose + else: + use_gpu = not cpu_mode + + # Build compose command with override pattern + cmd = self.runtime_info.compose_command + ["-f", str(self.compose_file)] + if use_gpu and self.gpu_compose_file.exists(): + cmd.extend(["-f", str(self.gpu_compose_file)]) + cmd.extend(args) try: process = await asyncio.create_subprocess_exec( @@ -551,44 +569,61 @@ class ContainerManager: """Get resolved image names from compose files using docker/podman compose, with robust fallbacks.""" images: 
set[str] = set() - compose_files = [self.compose_file, self.cpu_compose_file] - for compose_file in compose_files: + # Try both GPU and CPU modes to get all images + for use_gpu in [True, False]: try: - if not compose_file or not compose_file.exists(): - continue + # Build compose command with override pattern + cmd = self.runtime_info.compose_command + ["-f", str(self.compose_file)] + if use_gpu and self.gpu_compose_file.exists(): + cmd.extend(["-f", str(self.gpu_compose_file)]) + cmd.extend(["config", "--format", "json"]) - cpu_mode = (compose_file == self.cpu_compose_file) - - # Try JSON format first - success, stdout, _ = await self._run_compose_command( - ["config", "--format", "json"], - cpu_mode=cpu_mode + process = await asyncio.create_subprocess_exec( + *cmd, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + cwd=Path.cwd(), ) + stdout, stderr = await process.communicate() + stdout_text = stdout.decode() if stdout else "" - if success and stdout.strip(): - from_cfg = self._extract_images_from_compose_config(stdout, tried_json=True) + if process.returncode == 0 and stdout_text.strip(): + from_cfg = self._extract_images_from_compose_config(stdout_text, tried_json=True) if from_cfg: images.update(from_cfg) - continue # this compose file succeeded; move to next file + continue # Fallback to YAML output (for older compose versions) - success, stdout, _ = await self._run_compose_command( - ["config"], - cpu_mode=cpu_mode - ) + cmd = self.runtime_info.compose_command + ["-f", str(self.compose_file)] + if use_gpu and self.gpu_compose_file.exists(): + cmd.extend(["-f", str(self.gpu_compose_file)]) + cmd.append("config") - if success and stdout.strip(): - from_cfg = self._extract_images_from_compose_config(stdout, tried_json=False) + process = await asyncio.create_subprocess_exec( + *cmd, + stdout=asyncio.subprocess.PIPE, + stderr=asyncio.subprocess.PIPE, + cwd=Path.cwd(), + ) + stdout, stderr = await process.communicate() + stdout_text = stdout.decode() if stdout else "" + + if process.returncode == 0 and stdout_text.strip(): + from_cfg = self._extract_images_from_compose_config(stdout_text, tried_json=False) if from_cfg: images.update(from_cfg) continue except Exception: - # Keep behavior resilient—just continue to next file + # Keep behavior resilient—just continue to next mode continue # Fallback: manual parsing if compose config didn't work if not images: + compose_files = [self.compose_file] + if self.gpu_compose_file.exists(): + compose_files.append(self.gpu_compose_file) + for compose in compose_files: try: if not compose.exists(): @@ -638,8 +673,11 @@ class ContainerManager: yield False, "No container runtime available" return - # Diagnostic info about compose files - compose_file = self.cpu_compose_file if (cpu_mode if cpu_mode is not None else self.use_cpu_compose) else self.compose_file + # Determine GPU mode + if cpu_mode is None: + use_gpu = self.use_gpu_compose + else: + use_gpu = not cpu_mode # Show the search process for debugging if hasattr(self, '_compose_search_log'): @@ -650,9 +688,12 @@ class ContainerManager: # Show runtime detection info runtime_cmd_str = " ".join(self.runtime_info.compose_command) yield False, f"Using compose command: {runtime_cmd_str}", False - yield False, f"Final compose file: {compose_file.absolute()}", False - if not compose_file.exists(): - yield False, f"ERROR: Compose file not found at {compose_file.absolute()}", False + compose_files_str = str(self.compose_file.absolute()) + if use_gpu and self.gpu_compose_file.exists(): 
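+                # docker-compose.gpu.yml is a thin override layered onto the base
+                # file (it only adds the gpus/NVIDIA env settings), so listing both
+                # mirrors `docker compose -f docker-compose.yml -f docker-compose.gpu.yml`.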
+ compose_files_str += f" + {self.gpu_compose_file.absolute()}" + yield False, f"Compose files: {compose_files_str}", False + if not self.compose_file.exists(): + yield False, f"ERROR: Base compose file not found at {self.compose_file.absolute()}", False return yield False, "Starting OpenRAG services...", False @@ -786,16 +827,11 @@ class ContainerManager: yield "No container runtime available" return - compose_file = ( - self.cpu_compose_file if self.use_cpu_compose else self.compose_file - ) - cmd = self.runtime_info.compose_command + [ - "-f", - str(compose_file), - "logs", - "-f", - service_name, - ] + # Build compose command with override pattern + cmd = self.runtime_info.compose_command + ["-f", str(self.compose_file)] + if self.use_gpu_compose and self.gpu_compose_file.exists(): + cmd.extend(["-f", str(self.gpu_compose_file)]) + cmd.extend(["logs", "-f", service_name]) try: process = await asyncio.create_subprocess_exec( diff --git a/src/tui/screens/monitor.py b/src/tui/screens/monitor.py index 01c243c6..99e7a040 100644 --- a/src/tui/screens/monitor.py +++ b/src/tui/screens/monitor.py @@ -581,22 +581,22 @@ class MonitorScreen(Screen): def _update_mode_row(self) -> None: """Update the mode indicator and toggle button label.""" try: - use_cpu = getattr(self.container_manager, "use_cpu_compose", True) + use_gpu = getattr(self.container_manager, "use_gpu_compose", False) indicator = self.query_one("#mode-indicator", Static) - mode_text = "Mode: CPU (no GPU detected)" if use_cpu else "Mode: GPU" + mode_text = "Mode: GPU" if use_gpu else "Mode: CPU (no GPU detected)" indicator.update(mode_text) toggle_btn = self.query_one("#toggle-mode-btn", Button) - toggle_btn.label = "Switch to GPU Mode" if use_cpu else "Switch to CPU Mode" + toggle_btn.label = "Switch to CPU Mode" if use_gpu else "Switch to GPU Mode" except Exception: pass def action_toggle_mode(self) -> None: """Toggle between CPU/GPU compose files and refresh view.""" try: - current = getattr(self.container_manager, "use_cpu_compose", True) - self.container_manager.use_cpu_compose = not current + current = getattr(self.container_manager, "use_gpu_compose", False) + self.container_manager.use_gpu_compose = not current self.notify( - "Switched to GPU compose" if not current else "Switched to CPU compose", + "Switched to GPU mode" if not current else "Switched to CPU mode", severity="information", ) self._update_mode_row() From 49e0b8423f653ae4095e124d3cca50f3d9eade73 Mon Sep 17 00:00:00 2001 From: April M <36110273+aimurphy@users.noreply.github.com> Date: Tue, 25 Nov 2025 13:13:37 -0800 Subject: [PATCH 09/43] python requirement, readme links --- README.md | 42 +++++++++++++++------------- docs/docs/get-started/docker.mdx | 2 +- docs/docs/get-started/install.mdx | 2 +- docs/docs/get-started/quickstart.mdx | 2 +- 4 files changed, 25 insertions(+), 23 deletions(-) diff --git a/README.md b/README.md index d7d1b614..4e861740 100644 --- a/README.md +++ b/README.md @@ -18,27 +18,29 @@ OpenRAG is a comprehensive Retrieval-Augmented Generation platform that enables
Quickstart   |   - TUI Interface   |   - Docker Deployment   |   + Python package   |   + Docker or Podman   |   Development   |   Troubleshooting
## Quickstart -To quickly run OpenRAG without creating or modifying any project files, use `uvx`: +To run OpenRAG without creating or modifying any project files, use `uvx`: ```bash uvx openrag ``` -This runs OpenRAG without installing it to your project or globally. -To run a specific version of OpenRAG, add the version to the command, such as: `uvx --from openrag==0.1.25 openrag`. + +This command runs OpenRAG without installing it to your project or globally. + +To run a specific version of OpenRAG, run `uvx --from openrag==VERSION openrag`. ## Install Python package -To first set up a project and then install the OpenRAG Python package, do the following: +To add the OpenRAG Python package to a Python project, use `uv`: -1. Create a new project with a virtual environment using `uv init`. +1. Create a new project with a virtual environment using `uv init`: ```bash uv init YOUR_PROJECT_NAME @@ -48,33 +50,33 @@ To first set up a project and then install the OpenRAG Python package, do the fo The `(venv)` prompt doesn't change, but `uv` commands will automatically use the project's virtual environment. For more information on virtual environments, see the [uv documentation](https://docs.astral.sh/uv/pip/environments). -2. Add OpenRAG to your project. +2. Add OpenRAG to your project: + ```bash uv add openrag ``` - To add a specific version of OpenRAG: - ```bash - uv add openrag==0.1.25 - ``` + To add a specific version of OpenRAG, run `uv add openrag==VERSION`. + +3. Start the OpenRAG terminal user interface (TUI): -3. Start the OpenRAG TUI. ```bash uv run openrag ``` 4. Continue with the [Quickstart](https://docs.openr.ag/quickstart). -For the full TUI installation guide, see [TUI](https://docs.openr.ag/install). +For all installation options, see the [OpenRAG installation guide](https://docs.openr.ag/install). ## Docker or Podman installation -For more information, see [Install OpenRAG containers](https://docs.openr.ag/docker). - -## Troubleshooting - -For common issues and fixes, see [Troubleshoot](https://docs.openr.ag/support/troubleshoot). +By default, OpenRAG automatically starts the required containers and helps you manage them. +To install OpenRAG with self-managed containers, see the [OpenRAG installation guide](https://docs.openr.ag/docker). ## Development -For developers wanting to contribute to OpenRAG or set up a development environment, see [CONTRIBUTING.md](CONTRIBUTING.md). \ No newline at end of file +For developers wanting to contribute to OpenRAG or set up a development environment, see [CONTRIBUTING.md](CONTRIBUTING.md). + +## Troubleshooting + +For common issues and fixes, see [Troubleshoot OpenRAG](https://docs.openr.ag/support/troubleshoot). \ No newline at end of file diff --git a/docs/docs/get-started/docker.mdx b/docs/docs/get-started/docker.mdx index b9bb312a..0771b08e 100644 --- a/docs/docs/get-started/docker.mdx +++ b/docs/docs/get-started/docker.mdx @@ -18,7 +18,7 @@ OpenRAG has two Docker Compose files. Both files deploy the same applications an - Install the following: - - [Python](https://www.python.org/downloads/release/python-3100/) version 3.10 to 3.13. + - [Python](https://www.python.org/downloads/release/python-3100/) version 3.13 or later. - [uv](https://docs.astral.sh/uv/getting-started/installation/). - [Podman](https://podman.io/docs/installation) (recommended) or [Docker](https://docs.docker.com/get-docker/). 
- [`podman-compose`](https://docs.podman.io/en/latest/markdown/podman-compose.1.html) or [Docker Compose](https://docs.docker.com/compose/install/). To use Docker Compose with Podman, you must alias Docker Compose commands to Podman commands. diff --git a/docs/docs/get-started/install.mdx b/docs/docs/get-started/install.mdx index 3c1c4bcc..921c94d6 100644 --- a/docs/docs/get-started/install.mdx +++ b/docs/docs/get-started/install.mdx @@ -22,7 +22,7 @@ If you prefer running Podman or Docker containers and manually editing `.env` fi ## Prerequisites -- All OpenRAG installations require [Python](https://www.python.org/downloads/release/python-3100/) version 3.10 to 3.13. +- All OpenRAG installations require [Python](https://www.python.org/downloads/release/python-3100/) version 3.13 or later. - If you aren't using the automatic installer script, install the following: diff --git a/docs/docs/get-started/quickstart.mdx b/docs/docs/get-started/quickstart.mdx index 80eb0902..2e333a14 100644 --- a/docs/docs/get-started/quickstart.mdx +++ b/docs/docs/get-started/quickstart.mdx @@ -18,7 +18,7 @@ This quickstart requires the following: This quickstart uses OpenAI for simplicity. For other providers, see the complete [installation guide](/install). -- [Python](https://www.python.org/downloads/release/python-3100/) version 3.10 to 3.13. +- [Python](https://www.python.org/downloads/release/python-3100/) version 3.13 or later. - Microsoft Windows only: To run OpenRAG on Windows, you must use the Windows Subsystem for Linux (WSL). From 3107c4086e631b2a4a7acaafe96d3b17e54bf218 Mon Sep 17 00:00:00 2001 From: Mike Fortman Date: Tue, 25 Nov 2025 16:03:18 -0600 Subject: [PATCH 10/43] add tooltip to view full file names --- frontend/app/knowledge/page.tsx | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/frontend/app/knowledge/page.tsx b/frontend/app/knowledge/page.tsx index 1b9e6f85..0edddaf4 100644 --- a/frontend/app/knowledge/page.tsx +++ b/frontend/app/knowledge/page.tsx @@ -31,6 +31,11 @@ import { DialogTrigger, } from "@/components/ui/dialog"; import { StatusBadge } from "@/components/ui/status-badge"; +import { + Tooltip, + TooltipContent, + TooltipTrigger, +} from "@/components/ui/tooltip"; import { DeleteConfirmationDialog, formatFilesToDelete, @@ -156,9 +161,14 @@ function SearchPage() { }} > {getSourceIcon(data?.connector_type)} - - {value} - + + + + {value} + + + {value} + ); From d5bf5b0c1821f6d18f91a3be856d2ab4e9baa792 Mon Sep 17 00:00:00 2001 From: Edwin Jose Date: Tue, 25 Nov 2025 17:06:06 -0500 Subject: [PATCH 11/43] Update docker-compose.yml --- docker-compose.yml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index c3382bf3..7ba0cea8 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -45,9 +45,9 @@ services: openrag-backend: image: phact/openrag-backend:${OPENRAG_VERSION:-latest} - build: - context: . - dockerfile: Dockerfile.backend + # build: + # context: . + # dockerfile: Dockerfile.backend container_name: openrag-backend depends_on: - langflow @@ -89,9 +89,9 @@ services: openrag-frontend: image: phact/openrag-frontend:${OPENRAG_VERSION:-latest} - build: - context: . - dockerfile: Dockerfile.frontend + # build: + # context: . + # dockerfile: Dockerfile.frontend container_name: openrag-frontend depends_on: - openrag-backend @@ -104,9 +104,9 @@ services: volumes: - ./flows:/app/flows:U,z image: phact/openrag-langflow:${LANGFLOW_VERSION:-latest} - build: - context: . 
- dockerfile: Dockerfile.langflow + # build: + # context: . + # dockerfile: Dockerfile.langflow container_name: langflow ports: - "7860:7860" From e9e70545a059ce00505c631e406b99c28b1d026c Mon Sep 17 00:00:00 2001 From: Mike Fortman Date: Tue, 25 Nov 2025 16:06:42 -0600 Subject: [PATCH 12/43] alignment improvement --- frontend/app/knowledge/page.tsx | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/frontend/app/knowledge/page.tsx b/frontend/app/knowledge/page.tsx index 0edddaf4..9a526159 100644 --- a/frontend/app/knowledge/page.tsx +++ b/frontend/app/knowledge/page.tsx @@ -167,7 +167,9 @@ function SearchPage() { {value} - {value} + + {value} + From 284b982dbfad646dbdb77582d8094bf261299dee Mon Sep 17 00:00:00 2001 From: Edwin Jose Date: Tue, 25 Nov 2025 17:47:54 -0500 Subject: [PATCH 13/43] Add SELECTED_EMBEDDING_MODEL global variable support Introduces SELECTED_EMBEDDING_MODEL as a global environment variable in docker-compose files and ensures it is passed in API headers for Langflow-related services. Updates settings and onboarding logic to set this variable without triggering flow updates, improving embedding model configuration consistency across services. --- docker-compose-cpu.yml | 3 ++- docker-compose.yml | 3 ++- src/api/settings.py | 31 +++++++++++---------------- src/services/chat_service.py | 24 ++++++++++++++++++--- src/services/langflow_file_service.py | 6 ++++++ 5 files changed, 43 insertions(+), 24 deletions(-) diff --git a/docker-compose-cpu.yml b/docker-compose-cpu.yml index 50e118b7..fad301e6 100644 --- a/docker-compose-cpu.yml +++ b/docker-compose-cpu.yml @@ -129,7 +129,8 @@ services: - FILENAME=None - MIMETYPE=None - FILESIZE=0 - - LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT=JWT,OPENRAG-QUERY-FILTER,OPENSEARCH_PASSWORD,OWNER,OWNER_NAME,OWNER_EMAIL,CONNECTOR_TYPE,FILENAME,MIMETYPE,FILESIZE + - SELECTED_EMBEDDING_MODEL=text-embedding-3-small + - LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT=JWT,OPENRAG-QUERY-FILTER,OPENSEARCH_PASSWORD,OWNER,OWNER_NAME,OWNER_EMAIL,CONNECTOR_TYPE,FILENAME,MIMETYPE,FILESIZE,SELECTED_EMBEDDING_MODEL - LANGFLOW_LOG_LEVEL=DEBUG - LANGFLOW_AUTO_LOGIN=${LANGFLOW_AUTO_LOGIN} - LANGFLOW_SUPERUSER=${LANGFLOW_SUPERUSER} diff --git a/docker-compose.yml b/docker-compose.yml index 7ba0cea8..dae748eb 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -130,8 +130,9 @@ services: - FILENAME=None - MIMETYPE=None - FILESIZE=0 + - SELECTED_EMBEDDING_MODEL=text-embedding-3-small - OPENSEARCH_PASSWORD=${OPENSEARCH_PASSWORD} - - LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT=JWT,OPENRAG-QUERY-FILTER,OPENSEARCH_PASSWORD,OWNER,OWNER_NAME,OWNER_EMAIL,CONNECTOR_TYPE,FILENAME,MIMETYPE,FILESIZE + - LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT=JWT,OPENRAG-QUERY-FILTER,OPENSEARCH_PASSWORD,OWNER,OWNER_NAME,OWNER_EMAIL,CONNECTOR_TYPE,FILENAME,MIMETYPE,FILESIZE,SELECTED_EMBEDDING_MODEL - LANGFLOW_LOG_LEVEL=DEBUG - LANGFLOW_AUTO_LOGIN=${LANGFLOW_AUTO_LOGIN} - LANGFLOW_SUPERUSER=${LANGFLOW_SUPERUSER} diff --git a/src/api/settings.py b/src/api/settings.py index 5b1eb22a..8cde2c3b 100644 --- a/src/api/settings.py +++ b/src/api/settings.py @@ -614,7 +614,7 @@ async def update_settings(request, session_manager): ) logger.info("Set OLLAMA_BASE_URL global variable in Langflow") - # Update model values across flows if provider or model changed + # Update LLM model values across flows if provider or model changed if "llm_provider" in body or "llm_model" in body: flows_service = _get_flows_service() llm_provider = current_config.agent.llm_provider.lower() @@ -629,18 
+629,13 @@ async def update_settings(request, session_manager): f"Successfully updated Langflow flows for LLM provider {llm_provider}" ) + # Update SELECTED_EMBEDDING_MODEL global variable (no flow updates needed) if "embedding_provider" in body or "embedding_model" in body: - flows_service = _get_flows_service() - embedding_provider = current_config.knowledge.embedding_provider.lower() - embedding_provider_config = current_config.get_embedding_provider_config() - embedding_endpoint = getattr(embedding_provider_config, "endpoint", None) - await flows_service.change_langflow_model_value( - embedding_provider, - embedding_model=current_config.knowledge.embedding_model, - endpoint=embedding_endpoint, + await clients._create_langflow_global_variable( + "SELECTED_EMBEDDING_MODEL", current_config.knowledge.embedding_model, modify=True ) logger.info( - f"Successfully updated Langflow flows for embedding provider {embedding_provider}" + f"Set SELECTED_EMBEDDING_MODEL global variable to {current_config.knowledge.embedding_model}" ) except Exception as e: @@ -928,7 +923,7 @@ async def onboarding(request, flows_service): ) logger.info("Set OLLAMA_BASE_URL global variable in Langflow") - # Update flows with model values + # Update flows with LLM model values if "llm_provider" in body or "llm_model" in body: llm_provider = current_config.agent.llm_provider.lower() llm_provider_config = current_config.get_llm_provider_config() @@ -940,16 +935,14 @@ async def onboarding(request, flows_service): ) logger.info(f"Updated Langflow flows for LLM provider {llm_provider}") + # Set SELECTED_EMBEDDING_MODEL global variable (no flow updates needed) if "embedding_provider" in body or "embedding_model" in body: - embedding_provider = current_config.knowledge.embedding_provider.lower() - embedding_provider_config = current_config.get_embedding_provider_config() - embedding_endpoint = getattr(embedding_provider_config, "endpoint", None) - await flows_service.change_langflow_model_value( - provider=embedding_provider, - embedding_model=current_config.knowledge.embedding_model, - endpoint=embedding_endpoint, + await clients._create_langflow_global_variable( + "SELECTED_EMBEDDING_MODEL", current_config.knowledge.embedding_model, modify=True + ) + logger.info( + f"Set SELECTED_EMBEDDING_MODEL global variable to {current_config.knowledge.embedding_model}" ) - logger.info(f"Updated Langflow flows for embedding provider {embedding_provider}") except Exception as e: logger.error( diff --git a/src/services/chat_service.py b/src/services/chat_service.py index 63a1415b..d8cb320d 100644 --- a/src/services/chat_service.py +++ b/src/services/chat_service.py @@ -60,11 +60,17 @@ class ChatService: "LANGFLOW_URL and LANGFLOW_CHAT_FLOW_ID environment variables are required" ) - # Prepare extra headers for JWT authentication + # Prepare extra headers for JWT authentication and embedding model extra_headers = {} if jwt_token: extra_headers["X-LANGFLOW-GLOBAL-VAR-JWT"] = jwt_token + # Pass the selected embedding model as a global variable + from config.config_manager import get_openrag_config + config = get_openrag_config() + embedding_model = config.knowledge.embedding_model + extra_headers["X-LANGFLOW-GLOBAL-VAR-SELECTED_EMBEDDING_MODEL"] = embedding_model + # Get context variables for filters, limit, and threshold from auth_context import ( get_score_threshold, @@ -169,11 +175,17 @@ class ChatService: "LANGFLOW_URL and NUDGES_FLOW_ID environment variables are required" ) - # Prepare extra headers for JWT authentication + # Prepare 
extra headers for JWT authentication and embedding model extra_headers = {} if jwt_token: extra_headers["X-LANGFLOW-GLOBAL-VAR-JWT"] = jwt_token + # Pass the selected embedding model as a global variable + from config.config_manager import get_openrag_config + config = get_openrag_config() + embedding_model = config.knowledge.embedding_model + extra_headers["X-LANGFLOW-GLOBAL-VAR-SELECTED_EMBEDDING_MODEL"] = embedding_model + # Build the complete filter expression like the chat service does filter_expression = {} has_user_filters = False @@ -287,10 +299,16 @@ class ChatService: document_prompt = f"I'm uploading a document called '{filename}'. Here is its content:\n\n{document_content}\n\nPlease confirm you've received this document and are ready to answer questions about it." if endpoint == "langflow": - # Prepare extra headers for JWT authentication + # Prepare extra headers for JWT authentication and embedding model extra_headers = {} if jwt_token: extra_headers["X-LANGFLOW-GLOBAL-VAR-JWT"] = jwt_token + + # Pass the selected embedding model as a global variable + from config.config_manager import get_openrag_config + config = get_openrag_config() + embedding_model = config.knowledge.embedding_model + extra_headers["X-LANGFLOW-GLOBAL-VAR-SELECTED_EMBEDDING_MODEL"] = embedding_model # Ensure the Langflow client exists; try lazy init if needed langflow_client = await clients.ensure_langflow_client() if not langflow_client: diff --git a/src/services/langflow_file_service.py b/src/services/langflow_file_service.py index 017431bf..103716e1 100644 --- a/src/services/langflow_file_service.py +++ b/src/services/langflow_file_service.py @@ -140,6 +140,11 @@ class LangflowFileService: filename = str(file_tuples[0][0]) if file_tuples and len(file_tuples) > 0 else "" mimetype = str(file_tuples[0][2]) if file_tuples and len(file_tuples) > 0 and len(file_tuples[0]) > 2 else "" + # Get the current embedding model from config + from config.config_manager import get_openrag_config + config = get_openrag_config() + embedding_model = config.knowledge.embedding_model + headers={ "X-Langflow-Global-Var-JWT": str(jwt_token), "X-Langflow-Global-Var-OWNER": str(owner), @@ -149,6 +154,7 @@ class LangflowFileService: "X-Langflow-Global-Var-FILENAME": filename, "X-Langflow-Global-Var-MIMETYPE": mimetype, "X-Langflow-Global-Var-FILESIZE": str(file_size_bytes), + "X-Langflow-Global-Var-SELECTED_EMBEDDING_MODEL": str(embedding_model), } logger.info(f"[LF] Headers {headers}") logger.info(f"[LF] Payload {payload}") From 0c191edaaf18af1639ca6ae9f1cac48b9d3519db Mon Sep 17 00:00:00 2001 From: Edwin Jose Date: Tue, 25 Nov 2025 19:09:21 -0500 Subject: [PATCH 14/43] Switch ingestion flow to multimodal OpenSearch component Replaces OpenSearchHybrid with OpenSearchVectorStoreComponentMultimodalMultiEmbedding in ingestion_flow.json, updating all relevant edges and embedding connections. Updates docker-compose.yml to use local builds for backend, frontend, and langflow, and improves environment variable handling for API keys. This refactor enables multi-model and multimodal embedding support for document ingestion and search. 
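
A note on the mechanism in the patch above: the embedding model is threaded through Langflow's per-request global variables rather than by rewriting flows. Below is a minimal sketch of what a caller sends, assuming Langflow's standard `/api/v1/run/<flow_id>` endpoint; the flow ID, JWT, and question are placeholders, not values from this repo:

```python
import os

import requests  # requests 2.32.x, already a dependency in these flows

LANGFLOW_URL = os.environ.get("LANGFLOW_URL", "http://langflow:7860")
FLOW_ID = "00000000-0000-0000-0000-000000000000"  # placeholder, e.g. LANGFLOW_CHAT_FLOW_ID

# Per-request overrides for Langflow global variables, mirroring the
# headers built in chat_service.py and langflow_file_service.py above.
headers = {
    "X-LANGFLOW-GLOBAL-VAR-JWT": "<jwt-token>",  # placeholder
    "X-LANGFLOW-GLOBAL-VAR-SELECTED_EMBEDDING_MODEL": "text-embedding-3-small",
}

resp = requests.post(
    f"{LANGFLOW_URL}/api/v1/run/{FLOW_ID}",
    headers=headers,
    json={"input_value": "Which documents mention quarterly revenue?"},
    timeout=60,
)
resp.raise_for_status()
print(resp.json())
```

When a header is omitted, the variables named in `LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT` fall back to the container environment, which is why the compose files seed defaults such as `SELECTED_EMBEDDING_MODEL=text-embedding-3-small`.
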
--- docker-compose.yml | 32 +- flows/ingestion_flow.json | 2848 ++++++++++++++++++-------- flows/openrag_agent.json | 3933 ++++++++++++++++++++++++------------ flows/openrag_nudges.json | 3638 ++++++++++++++++++++++----------- flows/openrag_url_mcp.json | 3646 ++++++++++++++++++++++++++------- 5 files changed, 9997 insertions(+), 4100 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index dae748eb..25596f3b 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -45,9 +45,9 @@ services: openrag-backend: image: phact/openrag-backend:${OPENRAG_VERSION:-latest} - # build: - # context: . - # dockerfile: Dockerfile.backend + build: + context: . + dockerfile: Dockerfile.backend container_name: openrag-backend depends_on: - langflow @@ -89,9 +89,9 @@ services: openrag-frontend: image: phact/openrag-frontend:${OPENRAG_VERSION:-latest} - # build: - # context: . - # dockerfile: Dockerfile.frontend + build: + context: . + dockerfile: Dockerfile.frontend container_name: openrag-frontend depends_on: - openrag-backend @@ -104,20 +104,20 @@ services: volumes: - ./flows:/app/flows:U,z image: phact/openrag-langflow:${LANGFLOW_VERSION:-latest} - # build: - # context: . - # dockerfile: Dockerfile.langflow + build: + context: . + dockerfile: Dockerfile.langflow container_name: langflow ports: - "7860:7860" environment: - LANGFLOW_DEACTIVATE_TRACING=true - - OPENAI_API_KEY=${OPENAI_API_KEY} - - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY} - - WATSONX_API_KEY=${WATSONX_API_KEY} - - WATSONX_ENDPOINT=${WATSONX_ENDPOINT} - - WATSONX_PROJECT_ID=${WATSONX_PROJECT_ID} - - OLLAMA_BASE_URL=${OLLAMA_ENDPOINT} + - OPENAI_API_KEY=${OPENAI_API_KEY:-None} + - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-None} + - WATSONX_API_KEY=${WATSONX_API_KEY:-None} + - WATSONX_ENDPOINT=${WATSONX_ENDPOINT:-None} + - WATSONX_PROJECT_ID=${WATSONX_PROJECT_ID:-None} + - OLLAMA_BASE_URL=${OLLAMA_ENDPOINT:-None} - LANGFLOW_LOAD_FLOWS_PATH=/app/flows - LANGFLOW_SECRET_KEY=${LANGFLOW_SECRET_KEY} - JWT=None @@ -132,7 +132,7 @@ services: - FILESIZE=0 - SELECTED_EMBEDDING_MODEL=text-embedding-3-small - OPENSEARCH_PASSWORD=${OPENSEARCH_PASSWORD} - - LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT=JWT,OPENRAG-QUERY-FILTER,OPENSEARCH_PASSWORD,OWNER,OWNER_NAME,OWNER_EMAIL,CONNECTOR_TYPE,FILENAME,MIMETYPE,FILESIZE,SELECTED_EMBEDDING_MODEL + - LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT=JWT,OPENRAG-QUERY-FILTER,OPENSEARCH_PASSWORD,OWNER,OWNER_NAME,OWNER_EMAIL,CONNECTOR_TYPE,FILENAME,MIMETYPE,FILESIZE,SELECTED_EMBEDDING_MODEL,OPENAI_API_KEY,ANTHROPIC_API_KEY,WATSONX_API_KEY,WATSONX_ENDPOINT,WATSONX_PROJECT_ID,OLLAMA_BASE_URL - LANGFLOW_LOG_LEVEL=DEBUG - LANGFLOW_AUTO_LOGIN=${LANGFLOW_AUTO_LOGIN} - LANGFLOW_SUPERUSER=${LANGFLOW_SUPERUSER} diff --git a/flows/ingestion_flow.json b/flows/ingestion_flow.json index d517c1aa..3a9fccd8 100644 --- a/flows/ingestion_flow.json +++ b/flows/ingestion_flow.json @@ -1,35 +1,6 @@ { "data": { "edges": [ - { - "animated": false, - "className": "", - "data": { - "sourceHandle": { - "dataType": "SplitText", - "id": "SplitText-QIKhg", - "name": "dataframe", - "output_types": [ - "DataFrame" - ] - }, - "targetHandle": { - "fieldName": "ingest_data", - "id": "OpenSearchHybrid-Ve6bS", - "inputTypes": [ - "Data", - "DataFrame" - ], - "type": "other" - } - }, - "id": "xy-edge__SplitText-QIKhg{œdataTypeœ:œSplitTextœ,œidœ:œSplitText-QIKhgœ,œnameœ:œdataframeœ,œoutput_typesœ:[œDataFrameœ]}-OpenSearchHybrid-Ve6bS{œfieldNameœ:œingest_dataœ,œidœ:œOpenSearchHybrid-Ve6bSœ,œinputTypesœ:[œDataœ,œDataFrameœ],œtypeœ:œotherœ}", - 
"selected": false, - "source": "SplitText-QIKhg", - "sourceHandle": "{œdataTypeœ:œSplitTextœ,œidœ:œSplitText-QIKhgœ,œnameœ:œdataframeœ,œoutput_typesœ:[œDataFrameœ]}", - "target": "OpenSearchHybrid-Ve6bS", - "targetHandle": "{œfieldNameœ:œingest_dataœ,œidœ:œOpenSearchHybrid-Ve6bSœ,œinputTypesœ:[œDataœ,œDataFrameœ],œtypeœ:œotherœ}" - }, { "animated": false, "className": "", @@ -146,34 +117,6 @@ "target": "AdvancedDynamicFormBuilder-81Exw", "targetHandle": "{œfieldNameœ:œdynamic_owner_nameœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}" }, - { - "animated": false, - "className": "", - "data": { - "sourceHandle": { - "dataType": "AdvancedDynamicFormBuilder", - "id": "AdvancedDynamicFormBuilder-81Exw", - "name": "form_data", - "output_types": [ - "Data" - ] - }, - "targetHandle": { - "fieldName": "docs_metadata", - "id": "OpenSearchHybrid-Ve6bS", - "inputTypes": [ - "Data" - ], - "type": "table" - } - }, - "id": "xy-edge__AdvancedDynamicFormBuilder-81Exw{œdataTypeœ:œAdvancedDynamicFormBuilderœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œnameœ:œform_dataœ,œoutput_typesœ:[œDataœ]}-OpenSearchHybrid-Ve6bS{œfieldNameœ:œdocs_metadataœ,œidœ:œOpenSearchHybrid-Ve6bSœ,œinputTypesœ:[œDataœ],œtypeœ:œtableœ}", - "selected": false, - "source": "AdvancedDynamicFormBuilder-81Exw", - "sourceHandle": "{œdataTypeœ:œAdvancedDynamicFormBuilderœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œnameœ:œform_dataœ,œoutput_typesœ:[œDataœ]}", - "target": "OpenSearchHybrid-Ve6bS", - "targetHandle": "{œfieldNameœ:œdocs_metadataœ,œidœ:œOpenSearchHybrid-Ve6bSœ,œinputTypesœ:[œDataœ],œtypeœ:œtableœ}" - }, { "animated": false, "className": "", @@ -317,13 +260,70 @@ "target": "DataFrameOperations-N80fC", "targetHandle": "{œfieldNameœ:œdfœ,œidœ:œDataFrameOperations-N80fCœ,œinputTypesœ:[œDataFrameœ],œtypeœ:œotherœ}" }, + { + "animated": false, + "className": "", + "data": { + "sourceHandle": { + "dataType": "AdvancedDynamicFormBuilder", + "id": "AdvancedDynamicFormBuilder-81Exw", + "name": "form_data", + "output_types": [ + "Data" + ] + }, + "targetHandle": { + "fieldName": "docs_metadata", + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4", + "inputTypes": [ + "Data" + ], + "type": "table" + } + }, + "id": "xy-edge__AdvancedDynamicFormBuilder-81Exw{œdataTypeœ:œAdvancedDynamicFormBuilderœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œnameœ:œform_dataœ,œoutput_typesœ:[œDataœ]}-OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4{œfieldNameœ:œdocs_metadataœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4œ,œinputTypesœ:[œDataœ],œtypeœ:œtableœ}", + "selected": false, + "source": "AdvancedDynamicFormBuilder-81Exw", + "sourceHandle": "{œdataTypeœ:œAdvancedDynamicFormBuilderœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œnameœ:œform_dataœ,œoutput_typesœ:[œDataœ]}", + "target": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4", + "targetHandle": "{œfieldNameœ:œdocs_metadataœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4œ,œinputTypesœ:[œDataœ],œtypeœ:œtableœ}" + }, + { + "animated": false, + "className": "", + "data": { + "sourceHandle": { + "dataType": "SplitText", + "id": "SplitText-QIKhg", + "name": "dataframe", + "output_types": [ + "DataFrame" + ] + }, + "targetHandle": { + "fieldName": "ingest_data", + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4", + "inputTypes": [ + "Data", + "DataFrame" + ], + "type": "other" + } + }, + "id": 
"xy-edge__SplitText-QIKhg{œdataTypeœ:œSplitTextœ,œidœ:œSplitText-QIKhgœ,œnameœ:œdataframeœ,œoutput_typesœ:[œDataFrameœ]}-OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4{œfieldNameœ:œingest_dataœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4œ,œinputTypesœ:[œDataœ,œDataFrameœ],œtypeœ:œotherœ}", + "selected": false, + "source": "SplitText-QIKhg", + "sourceHandle": "{œdataTypeœ:œSplitTextœ,œidœ:œSplitText-QIKhgœ,œnameœ:œdataframeœ,œoutput_typesœ:[œDataFrameœ]}", + "target": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4", + "targetHandle": "{œfieldNameœ:œingest_dataœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4œ,œinputTypesœ:[œDataœ,œDataFrameœ],œtypeœ:œotherœ}" + }, { "animated": false, "className": "", "data": { "sourceHandle": { "dataType": "EmbeddingModel", - "id": "EmbeddingModel-WIe3H", + "id": "EmbeddingModel-3LsIP", "name": "embeddings", "output_types": [ "Embeddings" @@ -331,19 +331,74 @@ }, "targetHandle": { "fieldName": "embedding", - "id": "OpenSearchHybrid-Ve6bS", + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4", "inputTypes": [ "Embeddings" ], "type": "other" } }, - "id": "xy-edge__EmbeddingModel-WIe3H{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-WIe3Hœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}-OpenSearchHybrid-Ve6bS{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchHybrid-Ve6bSœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}", + "id": "xy-edge__EmbeddingModel-3LsIP{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-3LsIPœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}-OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4œ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}", "selected": false, - "source": "EmbeddingModel-WIe3H", - "sourceHandle": "{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-WIe3Hœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}", - "target": "OpenSearchHybrid-Ve6bS", - "targetHandle": "{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchHybrid-Ve6bSœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}" + "source": "EmbeddingModel-3LsIP", + "sourceHandle": "{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-3LsIPœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}", + "target": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4", + "targetHandle": "{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4œ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}" + }, + { + "animated": false, + "className": "", + "data": { + "sourceHandle": { + "dataType": "EmbeddingModel", + "id": "EmbeddingModel-E0hvR", + "name": "embeddings", + "output_types": [ + "Embeddings" + ] + }, + "targetHandle": { + "fieldName": "embedding", + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4", + "inputTypes": [ + "Embeddings" + ], + "type": "other" + } + }, + "id": "xy-edge__EmbeddingModel-E0hvR{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-E0hvRœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}-OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4œ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}", + "selected": false, + "source": "EmbeddingModel-E0hvR", + "sourceHandle": "{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-E0hvRœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}", + "target": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4", + 
"targetHandle": "{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4œ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}" + }, + { + "animated": false, + "data": { + "sourceHandle": { + "dataType": "EmbeddingModel", + "id": "EmbeddingModel-EAo9i", + "name": "embeddings", + "output_types": [ + "Embeddings" + ] + }, + "targetHandle": { + "fieldName": "embedding", + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4", + "inputTypes": [ + "Embeddings" + ], + "type": "other" + } + }, + "id": "xy-edge__EmbeddingModel-EAo9i{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-EAo9iœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}-OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4œ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}", + "selected": false, + "source": "EmbeddingModel-EAo9i", + "sourceHandle": "{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-EAo9iœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}", + "target": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4", + "targetHandle": "{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4œ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}" } ], "nodes": [ @@ -374,7 +429,7 @@ "frozen": false, "icon": "scissors-line-dashed", "legacy": false, - "lf_version": "1.6.3.dev0", + "lf_version": "1.7.0", "metadata": { "code_hash": "f2867efda61f", "dependencies": { @@ -579,8 +634,8 @@ "width": 320 }, "position": { - "x": 1704.25352249077, - "y": 1199.364065218893 + "x": 1711.934915237861, + "y": 1637.2034518030887 }, "positionAbsolute": { "x": 1683.4543896546102, @@ -590,651 +645,6 @@ "type": "genericNode", "width": 320 }, - { - "data": { - "id": "OpenSearchHybrid-Ve6bS", - "node": { - "base_classes": [ - "Data", - "DataFrame", - "VectorStore" - ], - "beta": false, - "conditional_paths": [], - "custom_fields": {}, - "description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.", - "display_name": "OpenSearch", - "documentation": "", - "edited": true, - "field_order": [ - "docs_metadata", - "opensearch_url", - "index_name", - "engine", - "space_type", - "ef_construction", - "m", - "ingest_data", - "search_query", - "should_cache_vector_store", - "embedding", - "embedding_model_name", - "vector_field", - "number_of_results", - "filter_expression", - "auth_mode", - "username", - "password", - "jwt_token", - "jwt_header", - "bearer_prefix", - "use_ssl", - "verify_certs" - ], - "frozen": false, - "icon": "OpenSearch", - "last_updated": "2025-10-10T14:37:10.405Z", - "legacy": false, - "metadata": { - "code_hash": "62d330aec569", - "dependencies": { - "dependencies": [ - { - "name": "opensearchpy", - "version": "2.8.0" - }, - { - "name": "lfx", - "version": "0.1.12.dev32" - } - ], - "total_dependencies": 2 - }, - "module": "custom_components.opensearch" - }, - "minimized": false, - "output_types": [], - "outputs": [ - { - "allows_loop": false, - "cache": true, - "display_name": "Search Results", - "group_outputs": false, - "hidden": null, - "method": "search_documents", - "name": "search_results", - "options": null, - "required_inputs": null, - "selected": "Data", - "tool_mode": true, - "types": [ - "Data" - ], - "value": "__UNDEFINED__" - }, - { - "allows_loop": false, - "cache": true, - "display_name": "DataFrame", - "group_outputs": false, - "hidden": null, - "method": "as_dataframe", - "name": "dataframe", - 
"options": null, - "required_inputs": null, - "selected": "DataFrame", - "tool_mode": true, - "types": [ - "DataFrame" - ], - "value": "__UNDEFINED__" - }, - { - "allows_loop": false, - "cache": true, - "display_name": "Vector Store Connection", - "group_outputs": false, - "hidden": false, - "method": "as_vector_store", - "name": "vectorstoreconnection", - "options": null, - "required_inputs": null, - "selected": "VectorStore", - "tool_mode": true, - "types": [ - "VectorStore" - ], - "value": "__UNDEFINED__" - } - ], - "pinned": false, - "template": { - "_type": "Component", - "auth_mode": { - "_input_type": "DropdownInput", - "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Authentication Mode", - "dynamic": false, - "external_options": {}, - "info": "Authentication method: 'basic' for username/password authentication, or 'jwt' for JSON Web Token (Bearer) authentication.", - "load_from_db": false, - "name": "auth_mode", - "options": [ - "basic", - "jwt" - ], - "options_metadata": [], - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": true, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "jwt" - }, - "bearer_prefix": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Prefix 'Bearer '", - "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "name": "bearer_prefix", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "bool", - "value": true - }, - "code": { - "advanced": true, - "dynamic": true, - "fileTypes": [], - "file_path": "", - "info": "", - "list": false, - "load_from_db": false, - "multiline": true, - "name": "code", - "password": false, - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "type": "code", - "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom typing import Any, List, Optional\n\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef 
get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponent(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"]\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
\"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"]),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model being used (e.g., 'text-embedding-3-small'). \"\n \"Used to create dynamic vector field names and track which model embedded each document. \"\n \"Auto-detected from embedding component if not specified.\"\n ),\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=False,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=False,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=True,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. \"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from embedding component\n if hasattr(self, \"embedding\") and self.embedding:\n if hasattr(self.embedding, \"model\"):\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_name\"):\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'model' or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\n \"type\": \"keyword\"\n },\n \"embedding_dimensions\": {\n \"type\": \"integer\"\n }\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n raise ValueError(\n f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n )\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication 
configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n self.log(self.ingest_data)\n client = self.build_client()\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n docs = self.ingest_data or []\n if not docs:\n self.log(\"No documents to ingest.\")\n return\n\n # Get embedding model name\n embedding_model = self._get_embedding_model_name()\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n self.log(f\"Using embedding model: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] 
Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Generate embeddings (threaded for concurrency) with retries\n def embed_chunk(chunk_text: str) -> list[float]:\n return self.embedding.embed_documents([chunk_text])[0]\n\n vectors: Optional[List[List[float]]] = None\n last_exception: Optional[Exception] = None\n delay = 1.0\n attempts = 0\n\n while attempts < 3:\n attempts += 1\n try:\n max_workers = min(max(len(texts), 1), 8)\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n vectors = [None] * len(texts)\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n break\n except Exception as exc:\n last_exception = exc\n if attempts >= 3:\n logger.error(\n \"Embedding generation failed after retries\",\n error=str(exc),\n )\n raise\n logger.warning(\n \"Threaded embedding generation failed (attempt %s/%s), retrying in %.1fs\",\n attempts,\n 3,\n delay,\n )\n time.sleep(delay)\n delay = min(delay * 2, 8.0)\n\n if vectors is None:\n raise RuntimeError(\n f\"Embedding generation failed: {last_exception}\" if last_exception else \"Embedding generation failed\"\n )\n\n if not vectors:\n self.log(\"No vectors generated from documents.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(\n f\"Failed to create index '{self.index_name}': {creation_error}\"\n )\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use 
dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\n \"size\": 0,\n \"aggs\": {\n \"embedding_models\": {\n \"terms\": {\n \"field\": \"embedding_model\",\n \"size\": 10\n }\n }\n }\n }\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\n \"bool\": {\n 
\"filter\": filter_clauses\n }\n }\n\n result = client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n return models\n except Exception as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n return [self._get_embedding_model_name()]\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except Exception as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(\n f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\"\n )\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return True\n\n return False\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. 
Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models in parallel\n query_embeddings = {}\n\n # Note: Langflow is synchronous, so we can't use true async here\n # But we log the intent for parallel processing\n logger.info(f\"Generating embeddings for {len(available_models)} models\")\n\n original_model_attr = getattr(self.embedding, \"model\", None)\n original_deployment_attr = getattr(self.embedding, \"deployment\", None)\n original_dimensions_attr = getattr(self.embedding, \"dimensions\", None)\n\n for model_name in available_models:\n try:\n # In a real async environment, these would run in parallel\n # For now, they run sequentially\n if hasattr(self.embedding, \"model\"):\n setattr(self.embedding, \"model\", model_name)\n if hasattr(self.embedding, \"deployment\"):\n setattr(self.embedding, \"deployment\", model_name)\n if hasattr(self.embedding, \"dimensions\"):\n setattr(self.embedding, \"dimensions\", None)\n vec = self.embedding.embed_query(q)\n query_embeddings[model_name] = vec\n logger.info(f\"Generated embedding for model: {model_name}\")\n except Exception as e:\n logger.error(f\"Failed to generate embedding for {model_name}: {e}\")\n\n if hasattr(self.embedding, \"model\"):\n setattr(self.embedding, \"model\", original_model_attr)\n if hasattr(self.embedding, \"deployment\"):\n setattr(self.embedding, \"deployment\", original_deployment_attr)\n if hasattr(self.embedding, \"dimensions\"):\n setattr(self.embedding, \"dimensions\", original_dimensions_attr)\n\n if not query_embeddings:\n msg = \"Failed to generate embeddings for any model\"\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, 
embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n continue\n\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\n \"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)],\n \"minimum_should_match\": 1\n }\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(\n f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models\"\n )\n\n try:\n resp = client.search(\n index=self.index_name, body=body, params={\"terminate_after\": 0}\n )\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates 
parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n raw = self.search(self.search_query or \"\")\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n self.log(self.ingest_data)\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n 
build_config[\"bearer_prefix\"][\"required\"] = False\n\n if is_basic:\n build_config[\"jwt_token\"][\"value\"] = \"\"\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n" - }, - "docs_metadata": { - "_input_type": "TableInput", - "advanced": false, - "display_name": "Document Metadata", - "dynamic": false, - "info": "Additional metadata key-value pairs to be added to all ingested documents. Useful for tagging documents with source information, categories, or other custom attributes.", - "input_types": [ - "Data" - ], - "is_list": true, - "list_add_label": "Add More", - "name": "docs_metadata", - "placeholder": "", - "required": false, - "show": true, - "table_icon": "Table", - "table_schema": [ - { - "description": "Key name", - "display_name": "Key", - "formatter": "text", - "name": "key", - "type": "str" - }, - { - "description": "Value of the metadata", - "display_name": "Value", - "formatter": "text", - "name": "value", - "type": "str" - } - ], - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "trigger_icon": "Table", - "trigger_text": "Open table", - "type": "table", - "value": [] - }, - "ef_construction": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "EF Construction", - "dynamic": false, - "info": "Size of the dynamic candidate list during index construction. Higher values improve recall but increase indexing time and memory usage.", - "list": false, - "list_add_label": "Add More", - "name": "ef_construction", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "int", - "value": 512 - }, - "embedding": { - "_input_type": "HandleInput", - "advanced": false, - "display_name": "Embedding", - "dynamic": false, - "info": "", - "input_types": [ - "Embeddings" - ], - "list": false, - "list_add_label": "Add More", - "name": "embedding", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "trace_as_metadata": true, - "type": "other", - "value": "" - }, - "embedding_model_name": { - "_input_type": "StrInput", - "advanced": false, - "display_name": "Embedding Model Name", - "dynamic": false, - "info": "Name of the embedding model being used (e.g., 'text-embedding-3-small'). Used to create dynamic vector field names and track which model embedded each document. Auto-detected from embedding component if not specified.", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "embedding_model_name", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, - "engine": { - "_input_type": "DropdownInput", - "advanced": true, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Vector Engine", - "dynamic": false, - "external_options": {}, - "info": "Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.", - "load_from_db": false, - "name": "engine", - "options": [ - "jvector", - "nmslib", - "faiss", - "lucene" - ], - "options_metadata": [], - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "jvector" - }, - "filter_expression": { - "_input_type": "MultilineInput", - "advanced": false, - "copy_field": false, - "display_name": "Search Filters (JSON)", - "dynamic": false, - "info": "Optional JSON configuration for search filtering, result limits, and score thresholds.\n\nFormat 1 - Explicit filters:\n{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, {\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\n\nFormat 2 - Context-style mapping:\n{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\n\nUse __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "multiline": true, - "name": "filter_expression", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, - "index_name": { - "_input_type": "StrInput", - "advanced": false, - "display_name": "Index Name", - "dynamic": false, - "info": "The OpenSearch index name where documents will be stored and searched. Will be created automatically if it doesn't exist.", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "index_name", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "documents" - }, - "ingest_data": { - "_input_type": "HandleInput", - "advanced": false, - "display_name": "Ingest Data", - "dynamic": false, - "info": "", - "input_types": [ - "Data", - "DataFrame" - ], - "list": true, - "list_add_label": "Add More", - "name": "ingest_data", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "trace_as_metadata": true, - "type": "other", - "value": "" - }, - "jwt_header": { - "_input_type": "StrInput", - "advanced": true, - "display_name": "JWT Header Name", - "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "jwt_header", - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "Authorization" - }, - "jwt_token": { - "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "JWT Token", - "dynamic": false, - "info": "Valid JSON Web Token for authentication. Will be sent in the Authorization header (with optional 'Bearer ' prefix).", - "input_types": [], - "load_from_db": false, - "name": "jwt_token", - "password": true, - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "type": "str", - "value": "jwt" - }, - "m": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "M Parameter", - "dynamic": false, - "info": "Number of bidirectional connections for each vector in the HNSW graph. 
Higher values improve search quality but increase memory usage and indexing time.", - "list": false, - "list_add_label": "Add More", - "name": "m", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "int", - "value": 16 - }, - "number_of_results": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "Default Result Limit", - "dynamic": false, - "info": "Default maximum number of search results to return when no limit is specified in the filter expression.", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "number_of_results", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "int", - "value": 15 - }, - "opensearch_url": { - "_input_type": "StrInput", - "advanced": false, - "display_name": "OpenSearch URL", - "dynamic": false, - "info": "The connection URL for your OpenSearch cluster (e.g., http://localhost:9200 for local development or your cloud endpoint).", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "opensearch_url", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "https://opensearch:9200" - }, - "password": { - "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "OpenSearch Password", - "dynamic": false, - "info": "", - "input_types": [], - "load_from_db": false, - "name": "password", - "password": true, - "placeholder": "", - "required": false, - "show": false, - "title_case": false, - "type": "str", - "value": "" - }, - "search_query": { - "_input_type": "QueryInput", - "advanced": false, - "display_name": "Search Query", - "dynamic": false, - "info": "Enter a query to run a similarity search.", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "search_query", - "placeholder": "Enter a query...", - "required": false, - "show": true, - "title_case": false, - "tool_mode": true, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "query", - "value": "" - }, - "should_cache_vector_store": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Cache Vector Store", - "dynamic": false, - "info": "If True, the vector store will be cached for the current build of the component. This is useful for components that have multiple output methods and want to share the same vector store.", - "list": false, - "list_add_label": "Add More", - "name": "should_cache_vector_store", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "bool", - "value": true - }, - "space_type": { - "_input_type": "DropdownInput", - "advanced": true, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Distance Metric", - "dynamic": false, - "external_options": {}, - "info": "Distance metric for calculating vector similarity. 
'l2' (Euclidean) is most common, 'cosinesimil' for cosine similarity, 'innerproduct' for dot product.", - "name": "space_type", - "options": [ - "l2", - "l1", - "cosinesimil", - "linf", - "innerproduct" - ], - "options_metadata": [], - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "l2" - }, - "use_ssl": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Use SSL/TLS", - "dynamic": false, - "info": "Enable SSL/TLS encryption for secure connections to OpenSearch.", - "list": false, - "list_add_label": "Add More", - "name": "use_ssl", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "bool", - "value": true - }, - "username": { - "_input_type": "StrInput", - "advanced": false, - "display_name": "Username", - "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "username", - "placeholder": "", - "required": false, - "show": false, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "admin" - }, - "vector_field": { - "_input_type": "StrInput", - "advanced": true, - "display_name": "Legacy Vector Field Name", - "dynamic": false, - "info": "Legacy field name for backward compatibility. New documents use dynamic fields (chunk_embedding_{model_name}) based on the embedding_model_name.", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "vector_field", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "chunk_embedding" - }, - "verify_certs": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Verify SSL Certificates", - "dynamic": false, - "info": "Verify SSL certificates when connecting. 
Disable for self-signed certificates in development environments.", - "list": false, - "list_add_label": "Add More", - "name": "verify_certs", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "bool", - "value": false - } - }, - "tool_mode": false - }, - "selected_output": "search_results", - "showNode": true, - "type": "OpenSearchVectorStoreComponent" - }, - "dragging": false, - "id": "OpenSearchHybrid-Ve6bS", - "measured": { - "height": 904, - "width": 320 - }, - "position": { - "x": 2218.9287723423276, - "y": 1332.2598463956504 - }, - "selected": false, - "type": "genericNode" - }, { "data": { "id": "AdvancedDynamicFormBuilder-81Exw", @@ -1256,9 +666,9 @@ ], "frozen": false, "icon": "braces", - "last_updated": "2025-11-24T18:01:42.358Z", + "last_updated": "2025-11-26T00:02:32.601Z", "legacy": false, - "lf_version": "1.6.3.dev0", + "lf_version": "1.7.0", "metadata": {}, "minimized": false, "output_types": [], @@ -1269,6 +679,7 @@ "display_name": "Data", "group_outputs": false, "hidden": null, + "loop_types": null, "method": "process_form", "name": "form_data", "options": null, @@ -1286,6 +697,7 @@ "display_name": "Message", "group_outputs": false, "hidden": null, + "loop_types": null, "method": "get_message", "name": "message", "options": null, @@ -1300,6 +712,12 @@ ], "pinned": false, "template": { + "_frontend_node_flow_id": { + "value": "5488df7c-b93f-4f87-a446-b67028bc0813" + }, + "_frontend_node_folder_id": { + "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + }, "_type": "Component", "code": { "advanced": true, @@ -1322,6 +740,7 @@ "dynamic_connector_type": { "_input_type": "MultilineInput", "advanced": false, + "ai_enabled": false, "copy_field": false, "display_name": "connector_type", "dynamic": false, @@ -1336,6 +755,7 @@ "load_from_db": false, "multiline": true, "name": "dynamic_connector_type", + "override_skip": false, "placeholder": "", "real_time_refresh": null, "refresh_button": null, @@ -1353,6 +773,7 @@ "dynamic_owner": { "_input_type": "MultilineInput", "advanced": false, + "ai_enabled": false, "copy_field": false, "display_name": "owner", "dynamic": false, @@ -1367,6 +788,7 @@ "load_from_db": false, "multiline": true, "name": "dynamic_owner", + "override_skip": false, "placeholder": "", "real_time_refresh": null, "refresh_button": null, @@ -1384,6 +806,7 @@ "dynamic_owner_email": { "_input_type": "MultilineInput", "advanced": false, + "ai_enabled": false, "copy_field": false, "display_name": "owner_email", "dynamic": false, @@ -1398,6 +821,7 @@ "load_from_db": false, "multiline": true, "name": "dynamic_owner_email", + "override_skip": false, "placeholder": "", "real_time_refresh": null, "refresh_button": null, @@ -1415,6 +839,7 @@ "dynamic_owner_name": { "_input_type": "MultilineInput", "advanced": false, + "ai_enabled": false, "copy_field": false, "display_name": "owner_name", "dynamic": false, @@ -1429,6 +854,7 @@ "load_from_db": false, "multiline": true, "name": "dynamic_owner_name", + "override_skip": false, "placeholder": "", "real_time_refresh": null, "refresh_button": null, @@ -1537,7 +963,8 @@ "trace_as_metadata": true, "type": "bool", "value": false - } + }, + "is_refresh": false }, "tool_mode": false }, @@ -1552,8 +979,8 @@ "width": 320 }, "position": { - "x": 1363.7188885586695, - "y": 1810.433145275832 + "x": 1778.514096901592, + "y": 673.5870187063417 }, "selected": false, "type": "genericNode" @@ -1578,7 +1005,7 @@ "frozen": false, "icon": "type", "legacy": false, 
- "lf_version": "1.6.3.dev0", + "lf_version": "1.7.0", "metadata": {}, "minimized": false, "output_types": [], @@ -1652,8 +1079,8 @@ "width": 320 }, "position": { - "x": 717.1931358375118, - "y": 1935.3672380902274 + "x": 1343.2266447254406, + "y": 503.74766485111434 }, "selected": false, "type": "genericNode" @@ -1678,7 +1105,7 @@ "frozen": false, "icon": "type", "legacy": false, - "lf_version": "1.6.3.dev0", + "lf_version": "1.7.0", "metadata": {}, "minimized": false, "output_types": [], @@ -1752,8 +1179,8 @@ "width": 320 }, "position": { - "x": 715.4359658918343, - "y": 2198.0228056169435 + "x": 1341.4694747797632, + "y": 753.9209691638071 }, "selected": false, "type": "genericNode" @@ -1778,7 +1205,7 @@ "frozen": false, "icon": "type", "legacy": false, - "lf_version": "1.6.3.dev0", + "lf_version": "1.7.0", "metadata": {}, "minimized": false, "output_types": [], @@ -1852,8 +1279,8 @@ "width": 320 }, "position": { - "x": 714.0740919106219, - "y": 2445.0562064336955 + "x": 1336.2669044250051, + "y": 1000.9543699805594 }, "selected": false, "type": "genericNode" @@ -1878,7 +1305,7 @@ "frozen": false, "icon": "type", "legacy": false, - "lf_version": "1.6.3.dev0", + "lf_version": "1.7.0", "metadata": {}, "minimized": false, "output_types": [], @@ -1952,8 +1379,8 @@ "width": 320 }, "position": { - "x": 712.1292482141275, - "y": 2691.2573524344616 + "x": 1342.0034534756019, + "y": 1239.4741232342342 }, "selected": false, "type": "genericNode" @@ -1989,7 +1416,7 @@ "frozen": false, "icon": "Docling", "legacy": false, - "lf_version": "1.6.3.dev0", + "lf_version": "1.7.0", "metadata": { "code_hash": "26eeb513dded", "dependencies": { @@ -2265,7 +1692,9 @@ "bz2", "gz" ], - "file_path": [], + "file_path": [ + "167c86ee-113b-4286-990c-b9566c363215/IBM Recognition Center Home.pdf" + ], "info": "Supported file extensions: adoc, asciidoc, asc, bmp, csv, dotx, dotm, docm, docx, htm, html, jpeg, json, md, pdf, png, potx, ppsx, pptm, potm, ppsm, pptx, tiff, txt, xls, xlsx, xhtml, xml, webp; optionally bundled in file extensions: zip, tar, tgz, bz2, gz", "list": true, "list_add_label": "Add More", @@ -2325,12 +1754,12 @@ "dragging": false, "id": "DoclingRemote-Dp3PX", "measured": { - "height": 475, + "height": 479, "width": 320 }, "position": { - "x": -248.47065093890868, - "y": 1040.3002495758292 + "x": -175.49741984154275, + "y": 1505.0245107748437 }, "selected": false, "type": "genericNode" @@ -2362,7 +1791,7 @@ "icon": "Docling", "last_updated": "2025-10-04T01:42:10.290Z", "legacy": false, - "lf_version": "1.6.3.dev0", + "lf_version": "1.7.0", "metadata": { "code_hash": "4de16ddd37ac", "dependencies": { @@ -2586,14 +2015,16 @@ "width": 320 }, "position": { - "x": 134.00431977210877, - "y": 1065.2709317561028 + "x": 206.97755086947473, + "y": 1610.6498167995744 }, "selected": false, "type": "genericNode" }, { "data": { + "description": "Perform various operations on a DataFrame.", + "display_name": "DataFrame Operations", "id": "DataFrameOperations-1BWXB", "node": { "base_classes": [ @@ -2604,7 +2035,7 @@ "custom_fields": {}, "description": "Perform various operations on a DataFrame.", "display_name": "DataFrame Operations", - "documentation": "https://docs.langflow.org/components-processing#dataframe-operations", + "documentation": "https://docs.langflow.org/dataframe-operations", "edited": false, "field_order": [ "df", @@ -2622,11 +2053,11 @@ ], "frozen": false, "icon": "table", - "last_updated": "2025-11-24T18:01:42.468Z", + "last_updated": "2025-11-26T00:02:32.725Z", "legacy": false, - "lf_version": 
"1.6.3.dev0", + "lf_version": "1.7.0", "metadata": { - "code_hash": "b4d6b19b6eef", + "code_hash": "904f4eaebccd", "dependencies": { "dependencies": [ { @@ -2635,12 +2066,12 @@ }, { "name": "lfx", - "version": "0.1.12.dev31" + "version": null } ], "total_dependencies": 2 }, - "module": "lfx.components.processing.dataframe_operations.DataFrameOperationsComponent" + "module": "custom_components.dataframe_operations" }, "minimized": false, "output_types": [], @@ -2650,6 +2081,7 @@ "cache": true, "display_name": "DataFrame", "group_outputs": false, + "loop_types": null, "method": "perform_operation", "name": "output", "options": null, @@ -2664,6 +2096,12 @@ ], "pinned": false, "template": { + "_frontend_node_flow_id": { + "value": "5488df7c-b93f-4f87-a446-b67028bc0813" + }, + "_frontend_node_folder_id": { + "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + }, "_type": "Component", "ascending": { "_input_type": "BoolInput", @@ -2674,12 +2112,14 @@ "list": false, "list_add_label": "Add More", "name": "ascending", + "override_skip": false, "placeholder": "", "required": false, "show": false, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "bool", "value": true }, @@ -2699,7 +2139,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import pandas as pd\n\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.inputs import SortableListInput\nfrom lfx.io import BoolInput, DataFrameInput, DropdownInput, IntInput, MessageTextInput, Output, StrInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.dataframe import DataFrame\n\n\nclass DataFrameOperationsComponent(Component):\n display_name = \"DataFrame Operations\"\n description = \"Perform various operations on a DataFrame.\"\n documentation: str = \"https://docs.langflow.org/components-processing#dataframe-operations\"\n icon = \"table\"\n name = \"DataFrameOperations\"\n\n OPERATION_CHOICES = [\n \"Add Column\",\n \"Drop Column\",\n \"Filter\",\n \"Head\",\n \"Rename Column\",\n \"Replace Value\",\n \"Select Columns\",\n \"Sort\",\n \"Tail\",\n \"Drop Duplicates\",\n ]\n\n inputs = [\n DataFrameInput(\n name=\"df\",\n display_name=\"DataFrame\",\n info=\"The input DataFrame to operate on.\",\n required=True,\n ),\n SortableListInput(\n name=\"operation\",\n display_name=\"Operation\",\n placeholder=\"Select Operation\",\n info=\"Select the DataFrame operation to perform.\",\n options=[\n {\"name\": \"Add Column\", \"icon\": \"plus\"},\n {\"name\": \"Drop Column\", \"icon\": \"minus\"},\n {\"name\": \"Filter\", \"icon\": \"filter\"},\n {\"name\": \"Head\", \"icon\": \"arrow-up\"},\n {\"name\": \"Rename Column\", \"icon\": \"pencil\"},\n {\"name\": \"Replace Value\", \"icon\": \"replace\"},\n {\"name\": \"Select Columns\", \"icon\": \"columns\"},\n {\"name\": \"Sort\", \"icon\": \"arrow-up-down\"},\n {\"name\": \"Tail\", \"icon\": \"arrow-down\"},\n {\"name\": \"Drop Duplicates\", \"icon\": \"copy-x\"},\n ],\n real_time_refresh=True,\n limit=1,\n ),\n StrInput(\n name=\"column_name\",\n display_name=\"Column Name\",\n info=\"The column name to use for the operation.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"filter_value\",\n display_name=\"Filter Value\",\n info=\"The value to filter rows by.\",\n dynamic=True,\n show=False,\n ),\n DropdownInput(\n name=\"filter_operator\",\n display_name=\"Filter Operator\",\n options=[\n \"equals\",\n \"not equals\",\n \"contains\",\n \"not contains\",\n \"starts with\",\n \"ends with\",\n \"greater 
than\",\n \"less than\",\n ],\n value=\"equals\",\n info=\"The operator to apply for filtering rows.\",\n advanced=False,\n dynamic=True,\n show=False,\n ),\n BoolInput(\n name=\"ascending\",\n display_name=\"Sort Ascending\",\n info=\"Whether to sort in ascending order.\",\n dynamic=True,\n show=False,\n value=True,\n ),\n StrInput(\n name=\"new_column_name\",\n display_name=\"New Column Name\",\n info=\"The new column name when renaming or adding a column.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"new_column_value\",\n display_name=\"New Column Value\",\n info=\"The value to populate the new column with.\",\n dynamic=True,\n show=False,\n ),\n StrInput(\n name=\"columns_to_select\",\n display_name=\"Columns to Select\",\n dynamic=True,\n is_list=True,\n show=False,\n ),\n IntInput(\n name=\"num_rows\",\n display_name=\"Number of Rows\",\n info=\"Number of rows to return (for head/tail).\",\n dynamic=True,\n show=False,\n value=5,\n ),\n MessageTextInput(\n name=\"replace_value\",\n display_name=\"Value to Replace\",\n info=\"The value to replace in the column.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"replacement_value\",\n display_name=\"Replacement Value\",\n info=\"The value to replace with.\",\n dynamic=True,\n show=False,\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"DataFrame\",\n name=\"output\",\n method=\"perform_operation\",\n info=\"The resulting DataFrame after the operation.\",\n )\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n dynamic_fields = [\n \"column_name\",\n \"filter_value\",\n \"filter_operator\",\n \"ascending\",\n \"new_column_name\",\n \"new_column_value\",\n \"columns_to_select\",\n \"num_rows\",\n \"replace_value\",\n \"replacement_value\",\n ]\n for field in dynamic_fields:\n build_config[field][\"show\"] = False\n\n if field_name == \"operation\":\n # Handle SortableListInput format\n if isinstance(field_value, list):\n operation_name = field_value[0].get(\"name\", \"\") if field_value else \"\"\n else:\n operation_name = field_value or \"\"\n\n # If no operation selected, all dynamic fields stay hidden (already set to False above)\n if not operation_name:\n return build_config\n\n if operation_name == \"Filter\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"filter_value\"][\"show\"] = True\n build_config[\"filter_operator\"][\"show\"] = True\n elif operation_name == \"Sort\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"ascending\"][\"show\"] = True\n elif operation_name == \"Drop Column\":\n build_config[\"column_name\"][\"show\"] = True\n elif operation_name == \"Rename Column\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"new_column_name\"][\"show\"] = True\n elif operation_name == \"Add Column\":\n build_config[\"new_column_name\"][\"show\"] = True\n build_config[\"new_column_value\"][\"show\"] = True\n elif operation_name == \"Select Columns\":\n build_config[\"columns_to_select\"][\"show\"] = True\n elif operation_name in {\"Head\", \"Tail\"}:\n build_config[\"num_rows\"][\"show\"] = True\n elif operation_name == \"Replace Value\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"replace_value\"][\"show\"] = True\n build_config[\"replacement_value\"][\"show\"] = True\n elif operation_name == \"Drop Duplicates\":\n build_config[\"column_name\"][\"show\"] = True\n\n return build_config\n\n def perform_operation(self) -> DataFrame:\n df_copy = self.df.copy()\n\n # Handle 
SortableListInput format for operation\n operation_input = getattr(self, \"operation\", [])\n if isinstance(operation_input, list) and len(operation_input) > 0:\n op = operation_input[0].get(\"name\", \"\")\n else:\n op = \"\"\n\n # If no operation selected, return original DataFrame\n if not op:\n return df_copy\n\n if op == \"Filter\":\n return self.filter_rows_by_value(df_copy)\n if op == \"Sort\":\n return self.sort_by_column(df_copy)\n if op == \"Drop Column\":\n return self.drop_column(df_copy)\n if op == \"Rename Column\":\n return self.rename_column(df_copy)\n if op == \"Add Column\":\n return self.add_column(df_copy)\n if op == \"Select Columns\":\n return self.select_columns(df_copy)\n if op == \"Head\":\n return self.head(df_copy)\n if op == \"Tail\":\n return self.tail(df_copy)\n if op == \"Replace Value\":\n return self.replace_values(df_copy)\n if op == \"Drop Duplicates\":\n return self.drop_duplicates(df_copy)\n msg = f\"Unsupported operation: {op}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def filter_rows_by_value(self, df: DataFrame) -> DataFrame:\n column = df[self.column_name]\n filter_value = self.filter_value\n\n # Handle regular DropdownInput format (just a string value)\n operator = getattr(self, \"filter_operator\", \"equals\") # Default to equals for backward compatibility\n\n if operator == \"equals\":\n mask = column == filter_value\n elif operator == \"not equals\":\n mask = column != filter_value\n elif operator == \"contains\":\n mask = column.astype(str).str.contains(str(filter_value), na=False)\n elif operator == \"not contains\":\n mask = ~column.astype(str).str.contains(str(filter_value), na=False)\n elif operator == \"starts with\":\n mask = column.astype(str).str.startswith(str(filter_value), na=False)\n elif operator == \"ends with\":\n mask = column.astype(str).str.endswith(str(filter_value), na=False)\n elif operator == \"greater than\":\n try:\n # Try to convert filter_value to numeric for comparison\n numeric_value = pd.to_numeric(filter_value)\n mask = column > numeric_value\n except (ValueError, TypeError):\n # If conversion fails, compare as strings\n mask = column.astype(str) > str(filter_value)\n elif operator == \"less than\":\n try:\n # Try to convert filter_value to numeric for comparison\n numeric_value = pd.to_numeric(filter_value)\n mask = column < numeric_value\n except (ValueError, TypeError):\n # If conversion fails, compare as strings\n mask = column.astype(str) < str(filter_value)\n else:\n mask = column == filter_value # Fallback to equals\n\n return DataFrame(df[mask])\n\n def sort_by_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.sort_values(by=self.column_name, ascending=self.ascending))\n\n def drop_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.drop(columns=[self.column_name]))\n\n def rename_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.rename(columns={self.column_name: self.new_column_name}))\n\n def add_column(self, df: DataFrame) -> DataFrame:\n df[self.new_column_name] = [self.new_column_value] * len(df)\n return DataFrame(df)\n\n def select_columns(self, df: DataFrame) -> DataFrame:\n columns = [col.strip() for col in self.columns_to_select]\n return DataFrame(df[columns])\n\n def head(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.head(self.num_rows))\n\n def tail(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.tail(self.num_rows))\n\n def replace_values(self, df: DataFrame) -> DataFrame:\n df[self.column_name] = 
df[self.column_name].replace(self.replace_value, self.replacement_value)\n return DataFrame(df)\n\n def drop_duplicates(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.drop_duplicates(subset=self.column_name))\n" + "value": "import pandas as pd\n\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.inputs import SortableListInput\nfrom lfx.io import BoolInput, DataFrameInput, DropdownInput, IntInput, MessageTextInput, Output, StrInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.dataframe import DataFrame\n\n\nclass DataFrameOperationsComponent(Component):\n display_name = \"DataFrame Operations\"\n description = \"Perform various operations on a DataFrame.\"\n documentation: str = \"https://docs.langflow.org/dataframe-operations\"\n icon = \"table\"\n name = \"DataFrameOperations\"\n\n OPERATION_CHOICES = [\n \"Add Column\",\n \"Drop Column\",\n \"Filter\",\n \"Head\",\n \"Rename Column\",\n \"Replace Value\",\n \"Select Columns\",\n \"Sort\",\n \"Tail\",\n \"Drop Duplicates\",\n ]\n\n inputs = [\n DataFrameInput(\n name=\"df\",\n display_name=\"DataFrame\",\n info=\"The input DataFrame to operate on.\",\n required=True,\n ),\n SortableListInput(\n name=\"operation\",\n display_name=\"Operation\",\n placeholder=\"Select Operation\",\n info=\"Select the DataFrame operation to perform.\",\n options=[\n {\"name\": \"Add Column\", \"icon\": \"plus\"},\n {\"name\": \"Drop Column\", \"icon\": \"minus\"},\n {\"name\": \"Filter\", \"icon\": \"filter\"},\n {\"name\": \"Head\", \"icon\": \"arrow-up\"},\n {\"name\": \"Rename Column\", \"icon\": \"pencil\"},\n {\"name\": \"Replace Value\", \"icon\": \"replace\"},\n {\"name\": \"Select Columns\", \"icon\": \"columns\"},\n {\"name\": \"Sort\", \"icon\": \"arrow-up-down\"},\n {\"name\": \"Tail\", \"icon\": \"arrow-down\"},\n {\"name\": \"Drop Duplicates\", \"icon\": \"copy-x\"},\n ],\n real_time_refresh=True,\n limit=1,\n ),\n StrInput(\n name=\"column_name\",\n display_name=\"Column Name\",\n info=\"The column name to use for the operation.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"filter_value\",\n display_name=\"Filter Value\",\n info=\"The value to filter rows by.\",\n dynamic=True,\n show=False,\n ),\n DropdownInput(\n name=\"filter_operator\",\n display_name=\"Filter Operator\",\n options=[\n \"equals\",\n \"not equals\",\n \"contains\",\n \"not contains\",\n \"starts with\",\n \"ends with\",\n \"greater than\",\n \"less than\",\n ],\n value=\"equals\",\n info=\"The operator to apply for filtering rows.\",\n advanced=False,\n dynamic=True,\n show=False,\n ),\n BoolInput(\n name=\"ascending\",\n display_name=\"Sort Ascending\",\n info=\"Whether to sort in ascending order.\",\n dynamic=True,\n show=False,\n value=True,\n ),\n StrInput(\n name=\"new_column_name\",\n display_name=\"New Column Name\",\n info=\"The new column name when renaming or adding a column.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"new_column_value\",\n display_name=\"New Column Value\",\n info=\"The value to populate the new column with.\",\n dynamic=True,\n show=False,\n ),\n StrInput(\n name=\"columns_to_select\",\n display_name=\"Columns to Select\",\n dynamic=True,\n is_list=True,\n show=False,\n ),\n IntInput(\n name=\"num_rows\",\n display_name=\"Number of Rows\",\n info=\"Number of rows to return (for head/tail).\",\n dynamic=True,\n show=False,\n value=5,\n ),\n MessageTextInput(\n name=\"replace_value\",\n display_name=\"Value to Replace\",\n info=\"The value to replace in the 
column.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"replacement_value\",\n display_name=\"Replacement Value\",\n info=\"The value to replace with.\",\n dynamic=True,\n show=False,\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"DataFrame\",\n name=\"output\",\n method=\"perform_operation\",\n info=\"The resulting DataFrame after the operation.\",\n )\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n dynamic_fields = [\n \"column_name\",\n \"filter_value\",\n \"filter_operator\",\n \"ascending\",\n \"new_column_name\",\n \"new_column_value\",\n \"columns_to_select\",\n \"num_rows\",\n \"replace_value\",\n \"replacement_value\",\n ]\n for field in dynamic_fields:\n build_config[field][\"show\"] = False\n\n if field_name == \"operation\":\n # Handle SortableListInput format\n if isinstance(field_value, list):\n operation_name = field_value[0].get(\"name\", \"\") if field_value else \"\"\n else:\n operation_name = field_value or \"\"\n\n # If no operation selected, all dynamic fields stay hidden (already set to False above)\n if not operation_name:\n return build_config\n\n if operation_name == \"Filter\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"filter_value\"][\"show\"] = True\n build_config[\"filter_operator\"][\"show\"] = True\n elif operation_name == \"Sort\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"ascending\"][\"show\"] = True\n elif operation_name == \"Drop Column\":\n build_config[\"column_name\"][\"show\"] = True\n elif operation_name == \"Rename Column\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"new_column_name\"][\"show\"] = True\n elif operation_name == \"Add Column\":\n build_config[\"new_column_name\"][\"show\"] = True\n build_config[\"new_column_value\"][\"show\"] = True\n elif operation_name == \"Select Columns\":\n build_config[\"columns_to_select\"][\"show\"] = True\n elif operation_name in {\"Head\", \"Tail\"}:\n build_config[\"num_rows\"][\"show\"] = True\n elif operation_name == \"Replace Value\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"replace_value\"][\"show\"] = True\n build_config[\"replacement_value\"][\"show\"] = True\n elif operation_name == \"Drop Duplicates\":\n build_config[\"column_name\"][\"show\"] = True\n\n return build_config\n\n def perform_operation(self) -> DataFrame:\n df_copy = self.df.copy()\n\n # Handle SortableListInput format for operation\n operation_input = getattr(self, \"operation\", [])\n if isinstance(operation_input, list) and len(operation_input) > 0:\n op = operation_input[0].get(\"name\", \"\")\n else:\n op = \"\"\n\n # If no operation selected, return original DataFrame\n if not op:\n return df_copy\n\n if op == \"Filter\":\n return self.filter_rows_by_value(df_copy)\n if op == \"Sort\":\n return self.sort_by_column(df_copy)\n if op == \"Drop Column\":\n return self.drop_column(df_copy)\n if op == \"Rename Column\":\n return self.rename_column(df_copy)\n if op == \"Add Column\":\n return self.add_column(df_copy)\n if op == \"Select Columns\":\n return self.select_columns(df_copy)\n if op == \"Head\":\n return self.head(df_copy)\n if op == \"Tail\":\n return self.tail(df_copy)\n if op == \"Replace Value\":\n return self.replace_values(df_copy)\n if op == \"Drop Duplicates\":\n return self.drop_duplicates(df_copy)\n msg = f\"Unsupported operation: {op}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def filter_rows_by_value(self, df: DataFrame) -> DataFrame:\n column = 
df[self.column_name]\n filter_value = self.filter_value\n\n # Handle regular DropdownInput format (just a string value)\n operator = getattr(self, \"filter_operator\", \"equals\") # Default to equals for backward compatibility\n\n if operator == \"equals\":\n mask = column == filter_value\n elif operator == \"not equals\":\n mask = column != filter_value\n elif operator == \"contains\":\n mask = column.astype(str).str.contains(str(filter_value), na=False)\n elif operator == \"not contains\":\n mask = ~column.astype(str).str.contains(str(filter_value), na=False)\n elif operator == \"starts with\":\n mask = column.astype(str).str.startswith(str(filter_value), na=False)\n elif operator == \"ends with\":\n mask = column.astype(str).str.endswith(str(filter_value), na=False)\n elif operator == \"greater than\":\n try:\n # Try to convert filter_value to numeric for comparison\n numeric_value = pd.to_numeric(filter_value)\n mask = column > numeric_value\n except (ValueError, TypeError):\n # If conversion fails, compare as strings\n mask = column.astype(str) > str(filter_value)\n elif operator == \"less than\":\n try:\n # Try to convert filter_value to numeric for comparison\n numeric_value = pd.to_numeric(filter_value)\n mask = column < numeric_value\n except (ValueError, TypeError):\n # If conversion fails, compare as strings\n mask = column.astype(str) < str(filter_value)\n else:\n mask = column == filter_value # Fallback to equals\n\n return DataFrame(df[mask])\n\n def sort_by_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.sort_values(by=self.column_name, ascending=self.ascending))\n\n def drop_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.drop(columns=[self.column_name]))\n\n def rename_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.rename(columns={self.column_name: self.new_column_name}))\n\n def add_column(self, df: DataFrame) -> DataFrame:\n df[self.new_column_name] = [self.new_column_value] * len(df)\n return DataFrame(df)\n\n def select_columns(self, df: DataFrame) -> DataFrame:\n columns = [col.strip() for col in self.columns_to_select]\n return DataFrame(df[columns])\n\n def head(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.head(self.num_rows))\n\n def tail(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.tail(self.num_rows))\n\n def replace_values(self, df: DataFrame) -> DataFrame:\n df[self.column_name] = df[self.column_name].replace(self.replace_value, self.replacement_value)\n return DataFrame(df)\n\n def drop_duplicates(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.drop_duplicates(subset=self.column_name))\n" }, "column_name": { "_input_type": "StrInput", @@ -2711,12 +2151,14 @@ "list_add_label": "Add More", "load_from_db": false, "name": "column_name", + "override_skip": false, "placeholder": "", "required": false, "show": false, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -2730,12 +2172,14 @@ "list_add_label": "Add More", "load_from_db": false, "name": "columns_to_select", + "override_skip": false, "placeholder": "", "required": false, "show": false, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -2751,6 +2195,7 @@ "list": false, "list_add_label": "Add More", "name": "df", + "override_skip": false, "placeholder": "", "required": true, "show": true, @@ -2758,6 +2203,7 @@ "tool_mode": false, "trace_as_input": true, 
"trace_as_metadata": true, + "track_in_telemetry": false, "type": "other", "value": "" }, @@ -2782,6 +2228,7 @@ "less than" ], "options_metadata": [], + "override_skip": false, "placeholder": "", "required": false, "show": false, @@ -2789,6 +2236,7 @@ "toggle": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "str", "value": "equals" }, @@ -2805,6 +2253,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "filter_value", + "override_skip": false, "placeholder": "", "required": false, "show": false, @@ -2812,9 +2261,11 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, + "is_refresh": false, "new_column_name": { "_input_type": "StrInput", "advanced": false, @@ -2825,12 +2276,14 @@ "list_add_label": "Add More", "load_from_db": false, "name": "new_column_name", + "override_skip": false, "placeholder": "", "required": false, "show": true, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "filename" }, @@ -2847,6 +2300,7 @@ "list_add_label": "Add More", "load_from_db": true, "name": "new_column_value", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -2854,6 +2308,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "FILENAME" }, @@ -2866,12 +2321,14 @@ "list": false, "list_add_label": "Add More", "name": "num_rows", + "override_skip": false, "placeholder": "", "required": false, "show": false, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "int", "value": 5 }, @@ -2882,6 +2339,7 @@ "dynamic": false, "info": "Select the DataFrame operation to perform.", "limit": 1, + "load_from_db": false, "name": "operation", "options": [ { @@ -2925,6 +2383,7 @@ "name": "Drop Duplicates" } ], + "override_skip": false, "placeholder": "Select Operation", "real_time_refresh": true, "required": false, @@ -2933,6 +2392,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "sortableList", "value": [ { @@ -2956,6 +2416,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "replace_value", + "override_skip": false, "placeholder": "", "required": false, "show": false, @@ -2963,6 +2424,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -2979,6 +2441,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "replacement_value", + "override_skip": false, "placeholder": "", "required": false, "show": false, @@ -2986,6 +2449,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" } @@ -3002,14 +2466,16 @@ "width": 320 }, "position": { - "x": 513.7675419899799, - "y": 1088.8324804581666 + "x": 575.218683966709, + "y": 1607.3264908868193 }, "selected": false, "type": "genericNode" }, { "data": { + "description": "Perform various operations on a DataFrame.", + "display_name": "DataFrame Operations", "id": "DataFrameOperations-N80fC", "node": { "base_classes": [ @@ -3020,7 +2486,7 @@ "custom_fields": {}, "description": "Perform various operations on a DataFrame.", "display_name": "DataFrame Operations", - "documentation": "https://docs.langflow.org/components-processing#dataframe-operations", + "documentation": 
"https://docs.langflow.org/dataframe-operations", "edited": false, "field_order": [ "df", @@ -3038,11 +2504,11 @@ ], "frozen": false, "icon": "table", - "last_updated": "2025-11-24T18:01:42.469Z", + "last_updated": "2025-11-26T00:02:32.726Z", "legacy": false, - "lf_version": "1.6.3.dev0", + "lf_version": "1.7.0", "metadata": { - "code_hash": "b4d6b19b6eef", + "code_hash": "904f4eaebccd", "dependencies": { "dependencies": [ { @@ -3051,12 +2517,12 @@ }, { "name": "lfx", - "version": "0.1.12.dev31" + "version": null } ], "total_dependencies": 2 }, - "module": "lfx.components.processing.dataframe_operations.DataFrameOperationsComponent" + "module": "custom_components.dataframe_operations" }, "minimized": false, "output_types": [], @@ -3066,6 +2532,7 @@ "cache": true, "display_name": "DataFrame", "group_outputs": false, + "loop_types": null, "method": "perform_operation", "name": "output", "options": null, @@ -3080,6 +2547,12 @@ ], "pinned": false, "template": { + "_frontend_node_flow_id": { + "value": "5488df7c-b93f-4f87-a446-b67028bc0813" + }, + "_frontend_node_folder_id": { + "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + }, "_type": "Component", "ascending": { "_input_type": "BoolInput", @@ -3090,12 +2563,14 @@ "list": false, "list_add_label": "Add More", "name": "ascending", + "override_skip": false, "placeholder": "", "required": false, "show": false, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "bool", "value": true }, @@ -3115,7 +2590,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import pandas as pd\n\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.inputs import SortableListInput\nfrom lfx.io import BoolInput, DataFrameInput, DropdownInput, IntInput, MessageTextInput, Output, StrInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.dataframe import DataFrame\n\n\nclass DataFrameOperationsComponent(Component):\n display_name = \"DataFrame Operations\"\n description = \"Perform various operations on a DataFrame.\"\n documentation: str = \"https://docs.langflow.org/components-processing#dataframe-operations\"\n icon = \"table\"\n name = \"DataFrameOperations\"\n\n OPERATION_CHOICES = [\n \"Add Column\",\n \"Drop Column\",\n \"Filter\",\n \"Head\",\n \"Rename Column\",\n \"Replace Value\",\n \"Select Columns\",\n \"Sort\",\n \"Tail\",\n \"Drop Duplicates\",\n ]\n\n inputs = [\n DataFrameInput(\n name=\"df\",\n display_name=\"DataFrame\",\n info=\"The input DataFrame to operate on.\",\n required=True,\n ),\n SortableListInput(\n name=\"operation\",\n display_name=\"Operation\",\n placeholder=\"Select Operation\",\n info=\"Select the DataFrame operation to perform.\",\n options=[\n {\"name\": \"Add Column\", \"icon\": \"plus\"},\n {\"name\": \"Drop Column\", \"icon\": \"minus\"},\n {\"name\": \"Filter\", \"icon\": \"filter\"},\n {\"name\": \"Head\", \"icon\": \"arrow-up\"},\n {\"name\": \"Rename Column\", \"icon\": \"pencil\"},\n {\"name\": \"Replace Value\", \"icon\": \"replace\"},\n {\"name\": \"Select Columns\", \"icon\": \"columns\"},\n {\"name\": \"Sort\", \"icon\": \"arrow-up-down\"},\n {\"name\": \"Tail\", \"icon\": \"arrow-down\"},\n {\"name\": \"Drop Duplicates\", \"icon\": \"copy-x\"},\n ],\n real_time_refresh=True,\n limit=1,\n ),\n StrInput(\n name=\"column_name\",\n display_name=\"Column Name\",\n info=\"The column name to use for the operation.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"filter_value\",\n display_name=\"Filter Value\",\n 
info=\"The value to filter rows by.\",\n dynamic=True,\n show=False,\n ),\n DropdownInput(\n name=\"filter_operator\",\n display_name=\"Filter Operator\",\n options=[\n \"equals\",\n \"not equals\",\n \"contains\",\n \"not contains\",\n \"starts with\",\n \"ends with\",\n \"greater than\",\n \"less than\",\n ],\n value=\"equals\",\n info=\"The operator to apply for filtering rows.\",\n advanced=False,\n dynamic=True,\n show=False,\n ),\n BoolInput(\n name=\"ascending\",\n display_name=\"Sort Ascending\",\n info=\"Whether to sort in ascending order.\",\n dynamic=True,\n show=False,\n value=True,\n ),\n StrInput(\n name=\"new_column_name\",\n display_name=\"New Column Name\",\n info=\"The new column name when renaming or adding a column.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"new_column_value\",\n display_name=\"New Column Value\",\n info=\"The value to populate the new column with.\",\n dynamic=True,\n show=False,\n ),\n StrInput(\n name=\"columns_to_select\",\n display_name=\"Columns to Select\",\n dynamic=True,\n is_list=True,\n show=False,\n ),\n IntInput(\n name=\"num_rows\",\n display_name=\"Number of Rows\",\n info=\"Number of rows to return (for head/tail).\",\n dynamic=True,\n show=False,\n value=5,\n ),\n MessageTextInput(\n name=\"replace_value\",\n display_name=\"Value to Replace\",\n info=\"The value to replace in the column.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"replacement_value\",\n display_name=\"Replacement Value\",\n info=\"The value to replace with.\",\n dynamic=True,\n show=False,\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"DataFrame\",\n name=\"output\",\n method=\"perform_operation\",\n info=\"The resulting DataFrame after the operation.\",\n )\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n dynamic_fields = [\n \"column_name\",\n \"filter_value\",\n \"filter_operator\",\n \"ascending\",\n \"new_column_name\",\n \"new_column_value\",\n \"columns_to_select\",\n \"num_rows\",\n \"replace_value\",\n \"replacement_value\",\n ]\n for field in dynamic_fields:\n build_config[field][\"show\"] = False\n\n if field_name == \"operation\":\n # Handle SortableListInput format\n if isinstance(field_value, list):\n operation_name = field_value[0].get(\"name\", \"\") if field_value else \"\"\n else:\n operation_name = field_value or \"\"\n\n # If no operation selected, all dynamic fields stay hidden (already set to False above)\n if not operation_name:\n return build_config\n\n if operation_name == \"Filter\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"filter_value\"][\"show\"] = True\n build_config[\"filter_operator\"][\"show\"] = True\n elif operation_name == \"Sort\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"ascending\"][\"show\"] = True\n elif operation_name == \"Drop Column\":\n build_config[\"column_name\"][\"show\"] = True\n elif operation_name == \"Rename Column\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"new_column_name\"][\"show\"] = True\n elif operation_name == \"Add Column\":\n build_config[\"new_column_name\"][\"show\"] = True\n build_config[\"new_column_value\"][\"show\"] = True\n elif operation_name == \"Select Columns\":\n build_config[\"columns_to_select\"][\"show\"] = True\n elif operation_name in {\"Head\", \"Tail\"}:\n build_config[\"num_rows\"][\"show\"] = True\n elif operation_name == \"Replace Value\":\n build_config[\"column_name\"][\"show\"] = True\n 
build_config[\"replace_value\"][\"show\"] = True\n build_config[\"replacement_value\"][\"show\"] = True\n elif operation_name == \"Drop Duplicates\":\n build_config[\"column_name\"][\"show\"] = True\n\n return build_config\n\n def perform_operation(self) -> DataFrame:\n df_copy = self.df.copy()\n\n # Handle SortableListInput format for operation\n operation_input = getattr(self, \"operation\", [])\n if isinstance(operation_input, list) and len(operation_input) > 0:\n op = operation_input[0].get(\"name\", \"\")\n else:\n op = \"\"\n\n # If no operation selected, return original DataFrame\n if not op:\n return df_copy\n\n if op == \"Filter\":\n return self.filter_rows_by_value(df_copy)\n if op == \"Sort\":\n return self.sort_by_column(df_copy)\n if op == \"Drop Column\":\n return self.drop_column(df_copy)\n if op == \"Rename Column\":\n return self.rename_column(df_copy)\n if op == \"Add Column\":\n return self.add_column(df_copy)\n if op == \"Select Columns\":\n return self.select_columns(df_copy)\n if op == \"Head\":\n return self.head(df_copy)\n if op == \"Tail\":\n return self.tail(df_copy)\n if op == \"Replace Value\":\n return self.replace_values(df_copy)\n if op == \"Drop Duplicates\":\n return self.drop_duplicates(df_copy)\n msg = f\"Unsupported operation: {op}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def filter_rows_by_value(self, df: DataFrame) -> DataFrame:\n column = df[self.column_name]\n filter_value = self.filter_value\n\n # Handle regular DropdownInput format (just a string value)\n operator = getattr(self, \"filter_operator\", \"equals\") # Default to equals for backward compatibility\n\n if operator == \"equals\":\n mask = column == filter_value\n elif operator == \"not equals\":\n mask = column != filter_value\n elif operator == \"contains\":\n mask = column.astype(str).str.contains(str(filter_value), na=False)\n elif operator == \"not contains\":\n mask = ~column.astype(str).str.contains(str(filter_value), na=False)\n elif operator == \"starts with\":\n mask = column.astype(str).str.startswith(str(filter_value), na=False)\n elif operator == \"ends with\":\n mask = column.astype(str).str.endswith(str(filter_value), na=False)\n elif operator == \"greater than\":\n try:\n # Try to convert filter_value to numeric for comparison\n numeric_value = pd.to_numeric(filter_value)\n mask = column > numeric_value\n except (ValueError, TypeError):\n # If conversion fails, compare as strings\n mask = column.astype(str) > str(filter_value)\n elif operator == \"less than\":\n try:\n # Try to convert filter_value to numeric for comparison\n numeric_value = pd.to_numeric(filter_value)\n mask = column < numeric_value\n except (ValueError, TypeError):\n # If conversion fails, compare as strings\n mask = column.astype(str) < str(filter_value)\n else:\n mask = column == filter_value # Fallback to equals\n\n return DataFrame(df[mask])\n\n def sort_by_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.sort_values(by=self.column_name, ascending=self.ascending))\n\n def drop_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.drop(columns=[self.column_name]))\n\n def rename_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.rename(columns={self.column_name: self.new_column_name}))\n\n def add_column(self, df: DataFrame) -> DataFrame:\n df[self.new_column_name] = [self.new_column_value] * len(df)\n return DataFrame(df)\n\n def select_columns(self, df: DataFrame) -> DataFrame:\n columns = [col.strip() for col in self.columns_to_select]\n return 
DataFrame(df[columns])\n\n def head(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.head(self.num_rows))\n\n def tail(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.tail(self.num_rows))\n\n def replace_values(self, df: DataFrame) -> DataFrame:\n df[self.column_name] = df[self.column_name].replace(self.replace_value, self.replacement_value)\n return DataFrame(df)\n\n def drop_duplicates(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.drop_duplicates(subset=self.column_name))\n" + "value": "import pandas as pd\n\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.inputs import SortableListInput\nfrom lfx.io import BoolInput, DataFrameInput, DropdownInput, IntInput, MessageTextInput, Output, StrInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.dataframe import DataFrame\n\n\nclass DataFrameOperationsComponent(Component):\n display_name = \"DataFrame Operations\"\n description = \"Perform various operations on a DataFrame.\"\n documentation: str = \"https://docs.langflow.org/dataframe-operations\"\n icon = \"table\"\n name = \"DataFrameOperations\"\n\n OPERATION_CHOICES = [\n \"Add Column\",\n \"Drop Column\",\n \"Filter\",\n \"Head\",\n \"Rename Column\",\n \"Replace Value\",\n \"Select Columns\",\n \"Sort\",\n \"Tail\",\n \"Drop Duplicates\",\n ]\n\n inputs = [\n DataFrameInput(\n name=\"df\",\n display_name=\"DataFrame\",\n info=\"The input DataFrame to operate on.\",\n required=True,\n ),\n SortableListInput(\n name=\"operation\",\n display_name=\"Operation\",\n placeholder=\"Select Operation\",\n info=\"Select the DataFrame operation to perform.\",\n options=[\n {\"name\": \"Add Column\", \"icon\": \"plus\"},\n {\"name\": \"Drop Column\", \"icon\": \"minus\"},\n {\"name\": \"Filter\", \"icon\": \"filter\"},\n {\"name\": \"Head\", \"icon\": \"arrow-up\"},\n {\"name\": \"Rename Column\", \"icon\": \"pencil\"},\n {\"name\": \"Replace Value\", \"icon\": \"replace\"},\n {\"name\": \"Select Columns\", \"icon\": \"columns\"},\n {\"name\": \"Sort\", \"icon\": \"arrow-up-down\"},\n {\"name\": \"Tail\", \"icon\": \"arrow-down\"},\n {\"name\": \"Drop Duplicates\", \"icon\": \"copy-x\"},\n ],\n real_time_refresh=True,\n limit=1,\n ),\n StrInput(\n name=\"column_name\",\n display_name=\"Column Name\",\n info=\"The column name to use for the operation.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"filter_value\",\n display_name=\"Filter Value\",\n info=\"The value to filter rows by.\",\n dynamic=True,\n show=False,\n ),\n DropdownInput(\n name=\"filter_operator\",\n display_name=\"Filter Operator\",\n options=[\n \"equals\",\n \"not equals\",\n \"contains\",\n \"not contains\",\n \"starts with\",\n \"ends with\",\n \"greater than\",\n \"less than\",\n ],\n value=\"equals\",\n info=\"The operator to apply for filtering rows.\",\n advanced=False,\n dynamic=True,\n show=False,\n ),\n BoolInput(\n name=\"ascending\",\n display_name=\"Sort Ascending\",\n info=\"Whether to sort in ascending order.\",\n dynamic=True,\n show=False,\n value=True,\n ),\n StrInput(\n name=\"new_column_name\",\n display_name=\"New Column Name\",\n info=\"The new column name when renaming or adding a column.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"new_column_value\",\n display_name=\"New Column Value\",\n info=\"The value to populate the new column with.\",\n dynamic=True,\n show=False,\n ),\n StrInput(\n name=\"columns_to_select\",\n display_name=\"Columns to Select\",\n dynamic=True,\n is_list=True,\n show=False,\n ),\n 
IntInput(\n name=\"num_rows\",\n display_name=\"Number of Rows\",\n info=\"Number of rows to return (for head/tail).\",\n dynamic=True,\n show=False,\n value=5,\n ),\n MessageTextInput(\n name=\"replace_value\",\n display_name=\"Value to Replace\",\n info=\"The value to replace in the column.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"replacement_value\",\n display_name=\"Replacement Value\",\n info=\"The value to replace with.\",\n dynamic=True,\n show=False,\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"DataFrame\",\n name=\"output\",\n method=\"perform_operation\",\n info=\"The resulting DataFrame after the operation.\",\n )\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n dynamic_fields = [\n \"column_name\",\n \"filter_value\",\n \"filter_operator\",\n \"ascending\",\n \"new_column_name\",\n \"new_column_value\",\n \"columns_to_select\",\n \"num_rows\",\n \"replace_value\",\n \"replacement_value\",\n ]\n for field in dynamic_fields:\n build_config[field][\"show\"] = False\n\n if field_name == \"operation\":\n # Handle SortableListInput format\n if isinstance(field_value, list):\n operation_name = field_value[0].get(\"name\", \"\") if field_value else \"\"\n else:\n operation_name = field_value or \"\"\n\n # If no operation selected, all dynamic fields stay hidden (already set to False above)\n if not operation_name:\n return build_config\n\n if operation_name == \"Filter\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"filter_value\"][\"show\"] = True\n build_config[\"filter_operator\"][\"show\"] = True\n elif operation_name == \"Sort\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"ascending\"][\"show\"] = True\n elif operation_name == \"Drop Column\":\n build_config[\"column_name\"][\"show\"] = True\n elif operation_name == \"Rename Column\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"new_column_name\"][\"show\"] = True\n elif operation_name == \"Add Column\":\n build_config[\"new_column_name\"][\"show\"] = True\n build_config[\"new_column_value\"][\"show\"] = True\n elif operation_name == \"Select Columns\":\n build_config[\"columns_to_select\"][\"show\"] = True\n elif operation_name in {\"Head\", \"Tail\"}:\n build_config[\"num_rows\"][\"show\"] = True\n elif operation_name == \"Replace Value\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"replace_value\"][\"show\"] = True\n build_config[\"replacement_value\"][\"show\"] = True\n elif operation_name == \"Drop Duplicates\":\n build_config[\"column_name\"][\"show\"] = True\n\n return build_config\n\n def perform_operation(self) -> DataFrame:\n df_copy = self.df.copy()\n\n # Handle SortableListInput format for operation\n operation_input = getattr(self, \"operation\", [])\n if isinstance(operation_input, list) and len(operation_input) > 0:\n op = operation_input[0].get(\"name\", \"\")\n else:\n op = \"\"\n\n # If no operation selected, return original DataFrame\n if not op:\n return df_copy\n\n if op == \"Filter\":\n return self.filter_rows_by_value(df_copy)\n if op == \"Sort\":\n return self.sort_by_column(df_copy)\n if op == \"Drop Column\":\n return self.drop_column(df_copy)\n if op == \"Rename Column\":\n return self.rename_column(df_copy)\n if op == \"Add Column\":\n return self.add_column(df_copy)\n if op == \"Select Columns\":\n return self.select_columns(df_copy)\n if op == \"Head\":\n return self.head(df_copy)\n if op == \"Tail\":\n return self.tail(df_copy)\n if op == \"Replace 
Value\":\n return self.replace_values(df_copy)\n if op == \"Drop Duplicates\":\n return self.drop_duplicates(df_copy)\n msg = f\"Unsupported operation: {op}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def filter_rows_by_value(self, df: DataFrame) -> DataFrame:\n column = df[self.column_name]\n filter_value = self.filter_value\n\n # Handle regular DropdownInput format (just a string value)\n operator = getattr(self, \"filter_operator\", \"equals\") # Default to equals for backward compatibility\n\n if operator == \"equals\":\n mask = column == filter_value\n elif operator == \"not equals\":\n mask = column != filter_value\n elif operator == \"contains\":\n mask = column.astype(str).str.contains(str(filter_value), na=False)\n elif operator == \"not contains\":\n mask = ~column.astype(str).str.contains(str(filter_value), na=False)\n elif operator == \"starts with\":\n mask = column.astype(str).str.startswith(str(filter_value), na=False)\n elif operator == \"ends with\":\n mask = column.astype(str).str.endswith(str(filter_value), na=False)\n elif operator == \"greater than\":\n try:\n # Try to convert filter_value to numeric for comparison\n numeric_value = pd.to_numeric(filter_value)\n mask = column > numeric_value\n except (ValueError, TypeError):\n # If conversion fails, compare as strings\n mask = column.astype(str) > str(filter_value)\n elif operator == \"less than\":\n try:\n # Try to convert filter_value to numeric for comparison\n numeric_value = pd.to_numeric(filter_value)\n mask = column < numeric_value\n except (ValueError, TypeError):\n # If conversion fails, compare as strings\n mask = column.astype(str) < str(filter_value)\n else:\n mask = column == filter_value # Fallback to equals\n\n return DataFrame(df[mask])\n\n def sort_by_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.sort_values(by=self.column_name, ascending=self.ascending))\n\n def drop_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.drop(columns=[self.column_name]))\n\n def rename_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.rename(columns={self.column_name: self.new_column_name}))\n\n def add_column(self, df: DataFrame) -> DataFrame:\n df[self.new_column_name] = [self.new_column_value] * len(df)\n return DataFrame(df)\n\n def select_columns(self, df: DataFrame) -> DataFrame:\n columns = [col.strip() for col in self.columns_to_select]\n return DataFrame(df[columns])\n\n def head(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.head(self.num_rows))\n\n def tail(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.tail(self.num_rows))\n\n def replace_values(self, df: DataFrame) -> DataFrame:\n df[self.column_name] = df[self.column_name].replace(self.replace_value, self.replacement_value)\n return DataFrame(df)\n\n def drop_duplicates(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.drop_duplicates(subset=self.column_name))\n" }, "column_name": { "_input_type": "StrInput", @@ -3127,12 +2602,14 @@ "list_add_label": "Add More", "load_from_db": false, "name": "column_name", + "override_skip": false, "placeholder": "", "required": false, "show": false, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -3146,12 +2623,14 @@ "list_add_label": "Add More", "load_from_db": false, "name": "columns_to_select", + "override_skip": false, "placeholder": "", "required": false, "show": false, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + 
"track_in_telemetry": false, "type": "str", "value": "" }, @@ -3167,6 +2646,7 @@ "list": false, "list_add_label": "Add More", "name": "df", + "override_skip": false, "placeholder": "", "required": true, "show": true, @@ -3174,6 +2654,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "other", "value": "" }, @@ -3198,6 +2679,7 @@ "less than" ], "options_metadata": [], + "override_skip": false, "placeholder": "", "required": false, "show": false, @@ -3205,6 +2687,7 @@ "toggle": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "str", "value": "equals" }, @@ -3221,6 +2704,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "filter_value", + "override_skip": false, "placeholder": "", "required": false, "show": false, @@ -3228,9 +2712,11 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, + "is_refresh": false, "new_column_name": { "_input_type": "StrInput", "advanced": false, @@ -3241,12 +2727,14 @@ "list_add_label": "Add More", "load_from_db": false, "name": "new_column_name", + "override_skip": false, "placeholder": "", "required": false, "show": true, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "mimetype" }, @@ -3263,6 +2751,7 @@ "list_add_label": "Add More", "load_from_db": true, "name": "new_column_value", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -3270,6 +2759,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "MIMETYPE" }, @@ -3282,12 +2772,14 @@ "list": false, "list_add_label": "Add More", "name": "num_rows", + "override_skip": false, "placeholder": "", "required": false, "show": false, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "int", "value": 5 }, @@ -3298,6 +2790,7 @@ "dynamic": false, "info": "Select the DataFrame operation to perform.", "limit": 1, + "load_from_db": false, "name": "operation", "options": [ { @@ -3341,6 +2834,7 @@ "name": "Drop Duplicates" } ], + "override_skip": false, "placeholder": "Select Operation", "real_time_refresh": true, "required": false, @@ -3349,6 +2843,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "sortableList", "value": [ { @@ -3372,6 +2867,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "replace_value", + "override_skip": false, "placeholder": "", "required": false, "show": false, @@ -3379,6 +2875,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -3395,6 +2892,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "replacement_value", + "override_skip": false, "placeholder": "", "required": false, "show": false, @@ -3402,6 +2900,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" } @@ -3418,14 +2917,16 @@ "width": 320 }, "position": { - "x": 1314.7870797625949, - "y": 1135.9990962860586 + "x": 1330.149865256777, + "y": 1619.9268393528012 }, "selected": false, "type": "genericNode" }, { "data": { + "description": "Perform various operations on a DataFrame.", + "display_name": "DataFrame Operations", "id": 
"DataFrameOperations-9vMrp", "node": { "base_classes": [ @@ -3436,7 +2937,7 @@ "custom_fields": {}, "description": "Perform various operations on a DataFrame.", "display_name": "DataFrame Operations", - "documentation": "https://docs.langflow.org/components-processing#dataframe-operations", + "documentation": "https://docs.langflow.org/dataframe-operations", "edited": false, "field_order": [ "df", @@ -3454,11 +2955,11 @@ ], "frozen": false, "icon": "table", - "last_updated": "2025-11-24T18:01:42.469Z", + "last_updated": "2025-11-26T00:02:32.727Z", "legacy": false, - "lf_version": "1.6.3.dev0", + "lf_version": "1.7.0", "metadata": { - "code_hash": "b4d6b19b6eef", + "code_hash": "904f4eaebccd", "dependencies": { "dependencies": [ { @@ -3467,12 +2968,12 @@ }, { "name": "lfx", - "version": "0.1.12.dev31" + "version": null } ], "total_dependencies": 2 }, - "module": "lfx.components.processing.dataframe_operations.DataFrameOperationsComponent" + "module": "custom_components.dataframe_operations" }, "minimized": false, "output_types": [], @@ -3482,6 +2983,7 @@ "cache": true, "display_name": "DataFrame", "group_outputs": false, + "loop_types": null, "method": "perform_operation", "name": "output", "options": null, @@ -3496,6 +2998,12 @@ ], "pinned": false, "template": { + "_frontend_node_flow_id": { + "value": "5488df7c-b93f-4f87-a446-b67028bc0813" + }, + "_frontend_node_folder_id": { + "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + }, "_type": "Component", "ascending": { "_input_type": "BoolInput", @@ -3506,12 +3014,14 @@ "list": false, "list_add_label": "Add More", "name": "ascending", + "override_skip": false, "placeholder": "", "required": false, "show": false, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "bool", "value": true }, @@ -3531,7 +3041,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import pandas as pd\n\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.inputs import SortableListInput\nfrom lfx.io import BoolInput, DataFrameInput, DropdownInput, IntInput, MessageTextInput, Output, StrInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.dataframe import DataFrame\n\n\nclass DataFrameOperationsComponent(Component):\n display_name = \"DataFrame Operations\"\n description = \"Perform various operations on a DataFrame.\"\n documentation: str = \"https://docs.langflow.org/components-processing#dataframe-operations\"\n icon = \"table\"\n name = \"DataFrameOperations\"\n\n OPERATION_CHOICES = [\n \"Add Column\",\n \"Drop Column\",\n \"Filter\",\n \"Head\",\n \"Rename Column\",\n \"Replace Value\",\n \"Select Columns\",\n \"Sort\",\n \"Tail\",\n \"Drop Duplicates\",\n ]\n\n inputs = [\n DataFrameInput(\n name=\"df\",\n display_name=\"DataFrame\",\n info=\"The input DataFrame to operate on.\",\n required=True,\n ),\n SortableListInput(\n name=\"operation\",\n display_name=\"Operation\",\n placeholder=\"Select Operation\",\n info=\"Select the DataFrame operation to perform.\",\n options=[\n {\"name\": \"Add Column\", \"icon\": \"plus\"},\n {\"name\": \"Drop Column\", \"icon\": \"minus\"},\n {\"name\": \"Filter\", \"icon\": \"filter\"},\n {\"name\": \"Head\", \"icon\": \"arrow-up\"},\n {\"name\": \"Rename Column\", \"icon\": \"pencil\"},\n {\"name\": \"Replace Value\", \"icon\": \"replace\"},\n {\"name\": \"Select Columns\", \"icon\": \"columns\"},\n {\"name\": \"Sort\", \"icon\": \"arrow-up-down\"},\n {\"name\": \"Tail\", \"icon\": \"arrow-down\"},\n {\"name\": \"Drop Duplicates\", 
\"icon\": \"copy-x\"},\n ],\n real_time_refresh=True,\n limit=1,\n ),\n StrInput(\n name=\"column_name\",\n display_name=\"Column Name\",\n info=\"The column name to use for the operation.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"filter_value\",\n display_name=\"Filter Value\",\n info=\"The value to filter rows by.\",\n dynamic=True,\n show=False,\n ),\n DropdownInput(\n name=\"filter_operator\",\n display_name=\"Filter Operator\",\n options=[\n \"equals\",\n \"not equals\",\n \"contains\",\n \"not contains\",\n \"starts with\",\n \"ends with\",\n \"greater than\",\n \"less than\",\n ],\n value=\"equals\",\n info=\"The operator to apply for filtering rows.\",\n advanced=False,\n dynamic=True,\n show=False,\n ),\n BoolInput(\n name=\"ascending\",\n display_name=\"Sort Ascending\",\n info=\"Whether to sort in ascending order.\",\n dynamic=True,\n show=False,\n value=True,\n ),\n StrInput(\n name=\"new_column_name\",\n display_name=\"New Column Name\",\n info=\"The new column name when renaming or adding a column.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"new_column_value\",\n display_name=\"New Column Value\",\n info=\"The value to populate the new column with.\",\n dynamic=True,\n show=False,\n ),\n StrInput(\n name=\"columns_to_select\",\n display_name=\"Columns to Select\",\n dynamic=True,\n is_list=True,\n show=False,\n ),\n IntInput(\n name=\"num_rows\",\n display_name=\"Number of Rows\",\n info=\"Number of rows to return (for head/tail).\",\n dynamic=True,\n show=False,\n value=5,\n ),\n MessageTextInput(\n name=\"replace_value\",\n display_name=\"Value to Replace\",\n info=\"The value to replace in the column.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"replacement_value\",\n display_name=\"Replacement Value\",\n info=\"The value to replace with.\",\n dynamic=True,\n show=False,\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"DataFrame\",\n name=\"output\",\n method=\"perform_operation\",\n info=\"The resulting DataFrame after the operation.\",\n )\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n dynamic_fields = [\n \"column_name\",\n \"filter_value\",\n \"filter_operator\",\n \"ascending\",\n \"new_column_name\",\n \"new_column_value\",\n \"columns_to_select\",\n \"num_rows\",\n \"replace_value\",\n \"replacement_value\",\n ]\n for field in dynamic_fields:\n build_config[field][\"show\"] = False\n\n if field_name == \"operation\":\n # Handle SortableListInput format\n if isinstance(field_value, list):\n operation_name = field_value[0].get(\"name\", \"\") if field_value else \"\"\n else:\n operation_name = field_value or \"\"\n\n # If no operation selected, all dynamic fields stay hidden (already set to False above)\n if not operation_name:\n return build_config\n\n if operation_name == \"Filter\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"filter_value\"][\"show\"] = True\n build_config[\"filter_operator\"][\"show\"] = True\n elif operation_name == \"Sort\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"ascending\"][\"show\"] = True\n elif operation_name == \"Drop Column\":\n build_config[\"column_name\"][\"show\"] = True\n elif operation_name == \"Rename Column\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"new_column_name\"][\"show\"] = True\n elif operation_name == \"Add Column\":\n build_config[\"new_column_name\"][\"show\"] = True\n build_config[\"new_column_value\"][\"show\"] = True\n elif 
operation_name == \"Select Columns\":\n build_config[\"columns_to_select\"][\"show\"] = True\n elif operation_name in {\"Head\", \"Tail\"}:\n build_config[\"num_rows\"][\"show\"] = True\n elif operation_name == \"Replace Value\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"replace_value\"][\"show\"] = True\n build_config[\"replacement_value\"][\"show\"] = True\n elif operation_name == \"Drop Duplicates\":\n build_config[\"column_name\"][\"show\"] = True\n\n return build_config\n\n def perform_operation(self) -> DataFrame:\n df_copy = self.df.copy()\n\n # Handle SortableListInput format for operation\n operation_input = getattr(self, \"operation\", [])\n if isinstance(operation_input, list) and len(operation_input) > 0:\n op = operation_input[0].get(\"name\", \"\")\n else:\n op = \"\"\n\n # If no operation selected, return original DataFrame\n if not op:\n return df_copy\n\n if op == \"Filter\":\n return self.filter_rows_by_value(df_copy)\n if op == \"Sort\":\n return self.sort_by_column(df_copy)\n if op == \"Drop Column\":\n return self.drop_column(df_copy)\n if op == \"Rename Column\":\n return self.rename_column(df_copy)\n if op == \"Add Column\":\n return self.add_column(df_copy)\n if op == \"Select Columns\":\n return self.select_columns(df_copy)\n if op == \"Head\":\n return self.head(df_copy)\n if op == \"Tail\":\n return self.tail(df_copy)\n if op == \"Replace Value\":\n return self.replace_values(df_copy)\n if op == \"Drop Duplicates\":\n return self.drop_duplicates(df_copy)\n msg = f\"Unsupported operation: {op}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def filter_rows_by_value(self, df: DataFrame) -> DataFrame:\n column = df[self.column_name]\n filter_value = self.filter_value\n\n # Handle regular DropdownInput format (just a string value)\n operator = getattr(self, \"filter_operator\", \"equals\") # Default to equals for backward compatibility\n\n if operator == \"equals\":\n mask = column == filter_value\n elif operator == \"not equals\":\n mask = column != filter_value\n elif operator == \"contains\":\n mask = column.astype(str).str.contains(str(filter_value), na=False)\n elif operator == \"not contains\":\n mask = ~column.astype(str).str.contains(str(filter_value), na=False)\n elif operator == \"starts with\":\n mask = column.astype(str).str.startswith(str(filter_value), na=False)\n elif operator == \"ends with\":\n mask = column.astype(str).str.endswith(str(filter_value), na=False)\n elif operator == \"greater than\":\n try:\n # Try to convert filter_value to numeric for comparison\n numeric_value = pd.to_numeric(filter_value)\n mask = column > numeric_value\n except (ValueError, TypeError):\n # If conversion fails, compare as strings\n mask = column.astype(str) > str(filter_value)\n elif operator == \"less than\":\n try:\n # Try to convert filter_value to numeric for comparison\n numeric_value = pd.to_numeric(filter_value)\n mask = column < numeric_value\n except (ValueError, TypeError):\n # If conversion fails, compare as strings\n mask = column.astype(str) < str(filter_value)\n else:\n mask = column == filter_value # Fallback to equals\n\n return DataFrame(df[mask])\n\n def sort_by_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.sort_values(by=self.column_name, ascending=self.ascending))\n\n def drop_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.drop(columns=[self.column_name]))\n\n def rename_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.rename(columns={self.column_name: 
self.new_column_name}))\n\n def add_column(self, df: DataFrame) -> DataFrame:\n df[self.new_column_name] = [self.new_column_value] * len(df)\n return DataFrame(df)\n\n def select_columns(self, df: DataFrame) -> DataFrame:\n columns = [col.strip() for col in self.columns_to_select]\n return DataFrame(df[columns])\n\n def head(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.head(self.num_rows))\n\n def tail(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.tail(self.num_rows))\n\n def replace_values(self, df: DataFrame) -> DataFrame:\n df[self.column_name] = df[self.column_name].replace(self.replace_value, self.replacement_value)\n return DataFrame(df)\n\n def drop_duplicates(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.drop_duplicates(subset=self.column_name))\n" + "value": "import pandas as pd\n\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.inputs import SortableListInput\nfrom lfx.io import BoolInput, DataFrameInput, DropdownInput, IntInput, MessageTextInput, Output, StrInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.dataframe import DataFrame\n\n\nclass DataFrameOperationsComponent(Component):\n display_name = \"DataFrame Operations\"\n description = \"Perform various operations on a DataFrame.\"\n documentation: str = \"https://docs.langflow.org/dataframe-operations\"\n icon = \"table\"\n name = \"DataFrameOperations\"\n\n OPERATION_CHOICES = [\n \"Add Column\",\n \"Drop Column\",\n \"Filter\",\n \"Head\",\n \"Rename Column\",\n \"Replace Value\",\n \"Select Columns\",\n \"Sort\",\n \"Tail\",\n \"Drop Duplicates\",\n ]\n\n inputs = [\n DataFrameInput(\n name=\"df\",\n display_name=\"DataFrame\",\n info=\"The input DataFrame to operate on.\",\n required=True,\n ),\n SortableListInput(\n name=\"operation\",\n display_name=\"Operation\",\n placeholder=\"Select Operation\",\n info=\"Select the DataFrame operation to perform.\",\n options=[\n {\"name\": \"Add Column\", \"icon\": \"plus\"},\n {\"name\": \"Drop Column\", \"icon\": \"minus\"},\n {\"name\": \"Filter\", \"icon\": \"filter\"},\n {\"name\": \"Head\", \"icon\": \"arrow-up\"},\n {\"name\": \"Rename Column\", \"icon\": \"pencil\"},\n {\"name\": \"Replace Value\", \"icon\": \"replace\"},\n {\"name\": \"Select Columns\", \"icon\": \"columns\"},\n {\"name\": \"Sort\", \"icon\": \"arrow-up-down\"},\n {\"name\": \"Tail\", \"icon\": \"arrow-down\"},\n {\"name\": \"Drop Duplicates\", \"icon\": \"copy-x\"},\n ],\n real_time_refresh=True,\n limit=1,\n ),\n StrInput(\n name=\"column_name\",\n display_name=\"Column Name\",\n info=\"The column name to use for the operation.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"filter_value\",\n display_name=\"Filter Value\",\n info=\"The value to filter rows by.\",\n dynamic=True,\n show=False,\n ),\n DropdownInput(\n name=\"filter_operator\",\n display_name=\"Filter Operator\",\n options=[\n \"equals\",\n \"not equals\",\n \"contains\",\n \"not contains\",\n \"starts with\",\n \"ends with\",\n \"greater than\",\n \"less than\",\n ],\n value=\"equals\",\n info=\"The operator to apply for filtering rows.\",\n advanced=False,\n dynamic=True,\n show=False,\n ),\n BoolInput(\n name=\"ascending\",\n display_name=\"Sort Ascending\",\n info=\"Whether to sort in ascending order.\",\n dynamic=True,\n show=False,\n value=True,\n ),\n StrInput(\n name=\"new_column_name\",\n display_name=\"New Column Name\",\n info=\"The new column name when renaming or adding a column.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n 
name=\"new_column_value\",\n display_name=\"New Column Value\",\n info=\"The value to populate the new column with.\",\n dynamic=True,\n show=False,\n ),\n StrInput(\n name=\"columns_to_select\",\n display_name=\"Columns to Select\",\n dynamic=True,\n is_list=True,\n show=False,\n ),\n IntInput(\n name=\"num_rows\",\n display_name=\"Number of Rows\",\n info=\"Number of rows to return (for head/tail).\",\n dynamic=True,\n show=False,\n value=5,\n ),\n MessageTextInput(\n name=\"replace_value\",\n display_name=\"Value to Replace\",\n info=\"The value to replace in the column.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"replacement_value\",\n display_name=\"Replacement Value\",\n info=\"The value to replace with.\",\n dynamic=True,\n show=False,\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"DataFrame\",\n name=\"output\",\n method=\"perform_operation\",\n info=\"The resulting DataFrame after the operation.\",\n )\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n dynamic_fields = [\n \"column_name\",\n \"filter_value\",\n \"filter_operator\",\n \"ascending\",\n \"new_column_name\",\n \"new_column_value\",\n \"columns_to_select\",\n \"num_rows\",\n \"replace_value\",\n \"replacement_value\",\n ]\n for field in dynamic_fields:\n build_config[field][\"show\"] = False\n\n if field_name == \"operation\":\n # Handle SortableListInput format\n if isinstance(field_value, list):\n operation_name = field_value[0].get(\"name\", \"\") if field_value else \"\"\n else:\n operation_name = field_value or \"\"\n\n # If no operation selected, all dynamic fields stay hidden (already set to False above)\n if not operation_name:\n return build_config\n\n if operation_name == \"Filter\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"filter_value\"][\"show\"] = True\n build_config[\"filter_operator\"][\"show\"] = True\n elif operation_name == \"Sort\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"ascending\"][\"show\"] = True\n elif operation_name == \"Drop Column\":\n build_config[\"column_name\"][\"show\"] = True\n elif operation_name == \"Rename Column\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"new_column_name\"][\"show\"] = True\n elif operation_name == \"Add Column\":\n build_config[\"new_column_name\"][\"show\"] = True\n build_config[\"new_column_value\"][\"show\"] = True\n elif operation_name == \"Select Columns\":\n build_config[\"columns_to_select\"][\"show\"] = True\n elif operation_name in {\"Head\", \"Tail\"}:\n build_config[\"num_rows\"][\"show\"] = True\n elif operation_name == \"Replace Value\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"replace_value\"][\"show\"] = True\n build_config[\"replacement_value\"][\"show\"] = True\n elif operation_name == \"Drop Duplicates\":\n build_config[\"column_name\"][\"show\"] = True\n\n return build_config\n\n def perform_operation(self) -> DataFrame:\n df_copy = self.df.copy()\n\n # Handle SortableListInput format for operation\n operation_input = getattr(self, \"operation\", [])\n if isinstance(operation_input, list) and len(operation_input) > 0:\n op = operation_input[0].get(\"name\", \"\")\n else:\n op = \"\"\n\n # If no operation selected, return original DataFrame\n if not op:\n return df_copy\n\n if op == \"Filter\":\n return self.filter_rows_by_value(df_copy)\n if op == \"Sort\":\n return self.sort_by_column(df_copy)\n if op == \"Drop Column\":\n return self.drop_column(df_copy)\n if op == \"Rename 
Column\":\n return self.rename_column(df_copy)\n if op == \"Add Column\":\n return self.add_column(df_copy)\n if op == \"Select Columns\":\n return self.select_columns(df_copy)\n if op == \"Head\":\n return self.head(df_copy)\n if op == \"Tail\":\n return self.tail(df_copy)\n if op == \"Replace Value\":\n return self.replace_values(df_copy)\n if op == \"Drop Duplicates\":\n return self.drop_duplicates(df_copy)\n msg = f\"Unsupported operation: {op}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def filter_rows_by_value(self, df: DataFrame) -> DataFrame:\n column = df[self.column_name]\n filter_value = self.filter_value\n\n # Handle regular DropdownInput format (just a string value)\n operator = getattr(self, \"filter_operator\", \"equals\") # Default to equals for backward compatibility\n\n if operator == \"equals\":\n mask = column == filter_value\n elif operator == \"not equals\":\n mask = column != filter_value\n elif operator == \"contains\":\n mask = column.astype(str).str.contains(str(filter_value), na=False)\n elif operator == \"not contains\":\n mask = ~column.astype(str).str.contains(str(filter_value), na=False)\n elif operator == \"starts with\":\n mask = column.astype(str).str.startswith(str(filter_value), na=False)\n elif operator == \"ends with\":\n mask = column.astype(str).str.endswith(str(filter_value), na=False)\n elif operator == \"greater than\":\n try:\n # Try to convert filter_value to numeric for comparison\n numeric_value = pd.to_numeric(filter_value)\n mask = column > numeric_value\n except (ValueError, TypeError):\n # If conversion fails, compare as strings\n mask = column.astype(str) > str(filter_value)\n elif operator == \"less than\":\n try:\n # Try to convert filter_value to numeric for comparison\n numeric_value = pd.to_numeric(filter_value)\n mask = column < numeric_value\n except (ValueError, TypeError):\n # If conversion fails, compare as strings\n mask = column.astype(str) < str(filter_value)\n else:\n mask = column == filter_value # Fallback to equals\n\n return DataFrame(df[mask])\n\n def sort_by_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.sort_values(by=self.column_name, ascending=self.ascending))\n\n def drop_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.drop(columns=[self.column_name]))\n\n def rename_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.rename(columns={self.column_name: self.new_column_name}))\n\n def add_column(self, df: DataFrame) -> DataFrame:\n df[self.new_column_name] = [self.new_column_value] * len(df)\n return DataFrame(df)\n\n def select_columns(self, df: DataFrame) -> DataFrame:\n columns = [col.strip() for col in self.columns_to_select]\n return DataFrame(df[columns])\n\n def head(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.head(self.num_rows))\n\n def tail(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.tail(self.num_rows))\n\n def replace_values(self, df: DataFrame) -> DataFrame:\n df[self.column_name] = df[self.column_name].replace(self.replace_value, self.replacement_value)\n return DataFrame(df)\n\n def drop_duplicates(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.drop_duplicates(subset=self.column_name))\n" }, "column_name": { "_input_type": "StrInput", @@ -3543,12 +3053,14 @@ "list_add_label": "Add More", "load_from_db": false, "name": "column_name", + "override_skip": false, "placeholder": "", "required": false, "show": false, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": 
"str", "value": "" }, @@ -3562,12 +3074,14 @@ "list_add_label": "Add More", "load_from_db": false, "name": "columns_to_select", + "override_skip": false, "placeholder": "", "required": false, "show": false, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -3583,6 +3097,7 @@ "list": false, "list_add_label": "Add More", "name": "df", + "override_skip": false, "placeholder": "", "required": true, "show": true, @@ -3590,6 +3105,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "other", "value": "" }, @@ -3614,6 +3130,7 @@ "less than" ], "options_metadata": [], + "override_skip": false, "placeholder": "", "required": false, "show": false, @@ -3621,6 +3138,7 @@ "toggle": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "str", "value": "equals" }, @@ -3637,6 +3155,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "filter_value", + "override_skip": false, "placeholder": "", "required": false, "show": false, @@ -3644,9 +3163,11 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, + "is_refresh": false, "new_column_name": { "_input_type": "StrInput", "advanced": false, @@ -3657,12 +3178,14 @@ "list_add_label": "Add More", "load_from_db": false, "name": "new_column_name", + "override_skip": false, "placeholder": "", "required": false, "show": true, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "file_size" }, @@ -3679,6 +3202,7 @@ "list_add_label": "Add More", "load_from_db": true, "name": "new_column_value", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -3686,6 +3210,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "FILESIZE" }, @@ -3698,12 +3223,14 @@ "list": false, "list_add_label": "Add More", "name": "num_rows", + "override_skip": false, "placeholder": "", "required": false, "show": false, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "int", "value": 5 }, @@ -3714,6 +3241,7 @@ "dynamic": false, "info": "Select the DataFrame operation to perform.", "limit": 1, + "load_from_db": false, "name": "operation", "options": [ { @@ -3757,6 +3285,7 @@ "name": "Drop Duplicates" } ], + "override_skip": false, "placeholder": "Select Operation", "real_time_refresh": true, "required": false, @@ -3765,6 +3294,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "sortableList", "value": [ { @@ -3788,6 +3318,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "replace_value", + "override_skip": false, "placeholder": "", "required": false, "show": false, @@ -3795,6 +3326,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -3811,6 +3343,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "replacement_value", + "override_skip": false, "placeholder": "", "required": false, "show": false, @@ -3818,6 +3351,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" } @@ -3834,8 +3368,8 @@ "width": 320 }, "position": { - "x": 956.3345099816677, 
- "y": 1077.3618931222093 + "x": 937.1310281139399, + "y": 1611.2186890450444 }, "selected": false, "type": "genericNode" @@ -3859,8 +3393,8 @@ "width": 1000 }, "position": { - "x": -538.3997974029603, - "y": 1984.9915833571447 + "x": -184.83853691310878, + "y": 944.7352701051674 }, "resizing": true, "selected": false, @@ -3869,7 +3403,725 @@ }, { "data": { - "id": "EmbeddingModel-WIe3H", + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4", + "node": { + "base_classes": [ + "Data", + "DataFrame", + "VectorStore" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.", + "display_name": "OpenSearch (Multi-Model Multi-Embedding)", + "documentation": "", + "edited": false, + "field_order": [ + "docs_metadata", + "opensearch_url", + "index_name", + "engine", + "space_type", + "ef_construction", + "m", + "num_candidates", + "ingest_data", + "search_query", + "should_cache_vector_store", + "embedding", + "embedding_model_name", + "vector_field", + "number_of_results", + "filter_expression", + "auth_mode", + "username", + "password", + "jwt_token", + "jwt_header", + "bearer_prefix", + "use_ssl", + "verify_certs" + ], + "frozen": false, + "icon": "OpenSearch", + "last_updated": "2025-11-26T00:02:57.256Z", + "legacy": false, + "metadata": { + "code_hash": "8c78d799fef4", + "dependencies": { + "dependencies": [ + { + "name": "opensearchpy", + "version": "2.8.0" + }, + { + "name": "lfx", + "version": null + } + ], + "total_dependencies": 2 + }, + "module": "lfx.components.elastic.opensearch_multimodal.OpenSearchVectorStoreComponentMultimodalMultiEmbedding" + }, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Search Results", + "group_outputs": false, + "loop_types": null, + "method": "search_documents", + "name": "search_results", + "options": null, + "required_inputs": null, + "selected": "Data", + "tool_mode": true, + "types": [ + "Data" + ], + "value": "__UNDEFINED__" + }, + { + "allows_loop": false, + "cache": true, + "display_name": "DataFrame", + "group_outputs": false, + "loop_types": null, + "method": "as_dataframe", + "name": "dataframe", + "options": null, + "required_inputs": null, + "selected": "DataFrame", + "tool_mode": true, + "types": [ + "DataFrame" + ], + "value": "__UNDEFINED__" + }, + { + "allows_loop": false, + "cache": true, + "display_name": "Vector Store Connection", + "group_outputs": false, + "hidden": false, + "loop_types": null, + "method": "as_vector_store", + "name": "vectorstoreconnection", + "options": null, + "required_inputs": null, + "selected": "VectorStore", + "tool_mode": true, + "types": [ + "VectorStore" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_frontend_node_flow_id": { + "value": "5488df7c-b93f-4f87-a446-b67028bc0813" + }, + "_frontend_node_folder_id": { + "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + }, + "_type": "Component", + "auth_mode": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Authentication Mode", + "dynamic": false, + "external_options": {}, + "info": "Authentication method: 'basic' for username/password authentication, or 'jwt' for JSON Web Token (Bearer) authentication.", + "name": "auth_mode", + "options": [ + "basic", + "jwt" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + 
"real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "jwt" + }, + "bearer_prefix": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Prefix 'Bearer '", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "bearer_prefix", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. 
It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
\"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. 
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n self.log(self.ingest_data)\n client = self.build_client()\n logger.warning(f\"Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def 
_add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n docs = self.ingest_data or []\n if not docs:\n self.log(\"No documents to ingest.\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n if not embeddings_list:\n msg = \"At least one embedding is required to embed documents.\"\n raise ValueError(msg)\n\n self.log(f\"Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = 
[]\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in 
self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings (threaded for concurrency) with retries\n def embed_chunk(chunk_text: str) -> list[float]:\n return selected_embedding.embed_documents([chunk_text])[0]\n\n vectors: list[list[float]] | None = None\n last_exception: Exception | None = None\n delay = 1.0\n attempts = 0\n max_attempts = 3\n\n while attempts < max_attempts:\n attempts += 1\n try:\n max_workers = min(max(len(texts), 1), 8)\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n vectors = [None] * len(texts)\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n break\n except Exception as exc:\n last_exception = exc\n if attempts >= max_attempts:\n logger.error(\n f\"Embedding generation failed for model {embedding_model} after retries\",\n error=str(exc),\n )\n raise\n logger.warning(\n \"Threaded embedding generation failed for model %s (attempt %s/%s), retrying in %.1fs\",\n embedding_model,\n attempts,\n max_attempts,\n delay,\n )\n time.sleep(delay)\n delay = min(delay * 2, 8.0)\n\n if vectors is None:\n raise RuntimeError(\n f\"Embedding generation failed for {embedding_model}: {last_exception}\"\n if last_exception\n else f\"Embedding generation failed for {embedding_model}\"\n )\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n 
ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique 
embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": {\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n result = client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n return [self._get_embedding_model_name()]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. 
Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None) # distinct name: do not shadow the detected-models list above\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={emb_available_models}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(f\"Embedding object {idx} provides {len(emb_available_models)} models via available_models dict\")\n for model_name_key, dedicated_embedding in emb_available_models.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model 
'{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. \"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n else:\n # No matching embedding found for this model\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. 
\"\n f\"This model will be skipped. Available models: {list(embedding_by_model.keys())}\"\n )\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n\n if not query_embeddings:\n msg = \"Failed to generate embeddings for any model\"\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. 
\"\n \"Returning empty search results.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found 
{len(hits)} results\")\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n raw = self.search(self.search_query or \"\")\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n"
+ },
+ "docs_metadata": {
+ "_input_type": "TableInput",
+ "advanced": false,
+ "display_name": "Document Metadata",
+ "dynamic": false,
+ "info": "Additional metadata key-value pairs to be added to all ingested documents. Useful for tagging documents with source information, categories, or other custom attributes.",
+ "input_types": [
+ "Data"
+ ],
+ "is_list": true,
+ "list_add_label": "Add More",
+ "name": "docs_metadata",
+ "override_skip": false,
+ "placeholder": "",
+ "required": false,
+ "show": true,
+ "table_icon": "Table",
+ "table_schema": [
+ {
+ "description": "Key name",
+ "display_name": "Key",
+ "formatter": "text",
+ "name": "key",
+ "type": "str"
+ },
+ {
+ "description": "Value of the metadata",
+ "display_name": "Value",
+ "formatter": "text",
+ "name": "value",
+ "type": "str"
+ }
+ ],
+ "title_case": false,
+ "tool_mode": false,
+ "trace_as_metadata": true,
+ "track_in_telemetry": false,
+ "trigger_icon": "Table",
+ "trigger_text": "Open table",
+ "type": "table",
+ "value": []
+ },
+ "ef_construction": {
+ "_input_type": "IntInput",
+ "advanced": true,
+ "display_name": "EF Construction",
+ "dynamic": false,
+ "info": "Size of the dynamic candidate list during index construction. 
Higher values improve recall but increase indexing time and memory usage.", + "list": false, + "list_add_label": "Add More", + "name": "ef_construction", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 512 + }, + "embedding": { + "_input_type": "HandleInput", + "advanced": false, + "display_name": "Embedding", + "dynamic": false, + "info": "", + "input_types": [ + "Embeddings" + ], + "list": true, + "list_add_label": "Add More", + "name": "embedding", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "other", + "value": "" + }, + "embedding_model_name": { + "_input_type": "StrInput", + "advanced": false, + "display_name": "Embedding Model Name", + "dynamic": false, + "info": "Name of the embedding model to use for ingestion. This selects which embedding from the list will be used to embed documents. Matches on deployment, model, model_id, or model_name. For duplicate deployments, use combined format: 'deployment:model' (e.g., 'text-embedding-ada-002:text-embedding-3-large'). Leave empty to use the first embedding. Error message will show all available identifiers.", + "list": false, + "list_add_label": "Add More", + "load_from_db": true, + "name": "embedding_model_name", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "SELECTED_EMBEDDING_MODEL" + }, + "engine": { + "_input_type": "DropdownInput", + "advanced": true, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Vector Engine", + "dynamic": false, + "external_options": {}, + "info": "Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.", + "name": "engine", + "options": [ + "jvector", + "nmslib", + "faiss", + "lucene" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "jvector" + }, + "filter_expression": { + "_input_type": "MultilineInput", + "advanced": false, + "ai_enabled": false, + "copy_field": false, + "display_name": "Search Filters (JSON)", + "dynamic": false, + "info": "Optional JSON configuration for search filtering, result limits, and score thresholds.\n\nFormat 1 - Explicit filters:\n{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, {\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\n\nFormat 2 - Context-style mapping:\n{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\n\nUse __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "multiline": true, + "name": "filter_expression", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "index_name": { + "_input_type": "StrInput", + "advanced": false, + "display_name": "Index Name", + "dynamic": false, + "info": "The OpenSearch index name where documents will be stored and searched. Will be created automatically if it doesn't exist.", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "index_name", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "documents" + }, + "ingest_data": { + "_input_type": "HandleInput", + "advanced": false, + "display_name": "Ingest Data", + "dynamic": false, + "info": "", + "input_types": [ + "Data", + "DataFrame" + ], + "list": true, + "list_add_label": "Add More", + "name": "ingest_data", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "other", + "value": "" + }, + "is_refresh": false, + "jwt_header": { + "_input_type": "StrInput", + "advanced": true, + "display_name": "JWT Header Name", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "jwt_header", + "override_skip": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "Authorization" + }, + "jwt_token": { + "_input_type": "SecretStrInput", + "advanced": false, + "display_name": "JWT Token", + "dynamic": false, + "info": "Valid JSON Web Token for authentication. 
Will be sent in the Authorization header (with optional 'Bearer ' prefix).", + "input_types": [], + "load_from_db": true, + "name": "jwt_token", + "override_skip": false, + "password": true, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "track_in_telemetry": false, + "type": "str", + "value": "JWT" + }, + "m": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "M Parameter", + "dynamic": false, + "info": "Number of bidirectional connections for each vector in the HNSW graph. Higher values improve search quality but increase memory usage and indexing time.", + "list": false, + "list_add_label": "Add More", + "name": "m", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 16 + }, + "num_candidates": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Candidate Pool Size", + "dynamic": false, + "info": "Number of approximate neighbors to consider for each KNN query. Some OpenSearch deployments do not support this parameter; set to 0 to disable.", + "list": false, + "list_add_label": "Add More", + "name": "num_candidates", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 1000 + }, + "number_of_results": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Default Result Limit", + "dynamic": false, + "info": "Default maximum number of search results to return when no limit is specified in the filter expression.", + "list": false, + "list_add_label": "Add More", + "name": "number_of_results", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 10 + }, + "opensearch_url": { + "_input_type": "StrInput", + "advanced": false, + "display_name": "OpenSearch URL", + "dynamic": false, + "info": "The connection URL for your OpenSearch cluster (e.g., http://localhost:9200 for local development or your cloud endpoint).", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "opensearch_url", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "https://opensearch:9200" + }, + "password": { + "_input_type": "SecretStrInput", + "advanced": false, + "display_name": "OpenSearch Password", + "dynamic": false, + "info": "", + "input_types": [], + "load_from_db": false, + "name": "password", + "override_skip": false, + "password": true, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "track_in_telemetry": false, + "type": "str", + "value": "MyStrongOpenSearchPassword123!" 
+ }, + "search_query": { + "_input_type": "QueryInput", + "advanced": false, + "display_name": "Search Query", + "dynamic": false, + "info": "Enter a query to run a similarity search.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "search_query", + "override_skip": false, + "placeholder": "Enter a query...", + "required": false, + "show": true, + "title_case": false, + "tool_mode": true, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "query", + "value": "" + }, + "should_cache_vector_store": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Cache Vector Store", + "dynamic": false, + "info": "If True, the vector store will be cached for the current build of the component. This is useful for components that have multiple output methods and want to share the same vector store.", + "list": false, + "list_add_label": "Add More", + "name": "should_cache_vector_store", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, + "space_type": { + "_input_type": "DropdownInput", + "advanced": true, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Distance Metric", + "dynamic": false, + "external_options": {}, + "info": "Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, 'cosinesimil' for cosine similarity, 'innerproduct' for dot product.", + "name": "space_type", + "options": [ + "l2", + "l1", + "cosinesimil", + "linf", + "innerproduct" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "l2" + }, + "use_ssl": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Use SSL/TLS", + "dynamic": false, + "info": "Enable SSL/TLS encryption for secure connections to OpenSearch.", + "list": false, + "list_add_label": "Add More", + "name": "use_ssl", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, + "username": { + "_input_type": "StrInput", + "advanced": false, + "display_name": "Username", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "username", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "admin" + }, + "vector_field": { + "_input_type": "StrInput", + "advanced": true, + "display_name": "Legacy Vector Field Name", + "dynamic": false, + "info": "Legacy field name for backward compatibility. 
New documents use dynamic fields (chunk_embedding_{model_name}) based on the embedding_model_name.", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "vector_field", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "chunk_embedding" + }, + "verify_certs": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Verify SSL Certificates", + "dynamic": false, + "info": "Verify SSL certificates when connecting. Disable for self-signed certificates in development environments.", + "list": false, + "list_add_label": "Add More", + "name": "verify_certs", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": false + } + }, + "tool_mode": false + }, + "selected_output": "search_results", + "showNode": true, + "type": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding" + }, + "dragging": false, + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4", + "measured": { + "height": 904, + "width": 320 + }, + "position": { + "x": 2261.865622928042, + "y": 1349.2821108833643 + }, + "selected": false, + "type": "genericNode" + }, + { + "data": { + "id": "EmbeddingModel-EAo9i", "node": { "base_classes": [ "Embeddings" @@ -3900,9 +4152,11 @@ ], "frozen": false, "icon": "binary", + "last_updated": "2025-11-26T00:02:32.604Z", "legacy": false, + "lf_version": "1.7.0", "metadata": { - "code_hash": "c5e0a4535a27", + "code_hash": "9e44c83a5058", "dependencies": { "dependencies": [ { @@ -3919,7 +4173,7 @@ }, { "name": "lfx", - "version": "0.2.0.dev19" + "version": null }, { "name": "langchain_ollama", @@ -3936,7 +4190,7 @@ ], "total_dependencies": 7 }, - "module": "custom_components.embedding_model" + "module": "lfx.components.models_and_agents.embedding_model.EmbeddingModelComponent" }, "minimized": false, "output_types": [], @@ -3946,6 +4200,7 @@ "cache": true, "display_name": "Embedding Model", "group_outputs": false, + "loop_types": null, "method": "build_embeddings", "name": "embeddings", "options": null, @@ -3960,6 +4215,12 @@ ], "pinned": false, "template": { + "_frontend_node_flow_id": { + "value": "5488df7c-b93f-4f87-a446-b67028bc0813" + }, + "_frontend_node_folder_id": { + "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + }, "_type": "Component", "api_base": { "_input_type": "MessageTextInput", @@ -3974,9 +4235,10 @@ "list_add_label": "Add More", "load_from_db": false, "name": "api_base", + "override_skip": false, "placeholder": "", "required": false, - "show": true, + "show": false, "title_case": false, "tool_mode": false, "trace_as_input": true, @@ -3988,12 +4250,13 @@ "api_key": { "_input_type": "SecretStrInput", "advanced": false, - "display_name": "OpenAI API Key", + "display_name": "IBM watsonx.ai API Key", "dynamic": false, "info": "Model Provider API key", "input_types": [], "load_from_db": true, "name": "api_key", + "override_skip": false, "password": true, "placeholder": "", "real_time_refresh": true, @@ -4002,7 +4265,7 @@ "title_case": false, "track_in_telemetry": false, "type": "str", - "value": "OPENAI_API_KEY" + "value": "WATSONX_API_KEY" }, "base_url_ibm_watsonx": { "_input_type": "DropdownInput", @@ -4023,10 +4286,11 @@ "https://ca-tor.ml.cloud.ibm.com" ], "options_metadata": [], + 
"override_skip": false, "placeholder": "", "real_time_refresh": true, "required": false, - "show": false, + "show": true, "title_case": false, "toggle": false, "tool_mode": false, @@ -4044,6 +4308,7 @@ "list": false, "list_add_label": "Add More", "name": "chunk_size", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -4070,7 +4335,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n return OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. 
\"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n return OllamaEmbeddings(\n model=model,\n base_url=transformed_base_url or \"http://localhost:11434\",\n **model_kwargs,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n from ibm_watsonx_ai import APIClient, Credentials\n\n credentials = Credentials(\n api_key=self.api_key,\n url=base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\",\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n return WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = 
self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = 
\"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and 
transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n 
build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await 
is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" }, "dimensions": { "_input_type": "IntInput", @@ -4081,6 +4346,7 @@ "list": false, "list_add_label": "Add More", "name": "dimensions", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -4100,9 +4366,10 @@ "list": false, "list_add_label": "Add More", "name": "input_text", + "override_skip": false, "placeholder": "", "required": false, - "show": false, + "show": true, "title_case": false, "tool_mode": false, "trace_as_metadata": true, @@ -4110,6 +4377,7 @@ "type": "bool", "value": true }, + "is_refresh": false, "max_retries": { "_input_type": "IntInput", "advanced": true, @@ -4119,6 +4387,7 @@ "list": false, "list_add_label": "Add More", "name": "max_retries", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -4140,11 +4409,14 @@ "info": "Select the embedding model to use", "name": "model", "options": [ - "text-embedding-3-small", - "text-embedding-3-large", - "text-embedding-ada-002" + "ibm/granite-embedding-278m-multilingual", + "ibm/slate-125m-english-rtrvr-v2", + "ibm/slate-30m-english-rtrvr-v2", + "intfloat/multilingual-e5-large", + "sentence-transformers/all-minilm-l6-v2" ], "options_metadata": [], + "override_skip": false, "placeholder": "", "real_time_refresh": true, "refresh_button": true, @@ -4156,7 +4428,7 @@ "trace_as_metadata": true, "track_in_telemetry": true, "type": "str", - "value": "text-embedding-3-small" + "value": "ibm/granite-embedding-278m-multilingual" }, "model_kwargs": { "_input_type": "DictInput", @@ -4167,6 +4439,7 @@ "list": false, "list_add_label": "Add More", "name": "model_kwargs", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -4190,6 +4463,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "ollama_base_url", + "override_skip": false, "placeholder": "", "real_time_refresh": true, "required": false, @@ -4202,6 +4476,503 @@ "type": "str", "value": "" }, + "project_id": { + "_input_type": "MessageTextInput", + "advanced": false, + "display_name": "Project ID", + "dynamic": false, + "info": "IBM watsonx.ai Project ID (required for IBM watsonx.ai)", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": true, + "name": "project_id", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "WATSONX_PROJECT_ID" + }, + "provider": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Model Provider", + "dynamic": false, + "external_options": {}, + "info": "Select the embedding model provider", + "name": "provider", + "options": [ + "OpenAI", + "Ollama", + "IBM watsonx.ai" + ], + "options_metadata": [ + { + "icon": "OpenAI" + }, + { + "icon": "Ollama" + }, + { + "icon": "WatsonxAI" + } + ], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": 
true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "IBM watsonx.ai" + }, + "request_timeout": { + "_input_type": "FloatInput", + "advanced": true, + "display_name": "Request Timeout", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "request_timeout", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "float", + "value": "" + }, + "show_progress_bar": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Show Progress Bar", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "show_progress_bar", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": false + }, + "truncate_input_tokens": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Truncate Input Tokens", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "truncate_input_tokens", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 200 + } + }, + "tool_mode": false + }, + "showNode": true, + "type": "EmbeddingModel" + }, + "dragging": false, + "id": "EmbeddingModel-EAo9i", + "measured": { + "height": 534, + "width": 320 + }, + "position": { + "x": 838.9563350647003, + "y": 2191.523005861695 + }, + "selected": false, + "type": "genericNode" + }, + { + "data": { + "id": "EmbeddingModel-E0hvR", + "node": { + "base_classes": [ + "Embeddings" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Generate embeddings using a specified provider.", + "display_name": "Embedding Model", + "documentation": "https://docs.langflow.org/components-embedding-models", + "edited": false, + "field_order": [ + "provider", + "api_base", + "ollama_base_url", + "base_url_ibm_watsonx", + "model", + "api_key", + "project_id", + "dimensions", + "chunk_size", + "request_timeout", + "max_retries", + "show_progress_bar", + "model_kwargs", + "truncate_input_tokens", + "input_text" + ], + "frozen": false, + "icon": "binary", + "last_updated": "2025-11-26T00:02:32.605Z", + "legacy": false, + "lf_version": "1.7.0", + "metadata": { + "code_hash": "9e44c83a5058", + "dependencies": { + "dependencies": [ + { + "name": "requests", + "version": "2.32.5" + }, + { + "name": "ibm_watsonx_ai", + "version": "1.4.2" + }, + { + "name": "langchain_openai", + "version": "0.3.23" + }, + { + "name": "lfx", + "version": null + }, + { + "name": "langchain_ollama", + "version": "0.3.10" + }, + { + "name": "langchain_community", + "version": "0.3.21" + }, + { + "name": "langchain_ibm", + "version": "0.3.19" + } + ], + "total_dependencies": 7 + }, + "module": "lfx.components.models_and_agents.embedding_model.EmbeddingModelComponent" + }, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Embedding Model", + "group_outputs": false, + "loop_types": null, + "method": "build_embeddings", + "name": "embeddings", + "options": null, + "required_inputs": null, + 
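The model list in the watsonx-configured node above is populated by the component's fetch_ibm_models, which queries the public foundation_model_specs endpoint with an embedding-only, non-withdrawn filter. A standalone sketch of that call, reusing the same endpoint, API version, and filter string as the embedded code:

import requests

def list_watsonx_embedding_models(base_url: str = "https://us-south.ml.cloud.ibm.com") -> list[str]:
    """Return embedding-capable, non-withdrawn model ids from watsonx.ai."""
    response = requests.get(
        f"{base_url}/ml/v1/foundation_model_specs",
        params={
            "version": "2024-09-16",
            "filters": "function_embedding,!lifecycle_withdrawn:and",
        },
        timeout=10,
    )
    response.raise_for_status()
    return sorted(spec["model_id"] for spec in response.json().get("resources", []))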
"selected": "Embeddings", + "tool_mode": true, + "types": [ + "Embeddings" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_frontend_node_flow_id": { + "value": "5488df7c-b93f-4f87-a446-b67028bc0813" + }, + "_frontend_node_folder_id": { + "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + }, + "_type": "Component", + "api_base": { + "_input_type": "MessageTextInput", + "advanced": true, + "display_name": "API Base URL", + "dynamic": false, + "info": "Base URL for the API. Leave empty for default.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "api_base", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "api_key": { + "_input_type": "SecretStrInput", + "advanced": false, + "display_name": "API Key (Optional)", + "dynamic": false, + "info": "Model Provider API key", + "input_types": [], + "load_from_db": false, + "name": "api_key", + "override_skip": false, + "password": true, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": false, + "title_case": false, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "base_url_ibm_watsonx": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "watsonx API Endpoint", + "dynamic": false, + "external_options": {}, + "info": "The base URL of the API (IBM watsonx.ai only)", + "name": "base_url_ibm_watsonx", + "options": [ + "https://us-south.ml.cloud.ibm.com", + "https://eu-de.ml.cloud.ibm.com", + "https://eu-gb.ml.cloud.ibm.com", + "https://au-syd.ml.cloud.ibm.com", + "https://jp-tok.ml.cloud.ibm.com", + "https://ca-tor.ml.cloud.ibm.com" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": false, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "https://us-south.ml.cloud.ibm.com" + }, + "chunk_size": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Chunk Size", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "chunk_size", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 1000 + }, + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom 
lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and 
transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n 
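# The Ollama branch above strips a trailing "/v1" because the component speaks
# the native Ollama API, not the OpenAI-compatible surface that "/v1" selects.
# A standalone sketch of that URL normalization, mirroring the
# rstrip/removesuffix logic in the embedded code:
DEFAULT_OLLAMA_URL = "http://localhost:11434"

def normalize_ollama_url(url: str | None) -> str:
    if not url:
        return DEFAULT_OLLAMA_URL
    trimmed = url.rstrip("/")
    if trimmed.endswith("/v1"):
        # Drop the OpenAI-compatible suffix; a warning is logged in the component.
        trimmed = trimmed.removesuffix("/v1")
    return trimmed

assert normalize_ollama_url("http://localhost:11434/v1/") == DEFAULT_OLLAMA_URL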
build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await 
is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + }, + "dimensions": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Dimensions", + "dynamic": false, + "info": "The number of dimensions the resulting output embeddings should have. Only supported by certain models.", + "list": false, + "list_add_label": "Add More", + "name": "dimensions", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": "" + }, + "input_text": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Include the original text in the output", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "input_text", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, + "is_refresh": false, + "max_retries": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Max Retries", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "max_retries", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 3 + }, + "model": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Model Name", + "dynamic": false, + "external_options": {}, + "info": "Select the embedding model to use", + "name": "model", + "options": [ + "bge-large:latest", + "qwen3-embedding:4b" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "refresh_button": true, + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "bge-large:latest" + }, + "model_kwargs": { + "_input_type": "DictInput", + "advanced": true, + "display_name": "Model Kwargs", + "dynamic": false, + "info": "Additional keyword arguments to pass to the model.", + "list": false, + "list_add_label": "Add More", + "name": "model_kwargs", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "track_in_telemetry": false, + "type": "dict", + "value": {} + }, + "ollama_base_url": { + "_input_type": "MessageTextInput", + "advanced": false, + "display_name": "Ollama API URL", + "dynamic": false, + "info": "Endpoint of the Ollama API (Ollama only). 
Defaults to http://localhost:11434", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": true, + "name": "ollama_base_url", + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "OLLAMA_BASE_URL" + }, "project_id": { "_input_type": "MessageTextInput", "advanced": false, @@ -4215,6 +4986,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "project_id", + "override_skip": false, "placeholder": "", "required": false, "show": false, @@ -4252,6 +5024,505 @@ "icon": "WatsonxAI" } ], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "Ollama" + }, + "request_timeout": { + "_input_type": "FloatInput", + "advanced": true, + "display_name": "Request Timeout", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "request_timeout", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "float", + "value": "" + }, + "show_progress_bar": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Show Progress Bar", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "show_progress_bar", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": false + }, + "truncate_input_tokens": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Truncate Input Tokens", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "truncate_input_tokens", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 200 + } + }, + "tool_mode": false + }, + "showNode": true, + "type": "EmbeddingModel" + }, + "dragging": false, + "id": "EmbeddingModel-E0hvR", + "measured": { + "height": 369, + "width": 320 + }, + "position": { + "x": 1223.4804629271505, + "y": 2198.8989246514284 + }, + "selected": false, + "type": "genericNode" + }, + { + "data": { + "id": "EmbeddingModel-3LsIP", + "node": { + "base_classes": [ + "Embeddings" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Generate embeddings using a specified provider.", + "display_name": "Embedding Model", + "documentation": "https://docs.langflow.org/components-embedding-models", + "edited": false, + "field_order": [ + "provider", + "api_base", + "ollama_base_url", + "base_url_ibm_watsonx", + "model", + "api_key", + "project_id", + "dimensions", + "chunk_size", + "request_timeout", + "max_retries", + "show_progress_bar", + "model_kwargs", + "truncate_input_tokens", + "input_text" + ], + "frozen": false, + "icon": "binary", + "last_updated": "2025-11-26T00:02:32.606Z", + "legacy": false, + "lf_version": "1.7.0", + "metadata": { + "code_hash": 
"9e44c83a5058", + "dependencies": { + "dependencies": [ + { + "name": "requests", + "version": "2.32.5" + }, + { + "name": "ibm_watsonx_ai", + "version": "1.4.2" + }, + { + "name": "langchain_openai", + "version": "0.3.23" + }, + { + "name": "lfx", + "version": null + }, + { + "name": "langchain_ollama", + "version": "0.3.10" + }, + { + "name": "langchain_community", + "version": "0.3.21" + }, + { + "name": "langchain_ibm", + "version": "0.3.19" + } + ], + "total_dependencies": 7 + }, + "module": "lfx.components.models_and_agents.embedding_model.EmbeddingModelComponent" + }, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Embedding Model", + "group_outputs": false, + "loop_types": null, + "method": "build_embeddings", + "name": "embeddings", + "options": null, + "required_inputs": null, + "selected": "Embeddings", + "tool_mode": true, + "types": [ + "Embeddings" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_frontend_node_flow_id": { + "value": "5488df7c-b93f-4f87-a446-b67028bc0813" + }, + "_frontend_node_folder_id": { + "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + }, + "_type": "Component", + "api_base": { + "_input_type": "MessageTextInput", + "advanced": true, + "display_name": "API Base URL", + "dynamic": false, + "info": "Base URL for the API. Leave empty for default.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "api_base", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "api_key": { + "_input_type": "SecretStrInput", + "advanced": false, + "display_name": "OpenAI API Key", + "dynamic": false, + "info": "Model Provider API key", + "input_types": [], + "load_from_db": true, + "name": "api_key", + "override_skip": false, + "password": true, + "placeholder": "", + "real_time_refresh": true, + "required": true, + "show": true, + "title_case": false, + "track_in_telemetry": false, + "type": "str", + "value": "OPENAI_API_KEY" + }, + "base_url_ibm_watsonx": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "watsonx API Endpoint", + "dynamic": false, + "external_options": {}, + "info": "The base URL of the API (IBM watsonx.ai only)", + "name": "base_url_ibm_watsonx", + "options": [ + "https://us-south.ml.cloud.ibm.com", + "https://eu-de.ml.cloud.ibm.com", + "https://eu-gb.ml.cloud.ibm.com", + "https://au-syd.ml.cloud.ibm.com", + "https://jp-tok.ml.cloud.ibm.com", + "https://ca-tor.ml.cloud.ibm.com" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": false, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "https://us-south.ml.cloud.ibm.com" + }, + "chunk_size": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Chunk Size", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "chunk_size", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + 
"value": 1000 + }, + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and 
transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n 
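# update_build_config above toggles provider-specific fields one assignment at
# a time via build_config[field]["show"]. A condensed sketch of the same
# visibility rule expressed as data; the field groupings are read off the
# provider branches above, and the helper name is illustrative, not part of
# the component:
PROVIDER_FIELDS = {
    "OpenAI": {"api_key", "api_base"},
    "Ollama": {"ollama_base_url"},
    "IBM watsonx.ai": {"api_key", "base_url_ibm_watsonx", "project_id",
                       "truncate_input_tokens", "input_text"},
}

def apply_provider_visibility(build_config: dict, provider: str) -> None:
    every_field = set().union(*PROVIDER_FIELDS.values())
    for name in every_field:
        build_config[name]["show"] = name in PROVIDER_FIELDS.get(provider, set())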
build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await 
is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + }, + "dimensions": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Dimensions", + "dynamic": false, + "info": "The number of dimensions the resulting output embeddings should have. Only supported by certain models.", + "list": false, + "list_add_label": "Add More", + "name": "dimensions", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": "" + }, + "input_text": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Include the original text in the output", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "input_text", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, + "is_refresh": false, + "max_retries": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Max Retries", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "max_retries", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 3 + }, + "model": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Model Name", + "dynamic": false, + "external_options": {}, + "info": "Select the embedding model to use", + "name": "model", + "options": [ + "text-embedding-3-small", + "text-embedding-3-large", + "text-embedding-ada-002" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "refresh_button": true, + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "text-embedding-3-small" + }, + "model_kwargs": { + "_input_type": "DictInput", + "advanced": true, + "display_name": "Model Kwargs", + "dynamic": false, + "info": "Additional keyword arguments to pass to the model.", + "list": false, + "list_add_label": "Add More", + "name": "model_kwargs", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "track_in_telemetry": false, + "type": "dict", + "value": {} + }, + "ollama_base_url": { + "_input_type": "MessageTextInput", + "advanced": false, + "display_name": "Ollama API URL", + "dynamic": false, + "info": "Endpoint of the Ollama API (Ollama only). 
Defaults to http://localhost:11434", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "ollama_base_url", + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "project_id": { + "_input_type": "MessageTextInput", + "advanced": false, + "display_name": "Project ID", + "dynamic": false, + "info": "IBM watsonx.ai Project ID (required for IBM watsonx.ai)", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "project_id", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "provider": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Model Provider", + "dynamic": false, + "external_options": {}, + "info": "Select the embedding model provider", + "name": "provider", + "options": [ + "OpenAI", + "Ollama", + "IBM watsonx.ai" + ], + "options_metadata": [ + { + "icon": "OpenAI" + }, + { + "icon": "Ollama" + }, + { + "icon": "WatsonxAI" + } + ], + "override_skip": false, "placeholder": "", "real_time_refresh": true, "required": false, @@ -4273,6 +5544,7 @@ "list": false, "list_add_label": "Add More", "name": "request_timeout", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -4292,6 +5564,7 @@ "list": false, "list_add_label": "Add More", "name": "show_progress_bar", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -4311,6 +5584,7 @@ "list": false, "list_add_label": "Add More", "name": "truncate_input_tokens", + "override_skip": false, "placeholder": "", "required": false, "show": false, @@ -4328,30 +5602,30 @@ "type": "EmbeddingModel" }, "dragging": false, - "id": "EmbeddingModel-WIe3H", + "id": "EmbeddingModel-3LsIP", "measured": { "height": 369, "width": 320 }, "position": { - "x": 1801.4066878627907, - "y": 1817.9066615159154 + "x": 1638.9179466145608, + "y": 2110.0422159522327 }, "selected": false, "type": "genericNode" } ], "viewport": { - "x": 354.22064006192994, - "y": -436.1821097171422, - "zoom": 0.45965778621327413 + "x": -243.6538442549829, + "y": -690.8048310343635, + "zoom": 0.5802295280329295 } }, "description": "Load your data for chat context with Retrieval Augmented Generation.", "endpoint_name": null, "id": "5488df7c-b93f-4f87-a446-b67028bc0813", "is_component": false, - "last_tested_version": "1.7.0.dev19", + "last_tested_version": "1.7.0", "name": "OpenSearch Ingestion Flow", "tags": [ "openai", diff --git a/flows/openrag_agent.json b/flows/openrag_agent.json index af0ed600..9479878c 100644 --- a/flows/openrag_agent.json +++ b/flows/openrag_agent.json @@ -1,90 +1,6 @@ { "data": { "edges": [ - { - "animated": false, - "className": "", - "data": { - "sourceHandle": { - "dataType": "TextInput", - "id": "TextInput-aHsQb", - "name": "text", - "output_types": [ - "Message" - ] - }, - "targetHandle": { - "fieldName": "filter_expression", - "id": "OpenSearch-iYfjf", - "inputTypes": [ - "Message" - ], - "type": "str" - } - }, - "id": 
"xy-edge__TextInput-aHsQb{œdataTypeœ:œTextInputœ,œidœ:œTextInput-aHsQbœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-OpenSearch-iYfjf{œfieldNameœ:œfilter_expressionœ,œidœ:œOpenSearch-iYfjfœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", - "selected": false, - "source": "TextInput-aHsQb", - "sourceHandle": "{œdataTypeœ:œTextInputœ,œidœ:œTextInput-aHsQbœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", - "target": "OpenSearch-iYfjf", - "targetHandle": "{œfieldNameœ:œfilter_expressionœ,œidœ:œOpenSearch-iYfjfœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}" - }, - { - "animated": false, - "className": "", - "data": { - "sourceHandle": { - "dataType": "EmbeddingModel", - "id": "EmbeddingModel-oPi95", - "name": "embeddings", - "output_types": [ - "Embeddings" - ] - }, - "targetHandle": { - "fieldName": "embedding", - "id": "OpenSearch-iYfjf", - "inputTypes": [ - "Embeddings" - ], - "type": "other" - } - }, - "id": "xy-edge__EmbeddingModel-oPi95{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-oPi95œ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}-OpenSearch-iYfjf{œfieldNameœ:œembeddingœ,œidœ:œOpenSearch-iYfjfœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}", - "selected": false, - "source": "EmbeddingModel-oPi95", - "sourceHandle": "{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-oPi95œ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}", - "target": "OpenSearch-iYfjf", - "targetHandle": "{œfieldNameœ:œembeddingœ,œidœ:œOpenSearch-iYfjfœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}" - }, - { - "animated": false, - "className": "", - "data": { - "sourceHandle": { - "dataType": "OpenSearchVectorStoreComponent", - "id": "OpenSearch-iYfjf", - "name": "component_as_tool", - "output_types": [ - "Tool" - ] - }, - "targetHandle": { - "fieldName": "tools", - "id": "Agent-Nfw7u", - "inputTypes": [ - "Tool" - ], - "type": "other" - } - }, - "id": "xy-edge__OpenSearch-iYfjf{œdataTypeœ:œOpenSearchVectorStoreComponentœ,œidœ:œOpenSearch-iYfjfœ,œnameœ:œcomponent_as_toolœ,œoutput_typesœ:[œToolœ]}-Agent-Nfw7u{œfieldNameœ:œtoolsœ,œidœ:œAgent-Nfw7uœ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}", - "selected": false, - "source": "OpenSearch-iYfjf", - "sourceHandle": "{œdataTypeœ:œOpenSearchVectorStoreComponentœ,œidœ:œOpenSearch-iYfjfœ,œnameœ:œcomponent_as_toolœ,œoutput_typesœ:[œToolœ]}", - "target": "Agent-Nfw7u", - "targetHandle": "{œfieldNameœ:œtoolsœ,œidœ:œAgent-Nfw7uœ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}" - }, { "animated": false, "className": "", @@ -198,678 +114,149 @@ "sourceHandle": "{œdataTypeœ:œCalculatorComponentœ,œidœ:œCalculatorComponent-KrlMHœ,œnameœ:œcomponent_as_toolœ,œoutput_typesœ:[œToolœ]}", "target": "Agent-Nfw7u", "targetHandle": "{œfieldNameœ:œtoolsœ,œidœ:œAgent-Nfw7uœ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}" + }, + { + "animated": false, + "className": "", + "data": { + "sourceHandle": { + "dataType": "EmbeddingModel", + "id": "EmbeddingModel-zLHKs", + "name": "embeddings", + "output_types": [ + "Embeddings" + ] + }, + "targetHandle": { + "fieldName": "embedding", + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE", + "inputTypes": [ + "Embeddings" + ], + "type": "other" + } + }, + "id": "xy-edge__EmbeddingModel-zLHKs{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-zLHKsœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}-OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvEœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}", + "selected": false, + "source": "EmbeddingModel-zLHKs", + "sourceHandle": 
"{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-zLHKsœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}", + "target": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE", + "targetHandle": "{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvEœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}" + }, + { + "animated": false, + "className": "", + "data": { + "sourceHandle": { + "dataType": "EmbeddingModel", + "id": "EmbeddingModel-J6YgA", + "name": "embeddings", + "output_types": [ + "Embeddings" + ] + }, + "targetHandle": { + "fieldName": "embedding", + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE", + "inputTypes": [ + "Embeddings" + ], + "type": "other" + } + }, + "id": "xy-edge__EmbeddingModel-J6YgA{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-J6YgAœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}-OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvEœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}", + "selected": false, + "source": "EmbeddingModel-J6YgA", + "sourceHandle": "{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-J6YgAœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}", + "target": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE", + "targetHandle": "{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvEœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}" + }, + { + "animated": false, + "className": "", + "data": { + "sourceHandle": { + "dataType": "EmbeddingModel", + "id": "EmbeddingModel-aIP4U", + "name": "embeddings", + "output_types": [ + "Embeddings" + ] + }, + "targetHandle": { + "fieldName": "embedding", + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE", + "inputTypes": [ + "Embeddings" + ], + "type": "other" + } + }, + "id": "xy-edge__EmbeddingModel-aIP4U{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-aIP4Uœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}-OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvEœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}", + "selected": false, + "source": "EmbeddingModel-aIP4U", + "sourceHandle": "{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-aIP4Uœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}", + "target": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE", + "targetHandle": "{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvEœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}" + }, + { + "animated": false, + "className": "", + "data": { + "sourceHandle": { + "dataType": "TextInput", + "id": "TextInput-aHsQb", + "name": "text", + "output_types": [ + "Message" + ] + }, + "targetHandle": { + "fieldName": "filter_expression", + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE", + "inputTypes": [ + "Message" + ], + "type": "str" + } + }, + "id": "xy-edge__TextInput-aHsQb{œdataTypeœ:œTextInputœ,œidœ:œTextInput-aHsQbœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE{œfieldNameœ:œfilter_expressionœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvEœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "selected": false, + "source": "TextInput-aHsQb", + "sourceHandle": "{œdataTypeœ:œTextInputœ,œidœ:œTextInput-aHsQbœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", + "target": 
"OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE", + "targetHandle": "{œfieldNameœ:œfilter_expressionœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvEœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}" + }, + { + "animated": false, + "className": "", + "data": { + "sourceHandle": { + "dataType": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding", + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE", + "name": "component_as_tool", + "output_types": [ + "Tool" + ] + }, + "targetHandle": { + "fieldName": "tools", + "id": "Agent-Nfw7u", + "inputTypes": [ + "Tool" + ], + "type": "other" + } + }, + "id": "xy-edge__OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE{œdataTypeœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvEœ,œnameœ:œcomponent_as_toolœ,œoutput_typesœ:[œToolœ]}-Agent-Nfw7u{œfieldNameœ:œtoolsœ,œidœ:œAgent-Nfw7uœ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}", + "selected": false, + "source": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE", + "sourceHandle": "{œdataTypeœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvEœ,œnameœ:œcomponent_as_toolœ,œoutput_typesœ:[œToolœ]}", + "target": "Agent-Nfw7u", + "targetHandle": "{œfieldNameœ:œtoolsœ,œidœ:œAgent-Nfw7uœ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}" } ], "nodes": [ - { - "data": { - "id": "OpenSearch-iYfjf", - "node": { - "base_classes": [ - "Data", - "DataFrame", - "VectorStore" - ], - "beta": false, - "conditional_paths": [], - "custom_fields": {}, - "description": "Store and search documents using OpenSearch with hybrid semantic and keyword search capabilities.", - "display_name": "OpenSearch", - "documentation": "", - "edited": true, - "field_order": [ - "docs_metadata", - "opensearch_url", - "index_name", - "engine", - "space_type", - "ef_construction", - "m", - "ingest_data", - "search_query", - "should_cache_vector_store", - "embedding", - "vector_field", - "number_of_results", - "filter_expression", - "auth_mode", - "username", - "password", - "jwt_token", - "jwt_header", - "bearer_prefix", - "use_ssl", - "verify_certs" - ], - "frozen": false, - "icon": "OpenSearch", - "last_updated": "2025-11-24T18:02:41.464Z", - "legacy": false, - "lf_version": "1.6.0", - "metadata": { - "code_hash": "07eef12db820", - "dependencies": { - "dependencies": [ - { - "name": "opensearchpy", - "version": "2.8.0" - }, - { - "name": "lfx", - "version": null - } - ], - "total_dependencies": 2 - }, - "module": "custom_components.opensearch" - }, - "minimized": false, - "output_types": [], - "outputs": [ - { - "allows_loop": false, - "cache": true, - "display_name": "Toolset", - "group_outputs": false, - "hidden": null, - "method": "to_toolkit", - "name": "component_as_tool", - "options": null, - "required_inputs": null, - "selected": "Tool", - "tool_mode": true, - "types": [ - "Tool" - ], - "value": "__UNDEFINED__" - } - ], - "pinned": false, - "template": { - "_type": "Component", - "auth_mode": { - "_input_type": "DropdownInput", - "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Authentication Mode", - "dynamic": false, - "external_options": {}, - "info": "Authentication method: 'basic' for username/password authentication, or 'jwt' for JSON Web Token (Bearer) authentication.", - "load_from_db": false, - "name": "auth_mode", - "options": [ - "basic", - "jwt" - ], - "options_metadata": [], - "placeholder": "", - 
"real_time_refresh": true, - "required": false, - "show": true, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "jwt" - }, - "bearer_prefix": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Prefix 'Bearer '", - "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "name": "bearer_prefix", - "placeholder": "", - "required": false, - "show": false, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "bool", - "value": true - }, - "code": { - "advanced": true, - "dynamic": true, - "fileTypes": [], - "file_path": "", - "info": "", - "list": false, - "load_from_db": false, - "multiline": true, - "name": "code", - "password": false, - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "type": "code", - "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom typing import Any, List, Optional\n\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponent(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. 
It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"]\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. \"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. 
\"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"]),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model being used (e.g., 'text-embedding-3-small'). \"\n \"Used to create dynamic vector field names and track which model embedded each document. \"\n \"Auto-detected from embedding component if not specified.\"\n ),\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=False,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=False,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=True,\n info=(\n \"Valid JSON Web Token for authentication. 
\"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. \"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from embedding component\n if hasattr(self, \"embedding\") and self.embedding:\n if hasattr(self.embedding, \"model\"):\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_name\"):\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. \"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'model' or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a 
dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\n \"type\": \"keyword\"\n },\n \"embedding_dimensions\": {\n \"type\": \"integer\"\n }\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n raise ValueError(\n f\"Field '{field_name}' is not mapped as knn_vector. Current mapping: {properties.get(field_name)}\"\n )\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. 
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n self.log(self.ingest_data)\n client = self.build_client()\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n 
\"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n docs = self.ingest_data or []\n if not docs:\n self.log(\"No documents to ingest.\")\n return\n\n # Get embedding model name\n embedding_model = self._get_embedding_model_name()\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n self.log(f\"Using embedding model: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Generate embeddings (threaded for concurrency) with retries\n def embed_chunk(chunk_text: str) -> list[float]:\n return self.embedding.embed_documents([chunk_text])[0]\n\n vectors: Optional[List[List[float]]] = None\n last_exception: Optional[Exception] = None\n delay = 1.0\n attempts = 0\n\n while attempts < 3:\n attempts += 1\n try:\n max_workers = min(max(len(texts), 1), 8)\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n vectors = [None] * len(texts)\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n break\n except Exception as exc:\n last_exception = exc\n if attempts >= 3:\n logger.error(\n \"Embedding generation failed after retries\",\n error=str(exc),\n )\n raise\n logger.warning(\n \"Threaded embedding generation failed (attempt %s/%s), retrying in %.1fs\",\n attempts,\n 3,\n delay,\n )\n time.sleep(delay)\n delay = min(delay * 2, 8.0)\n\n if vectors is None:\n raise RuntimeError(\n f\"Embedding generation failed: {last_exception}\" if last_exception else \"Embedding generation failed\"\n )\n\n if not vectors:\n self.log(\"No vectors generated from documents.\")\n return\n\n # Get vector dimension for 
mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(\n f\"Failed to create index '{self.index_name}': {creation_error}\"\n )\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f 
and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\n \"size\": 0,\n \"aggs\": {\n \"embedding_models\": {\n \"terms\": {\n \"field\": \"embedding_model\",\n \"size\": 10\n }\n }\n }\n }\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\n \"bool\": {\n \"filter\": filter_clauses\n }\n }\n\n result = client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n return models\n except Exception as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n return [self._get_embedding_model_name()]\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except Exception as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. 
Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(\n f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\"\n )\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return True\n\n return False\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models in parallel\n query_embeddings = {}\n\n # Note: Langflow is synchronous, so we can't use true async here\n # But we log the intent for parallel processing\n logger.info(f\"Generating embeddings for {len(available_models)} models\")\n\n original_model_attr = getattr(self.embedding, \"model\", None)\n original_deployment_attr = getattr(self.embedding, \"deployment\", None)\n original_dimensions_attr = getattr(self.embedding, \"dimensions\", None)\n\n for model_name in available_models:\n try:\n # In a real async environment, these would run in parallel\n # For now, they run 
sequentially\n if hasattr(self.embedding, \"model\"):\n setattr(self.embedding, \"model\", model_name)\n if hasattr(self.embedding, \"deployment\"):\n setattr(self.embedding, \"deployment\", model_name)\n if hasattr(self.embedding, \"dimensions\"):\n setattr(self.embedding, \"dimensions\", None)\n vec = self.embedding.embed_query(q)\n query_embeddings[model_name] = vec\n logger.info(f\"Generated embedding for model: {model_name}\")\n except Exception as e:\n logger.error(f\"Failed to generate embedding for {model_name}: {e}\")\n\n if hasattr(self.embedding, \"model\"):\n setattr(self.embedding, \"model\", original_model_attr)\n if hasattr(self.embedding, \"deployment\"):\n setattr(self.embedding, \"deployment\", original_deployment_attr)\n if hasattr(self.embedding, \"dimensions\"):\n setattr(self.embedding, \"dimensions\", original_dimensions_attr)\n\n if not query_embeddings:\n msg = \"Failed to generate embeddings for any model\"\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n continue\n\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. 
\"\n \"Returning empty search results.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\n \"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)],\n \"minimum_should_match\": 1\n }\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(\n f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models\"\n )\n\n try:\n resp = client.search(\n index=self.index_name, body=body, params={\"terminate_after\": 0}\n )\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n 
logger.info(f\"Found {len(hits)} results\")\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n raw = self.search(self.search_query or \"\")\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n if is_basic:\n build_config[\"jwt_token\"][\"value\"] = \"\"\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n"
-          },
-          "docs_metadata": {
-            "_input_type": "TableInput",
-            "advanced": false,
-            "display_name": "Document Metadata",
-            "dynamic": false,
-            "info": "Additional metadata key-value pairs to be added to all ingested documents. 
Useful for tagging documents with source information, categories, or other custom attributes.", - "input_types": [ - "Data" - ], - "is_list": true, - "list_add_label": "Add More", - "name": "docs_metadata", - "placeholder": "", - "required": false, - "show": true, - "table_icon": "Table", - "table_schema": [ - { - "description": "Key name", - "display_name": "Key", - "formatter": "text", - "name": "key", - "type": "str" - }, - { - "description": "Value of the metadata", - "display_name": "Value", - "formatter": "text", - "name": "value", - "type": "str" - } - ], - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "trigger_icon": "Table", - "trigger_text": "Open table", - "type": "table", - "value": [] - }, - "ef_construction": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "EF Construction", - "dynamic": false, - "info": "Size of the dynamic candidate list during index construction. Higher values improve recall but increase indexing time and memory usage.", - "list": false, - "list_add_label": "Add More", - "name": "ef_construction", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "int", - "value": 512 - }, - "embedding": { - "_input_type": "HandleInput", - "advanced": false, - "display_name": "Embedding", - "dynamic": false, - "info": "", - "input_types": [ - "Embeddings" - ], - "list": false, - "list_add_label": "Add More", - "name": "embedding", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "trace_as_metadata": true, - "type": "other", - "value": "" - }, - "engine": { - "_input_type": "DropdownInput", - "advanced": true, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Vector Engine", - "dynamic": false, - "external_options": {}, - "info": "Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.", - "name": "engine", - "options": [ - "jvector", - "nmslib", - "faiss", - "lucene" - ], - "options_metadata": [], - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "jvector" - }, - "filter_expression": { - "_input_type": "MultilineInput", - "advanced": false, - "copy_field": false, - "display_name": "Search Filters (JSON)", - "dynamic": false, - "info": "Optional JSON configuration for search filtering, result limits, and score thresholds.\n\nFormat 1 - Explicit filters:\n{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, {\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\n\nFormat 2 - Context-style mapping:\n{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\n\nUse __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "multiline": true, - "name": "filter_expression", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, - "index_name": { - "_input_type": "StrInput", - "advanced": false, - "display_name": "Index Name", - "dynamic": false, - "info": "The OpenSearch index name where documents will be stored and searched. 
Will be created automatically if it doesn't exist.", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "index_name", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "documents" - }, - "ingest_data": { - "_input_type": "HandleInput", - "advanced": false, - "display_name": "Ingest Data", - "dynamic": false, - "info": "", - "input_types": [ - "Data", - "DataFrame" - ], - "list": true, - "list_add_label": "Add More", - "name": "ingest_data", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "trace_as_metadata": true, - "type": "other", - "value": "" - }, - "jwt_header": { - "_input_type": "StrInput", - "advanced": true, - "display_name": "JWT Header Name", - "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "jwt_header", - "placeholder": "", - "required": false, - "show": false, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "Authorization" - }, - "jwt_token": { - "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "JWT Token", - "dynamic": false, - "info": "Valid JSON Web Token for authentication. Will be sent in the Authorization header (with optional 'Bearer ' prefix).", - "input_types": [], - "load_from_db": true, - "name": "jwt_token", - "password": true, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "str", - "value": "JWT" - }, - "m": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "M Parameter", - "dynamic": false, - "info": "Number of bidirectional connections for each vector in the HNSW graph. 
Higher values improve search quality but increase memory usage and indexing time.", - "list": false, - "list_add_label": "Add More", - "name": "m", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "int", - "value": 16 - }, - "number_of_results": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "Default Result Limit", - "dynamic": false, - "info": "Default maximum number of search results to return when no limit is specified in the filter expression.", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "number_of_results", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "int", - "value": 4 - }, - "opensearch_url": { - "_input_type": "StrInput", - "advanced": false, - "display_name": "OpenSearch URL", - "dynamic": false, - "info": "The connection URL for your OpenSearch cluster (e.g., http://localhost:9200 for local development or your cloud endpoint).", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "opensearch_url", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "https://opensearch:9200" - }, - "password": { - "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "OpenSearch Password", - "dynamic": false, - "info": "", - "input_types": [], - "load_from_db": false, - "name": "password", - "password": true, - "placeholder": "", - "required": false, - "show": false, - "title_case": false, - "type": "str", - "value": "" - }, - "search_query": { - "_input_type": "QueryInput", - "advanced": false, - "display_name": "Search Query", - "dynamic": false, - "info": "Enter a query to run a similarity search.", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "search_query", - "placeholder": "Enter a query...", - "required": false, - "show": true, - "title_case": false, - "tool_mode": true, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "query", - "value": "" - }, - "should_cache_vector_store": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Cache Vector Store", - "dynamic": false, - "info": "If True, the vector store will be cached for the current build of the component. This is useful for components that have multiple output methods and want to share the same vector store.", - "list": false, - "list_add_label": "Add More", - "name": "should_cache_vector_store", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "bool", - "value": true - }, - "space_type": { - "_input_type": "DropdownInput", - "advanced": true, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Distance Metric", - "dynamic": false, - "external_options": {}, - "info": "Distance metric for calculating vector similarity. 
'l2' (Euclidean) is most common, 'cosinesimil' for cosine similarity, 'innerproduct' for dot product.", - "name": "space_type", - "options": [ - "l2", - "l1", - "cosinesimil", - "linf", - "innerproduct" - ], - "options_metadata": [], - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "l2" - }, - "tools_metadata": { - "_input_type": "ToolsInput", - "advanced": false, - "display_name": "Actions", - "dynamic": false, - "info": "Modify tool names and descriptions to help agents understand when to use each tool.", - "is_list": true, - "list_add_label": "Add More", - "name": "tools_metadata", - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "tools", - "value": [ - { - "args": { - "search_query": { - "default": "", - "description": "Enter a query to run a similarity search.", - "title": "Search Query", - "type": "string" - } - }, - "description": "Store and search documents using OpenSearch with hybrid semantic and keyword search capabilities.", - "display_description": "Store and search documents using OpenSearch with hybrid semantic and keyword search capabilities.", - "display_name": "search_documents", - "name": "search_documents", - "readonly": false, - "status": true, - "tags": [ - "search_documents" - ] - }, - { - "args": { - "search_query": { - "default": "", - "description": "Enter a query to run a similarity search.", - "title": "Search Query", - "type": "string" - } - }, - "description": "Store and search documents using OpenSearch with hybrid semantic and keyword search capabilities.", - "display_description": "Store and search documents using OpenSearch with hybrid semantic and keyword search capabilities.", - "display_name": "as_dataframe", - "name": "as_dataframe", - "readonly": false, - "status": false, - "tags": [ - "as_dataframe" - ] - }, - { - "args": { - "search_query": { - "default": "", - "description": "Enter a query to run a similarity search.", - "title": "Search Query", - "type": "string" - } - }, - "description": "Store and search documents using OpenSearch with hybrid semantic and keyword search capabilities.", - "display_description": "Store and search documents using OpenSearch with hybrid semantic and keyword search capabilities.", - "display_name": "as_vector_store", - "name": "as_vector_store", - "readonly": false, - "status": false, - "tags": [ - "as_vector_store" - ] - } - ] - }, - "use_ssl": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Use SSL/TLS", - "dynamic": false, - "info": "Enable SSL/TLS encryption for secure connections to OpenSearch.", - "list": false, - "list_add_label": "Add More", - "name": "use_ssl", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "bool", - "value": true - }, - "username": { - "_input_type": "StrInput", - "advanced": false, - "display_name": "Username", - "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "username", - "placeholder": "", - "required": false, - "show": false, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "admin" - }, - "vector_field": { - "_input_type": "StrInput", - "advanced": true, - "display_name": 
"Vector Field Name", - "dynamic": false, - "info": "Name of the field in OpenSearch documents that stores the vector embeddings for similarity search.", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "vector_field", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "chunk_embedding" - }, - "verify_certs": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Verify SSL Certificates", - "dynamic": false, - "info": "Verify SSL certificates when connecting. Disable for self-signed certificates in development environments.", - "list": false, - "list_add_label": "Add More", - "name": "verify_certs", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "bool", - "value": false - } - }, - "tool_mode": true - }, - "selected_output": "search_results", - "showNode": true, - "type": "OpenSearchVectorStoreComponent" - }, - "dragging": false, - "id": "OpenSearch-iYfjf", - "measured": { - "height": 820, - "width": 320 - }, - "position": { - "x": 1183.2560374129, - "y": 320.1264495479339 - }, - "selected": false, - "type": "genericNode" - }, { "data": { "id": "TextInput-aHsQb", @@ -964,8 +351,8 @@ "width": 320 }, "position": { - "x": 722.0477041311764, - "y": 119.75705309395346 + "x": 503.8866998170472, + "y": 2288.794090320999 }, "selected": false, "type": "genericNode" @@ -994,7 +381,7 @@ "frozen": false, "icon": "Mcp", "key": "mcp_lf-starter_project", - "last_updated": "2025-11-24T18:02:41.465Z", + "last_updated": "2025-11-26T00:05:16.024Z", "legacy": false, "mcpServerName": "lf-starter_project", "metadata": { @@ -1027,6 +414,7 @@ "display_name": "Toolset", "group_outputs": false, "hidden": null, + "loop_types": null, "method": "to_toolkit", "name": "component_as_tool", "options": null, @@ -1041,6 +429,12 @@ ], "pinned": false, "template": { + "_frontend_node_flow_id": { + "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" + }, + "_frontend_node_folder_id": { + "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + }, "_type": "Component", "code": { "advanced": true, @@ -1060,6 +454,7 @@ "type": "code", "value": "from __future__ import annotations\n\nimport asyncio\nimport uuid\nfrom typing import Any\n\nfrom langchain_core.tools import StructuredTool # noqa: TC002\n\nfrom lfx.base.agents.utils import maybe_unflatten_dict, safe_cache_get, safe_cache_set\nfrom lfx.base.mcp.util import MCPSseClient, MCPStdioClient, create_input_schema_from_json_schema, update_tools\nfrom lfx.custom.custom_component.component_with_cache import ComponentWithCache\nfrom lfx.inputs.inputs import InputTypes # noqa: TC001\nfrom lfx.io import BoolInput, DropdownInput, McpInput, MessageTextInput, Output\nfrom lfx.io.schema import flatten_schema, schema_to_langflow_inputs\nfrom lfx.log.logger import logger\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.services.deps import get_settings_service, get_storage_service, session_scope\n\n\nclass MCPToolsComponent(ComponentWithCache):\n schema_inputs: list = []\n tools: list[StructuredTool] = []\n _not_load_actions: bool = False\n _tool_cache: dict = {}\n _last_selected_server: str | None = None # Cache for the last selected server\n\n def __init__(self, **data) -> None:\n super().__init__(**data)\n # Initialize cache keys to avoid CacheMiss when accessing them\n self._ensure_cache_structure()\n\n # 
Initialize clients with access to the component cache\n self.stdio_client: MCPStdioClient = MCPStdioClient(component_cache=self._shared_component_cache)\n self.sse_client: MCPSseClient = MCPSseClient(component_cache=self._shared_component_cache)\n\n def _ensure_cache_structure(self):\n \"\"\"Ensure the cache has the required structure.\"\"\"\n # Check if servers key exists and is not CacheMiss\n servers_value = safe_cache_get(self._shared_component_cache, \"servers\")\n if servers_value is None:\n safe_cache_set(self._shared_component_cache, \"servers\", {})\n\n # Check if last_selected_server key exists and is not CacheMiss\n last_server_value = safe_cache_get(self._shared_component_cache, \"last_selected_server\")\n if last_server_value is None:\n safe_cache_set(self._shared_component_cache, \"last_selected_server\", \"\")\n\n default_keys: list[str] = [\n \"code\",\n \"_type\",\n \"tool_mode\",\n \"tool_placeholder\",\n \"mcp_server\",\n \"tool\",\n \"use_cache\",\n ]\n\n display_name = \"MCP Tools\"\n description = \"Connect to an MCP server to use its tools.\"\n documentation: str = \"https://docs.langflow.org/mcp-client\"\n icon = \"Mcp\"\n name = \"MCPTools\"\n\n inputs = [\n McpInput(\n name=\"mcp_server\",\n display_name=\"MCP Server\",\n info=\"Select the MCP Server that will be used by this component\",\n real_time_refresh=True,\n ),\n BoolInput(\n name=\"use_cache\",\n display_name=\"Use Cached Server\",\n info=(\n \"Enable caching of MCP Server and tools to improve performance. \"\n \"Disable to always fetch fresh tools and server updates.\"\n ),\n value=False,\n advanced=True,\n ),\n DropdownInput(\n name=\"tool\",\n display_name=\"Tool\",\n options=[],\n value=\"\",\n info=\"Select the tool to execute\",\n show=False,\n required=True,\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n info=\"Placeholder for the tool\",\n value=\"\",\n show=False,\n tool_mode=False,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Response\", name=\"response\", method=\"build_output\"),\n ]\n\n async def _validate_schema_inputs(self, tool_obj) -> list[InputTypes]:\n \"\"\"Validate and process schema inputs for a tool.\"\"\"\n try:\n if not tool_obj or not hasattr(tool_obj, \"args_schema\"):\n msg = \"Invalid tool object or missing input schema\"\n raise ValueError(msg)\n\n flat_schema = flatten_schema(tool_obj.args_schema.schema())\n input_schema = create_input_schema_from_json_schema(flat_schema)\n if not input_schema:\n msg = f\"Empty input schema for tool '{tool_obj.name}'\"\n raise ValueError(msg)\n\n schema_inputs = schema_to_langflow_inputs(input_schema)\n if not schema_inputs:\n msg = f\"No input parameters defined for tool '{tool_obj.name}'\"\n await logger.awarning(msg)\n return []\n\n except Exception as e:\n msg = f\"Error validating schema inputs: {e!s}\"\n await logger.aexception(msg)\n raise ValueError(msg) from e\n else:\n return schema_inputs\n\n async def update_tool_list(self, mcp_server_value=None):\n # Accepts mcp_server_value as dict {name, config} or uses self.mcp_server\n mcp_server = mcp_server_value if mcp_server_value is not None else getattr(self, \"mcp_server\", None)\n server_name = None\n server_config_from_value = None\n if isinstance(mcp_server, dict):\n server_name = mcp_server.get(\"name\")\n server_config_from_value = mcp_server.get(\"config\")\n else:\n server_name = mcp_server\n if not server_name:\n self.tools = []\n return [], {\"name\": server_name, \"config\": 
server_config_from_value}\n\n # Check if caching is enabled, default to False\n use_cache = getattr(self, \"use_cache\", False)\n\n # Use shared cache if available and caching is enabled\n cached = None\n if use_cache:\n servers_cache = safe_cache_get(self._shared_component_cache, \"servers\", {})\n cached = servers_cache.get(server_name) if isinstance(servers_cache, dict) else None\n\n if cached is not None:\n try:\n self.tools = cached[\"tools\"]\n self.tool_names = cached[\"tool_names\"]\n self._tool_cache = cached[\"tool_cache\"]\n server_config_from_value = cached[\"config\"]\n except (TypeError, KeyError, AttributeError) as e:\n # Handle corrupted cache data by clearing it and continuing to fetch fresh tools\n msg = f\"Unable to use cached data for MCP Server {server_name}: {e}\"\n await logger.awarning(msg)\n # Clear the corrupted cache entry\n current_servers_cache = safe_cache_get(self._shared_component_cache, \"servers\", {})\n if isinstance(current_servers_cache, dict) and server_name in current_servers_cache:\n current_servers_cache.pop(server_name)\n safe_cache_set(self._shared_component_cache, \"servers\", current_servers_cache)\n else:\n return self.tools, {\"name\": server_name, \"config\": server_config_from_value}\n\n try:\n try:\n from langflow.api.v2.mcp import get_server\n from langflow.services.database.models.user.crud import get_user_by_id\n except ImportError as e:\n msg = (\n \"Langflow MCP server functionality is not available. \"\n \"This feature requires the full Langflow installation.\"\n )\n raise ImportError(msg) from e\n async with session_scope() as db:\n if not self.user_id:\n msg = \"User ID is required for fetching MCP tools.\"\n raise ValueError(msg)\n current_user = await get_user_by_id(db, self.user_id)\n\n # Try to get server config from DB/API\n server_config = await get_server(\n server_name,\n current_user,\n db,\n storage_service=get_storage_service(),\n settings_service=get_settings_service(),\n )\n\n # If get_server returns empty but we have a config, use it\n if not server_config and server_config_from_value:\n server_config = server_config_from_value\n\n if not server_config:\n self.tools = []\n return [], {\"name\": server_name, \"config\": server_config}\n\n _, tool_list, tool_cache = await update_tools(\n server_name=server_name,\n server_config=server_config,\n mcp_stdio_client=self.stdio_client,\n mcp_sse_client=self.sse_client,\n )\n\n self.tool_names = [tool.name for tool in tool_list if hasattr(tool, \"name\")]\n self._tool_cache = tool_cache\n self.tools = tool_list\n\n # Cache the result only if caching is enabled\n if use_cache:\n cache_data = {\n \"tools\": tool_list,\n \"tool_names\": self.tool_names,\n \"tool_cache\": tool_cache,\n \"config\": server_config,\n }\n\n # Safely update the servers cache\n current_servers_cache = safe_cache_get(self._shared_component_cache, \"servers\", {})\n if isinstance(current_servers_cache, dict):\n current_servers_cache[server_name] = cache_data\n safe_cache_set(self._shared_component_cache, \"servers\", current_servers_cache)\n\n except (TimeoutError, asyncio.TimeoutError) as e:\n msg = f\"Timeout updating tool list: {e!s}\"\n await logger.aexception(msg)\n raise TimeoutError(msg) from e\n except Exception as e:\n msg = f\"Error updating tool list: {e!s}\"\n await logger.aexception(msg)\n raise ValueError(msg) from e\n else:\n return tool_list, {\"name\": server_name, \"config\": server_config}\n\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None
= None) -> dict:\n \"\"\"Toggle the visibility of connection-specific fields based on the selected mode.\"\"\"\n try:\n if field_name == \"tool\":\n try:\n if len(self.tools) == 0:\n try:\n self.tools, build_config[\"mcp_server\"][\"value\"] = await self.update_tool_list()\n build_config[\"tool\"][\"options\"] = [tool.name for tool in self.tools]\n build_config[\"tool\"][\"placeholder\"] = \"Select a tool\"\n except (TimeoutError, asyncio.TimeoutError) as e:\n msg = f\"Timeout updating tool list: {e!s}\"\n await logger.aexception(msg)\n if not build_config[\"tools_metadata\"][\"show\"]:\n build_config[\"tool\"][\"show\"] = True\n build_config[\"tool\"][\"options\"] = []\n build_config[\"tool\"][\"value\"] = \"\"\n build_config[\"tool\"][\"placeholder\"] = \"Timeout on MCP server\"\n else:\n build_config[\"tool\"][\"show\"] = False\n except ValueError:\n if not build_config[\"tools_metadata\"][\"show\"]:\n build_config[\"tool\"][\"show\"] = True\n build_config[\"tool\"][\"options\"] = []\n build_config[\"tool\"][\"value\"] = \"\"\n build_config[\"tool\"][\"placeholder\"] = \"Error on MCP Server\"\n else:\n build_config[\"tool\"][\"show\"] = False\n\n if field_value == \"\":\n return build_config\n tool_obj = None\n for tool in self.tools:\n if tool.name == field_value:\n tool_obj = tool\n break\n if tool_obj is None:\n msg = f\"Tool {field_value} not found in available tools: {self.tools}\"\n await logger.awarning(msg)\n return build_config\n await self._update_tool_config(build_config, field_value)\n except Exception as e:\n build_config[\"tool\"][\"options\"] = []\n msg = f\"Failed to update tools: {e!s}\"\n raise ValueError(msg) from e\n else:\n return build_config\n elif field_name == \"mcp_server\":\n if not field_value:\n build_config[\"tool\"][\"show\"] = False\n build_config[\"tool\"][\"options\"] = []\n build_config[\"tool\"][\"value\"] = \"\"\n build_config[\"tool\"][\"placeholder\"] = \"\"\n build_config[\"tool_placeholder\"][\"tool_mode\"] = False\n self.remove_non_default_keys(build_config)\n return build_config\n\n build_config[\"tool_placeholder\"][\"tool_mode\"] = True\n\n current_server_name = field_value.get(\"name\") if isinstance(field_value, dict) else field_value\n _last_selected_server = safe_cache_get(self._shared_component_cache, \"last_selected_server\", \"\")\n\n # To avoid unnecessary updates, only proceed if the server has actually changed\n if (_last_selected_server in (current_server_name, \"\")) and build_config[\"tool\"][\"show\"]:\n if current_server_name:\n servers_cache = safe_cache_get(self._shared_component_cache, \"servers\", {})\n if isinstance(servers_cache, dict):\n cached = servers_cache.get(current_server_name)\n if cached is not None and cached.get(\"tool_names\"):\n cached_tools = cached[\"tool_names\"]\n current_tools = build_config[\"tool\"][\"options\"]\n if current_tools == cached_tools:\n return build_config\n else:\n return build_config\n\n # Determine if \"Tool Mode\" is active by checking if the tool dropdown is hidden.\n is_in_tool_mode = build_config[\"tools_metadata\"][\"show\"]\n safe_cache_set(self._shared_component_cache, \"last_selected_server\", current_server_name)\n\n # Check if tools are already cached for this server before clearing\n cached_tools = None\n if current_server_name:\n use_cache = getattr(self, \"use_cache\", True)\n if use_cache:\n servers_cache = safe_cache_get(self._shared_component_cache, \"servers\", {})\n if isinstance(servers_cache, dict):\n cached = servers_cache.get(current_server_name)\n if cached is 
not None:\n try:\n cached_tools = cached[\"tools\"]\n self.tools = cached_tools\n self.tool_names = cached[\"tool_names\"]\n self._tool_cache = cached[\"tool_cache\"]\n except (TypeError, KeyError, AttributeError) as e:\n # Handle corrupted cache data by ignoring it\n msg = f\"Unable to use cached data for MCP Server {current_server_name}: {e}\"\n await logger.awarning(msg)\n cached_tools = None\n\n # Only clear tools if we don't have cached tools for the current server\n if not cached_tools:\n self.tools = [] # Clear previous tools only if no cache\n\n self.remove_non_default_keys(build_config) # Clear previous tool inputs\n\n # Only show the tool dropdown if not in tool_mode\n if not is_in_tool_mode:\n build_config[\"tool\"][\"show\"] = True\n if cached_tools:\n # Use cached tools to populate options immediately\n build_config[\"tool\"][\"options\"] = [tool.name for tool in cached_tools]\n build_config[\"tool\"][\"placeholder\"] = \"Select a tool\"\n else:\n # Show loading state only when we need to fetch tools\n build_config[\"tool\"][\"placeholder\"] = \"Loading tools...\"\n build_config[\"tool\"][\"options\"] = []\n build_config[\"tool\"][\"value\"] = uuid.uuid4()\n else:\n # Keep the tool dropdown hidden if in tool_mode\n self._not_load_actions = True\n build_config[\"tool\"][\"show\"] = False\n\n elif field_name == \"tool_mode\":\n build_config[\"tool\"][\"placeholder\"] = \"\"\n build_config[\"tool\"][\"show\"] = not bool(field_value) and bool(build_config[\"mcp_server\"])\n self.remove_non_default_keys(build_config)\n self.tool = build_config[\"tool\"][\"value\"]\n if field_value:\n self._not_load_actions = True\n else:\n build_config[\"tool\"][\"value\"] = uuid.uuid4()\n build_config[\"tool\"][\"options\"] = []\n build_config[\"tool\"][\"show\"] = True\n build_config[\"tool\"][\"placeholder\"] = \"Loading tools...\"\n elif field_name == \"tools_metadata\":\n self._not_load_actions = False\n\n except Exception as e:\n msg = f\"Error in update_build_config: {e!s}\"\n await logger.aexception(msg)\n raise ValueError(msg) from e\n else:\n return build_config\n\n def get_inputs_for_all_tools(self, tools: list) -> dict:\n \"\"\"Get input schemas for all tools.\"\"\"\n inputs = {}\n for tool in tools:\n if not tool or not hasattr(tool, \"name\"):\n continue\n try:\n flat_schema = flatten_schema(tool.args_schema.schema())\n input_schema = create_input_schema_from_json_schema(flat_schema)\n langflow_inputs = schema_to_langflow_inputs(input_schema)\n inputs[tool.name] = langflow_inputs\n except (AttributeError, ValueError, TypeError, KeyError) as e:\n msg = f\"Error getting inputs for tool {getattr(tool, 'name', 'unknown')}: {e!s}\"\n logger.exception(msg)\n continue\n return inputs\n\n def remove_input_schema_from_build_config(\n self, build_config: dict, tool_name: str, input_schema: dict[str, list[InputTypes]]\n ):\n \"\"\"Remove the input schema for the tool from the build config.\"\"\"\n # Keep only schemas that don't belong to the current tool\n input_schema = {k: v for k, v in input_schema.items() if k != tool_name}\n # Remove all inputs from other tools\n for value in input_schema.values():\n for _input in value:\n if _input.name in build_config:\n build_config.pop(_input.name)\n\n def remove_non_default_keys(self, build_config: dict) -> None:\n \"\"\"Remove non-default keys from the build config.\"\"\"\n for key in list(build_config.keys()):\n if key not in self.default_keys:\n build_config.pop(key)\n\n async def _update_tool_config(self, build_config: dict, tool_name: str) ->
None:\n \"\"\"Update tool configuration with proper error handling.\"\"\"\n if not self.tools:\n self.tools, build_config[\"mcp_server\"][\"value\"] = await self.update_tool_list()\n\n if not tool_name:\n return\n\n tool_obj = next((tool for tool in self.tools if tool.name == tool_name), None)\n if not tool_obj:\n msg = f\"Tool {tool_name} not found in available tools: {self.tools}\"\n self.remove_non_default_keys(build_config)\n build_config[\"tool\"][\"value\"] = \"\"\n await logger.awarning(msg)\n return\n\n try:\n # Store current values before removing inputs\n current_values = {}\n for key, value in build_config.items():\n if key not in self.default_keys and isinstance(value, dict) and \"value\" in value:\n current_values[key] = value[\"value\"]\n\n # Get all tool inputs and remove old ones\n input_schema_for_all_tools = self.get_inputs_for_all_tools(self.tools)\n self.remove_input_schema_from_build_config(build_config, tool_name, input_schema_for_all_tools)\n\n # Get and validate new inputs\n self.schema_inputs = await self._validate_schema_inputs(tool_obj)\n if not self.schema_inputs:\n msg = f\"No input parameters to configure for tool '{tool_name}'\"\n await logger.ainfo(msg)\n return\n\n # Add new inputs to build config\n for schema_input in self.schema_inputs:\n if not schema_input or not hasattr(schema_input, \"name\"):\n msg = \"Invalid schema input detected, skipping\"\n await logger.awarning(msg)\n continue\n\n try:\n name = schema_input.name\n input_dict = schema_input.to_dict()\n input_dict.setdefault(\"value\", None)\n input_dict.setdefault(\"required\", True)\n\n build_config[name] = input_dict\n\n # Preserve existing value if the parameter name exists in current_values\n if name in current_values:\n build_config[name][\"value\"] = current_values[name]\n\n except (AttributeError, KeyError, TypeError) as e:\n msg = f\"Error processing schema input {schema_input}: {e!s}\"\n await logger.aexception(msg)\n continue\n except ValueError as e:\n msg = f\"Schema validation error for tool {tool_name}: {e!s}\"\n await logger.aexception(msg)\n self.schema_inputs = []\n return\n except (AttributeError, KeyError, TypeError) as e:\n msg = f\"Error updating tool config: {e!s}\"\n await logger.aexception(msg)\n raise ValueError(msg) from e\n\n async def build_output(self) -> DataFrame:\n \"\"\"Build output with improved error handling and validation.\"\"\"\n try:\n self.tools, _ = await self.update_tool_list()\n if self.tool != \"\":\n # Set session context for persistent MCP sessions using Langflow session ID\n session_context = self._get_session_context()\n if session_context:\n self.stdio_client.set_session_context(session_context)\n self.sse_client.set_session_context(session_context)\n\n exec_tool = self._tool_cache[self.tool]\n tool_args = self.get_inputs_for_all_tools(self.tools)[self.tool]\n kwargs = {}\n for arg in tool_args:\n value = getattr(self, arg.name, None)\n if value is not None:\n if isinstance(value, Message):\n kwargs[arg.name] = value.text\n else:\n kwargs[arg.name] = value\n\n unflattened_kwargs = maybe_unflatten_dict(kwargs)\n\n output = await exec_tool.coroutine(**unflattened_kwargs)\n\n tool_content = []\n for item in output.content:\n item_dict = item.model_dump()\n tool_content.append(item_dict)\n return DataFrame(data=tool_content)\n return DataFrame(data=[{\"error\": \"You must select a tool\"}])\n except Exception as e:\n msg = f\"Error in build_output: {e!s}\"\n await logger.aexception(msg)\n raise ValueError(msg) from e\n\n def _get_session_context(self) 
-> str | None:\n \"\"\"Get the Langflow session ID for MCP session caching.\"\"\"\n # Try to get session ID from the component's execution context\n if hasattr(self, \"graph\") and hasattr(self.graph, \"session_id\"):\n session_id = self.graph.session_id\n # Include server name to ensure different servers get different sessions\n server_name = \"\"\n mcp_server = getattr(self, \"mcp_server\", None)\n if isinstance(mcp_server, dict):\n server_name = mcp_server.get(\"name\", \"\")\n elif mcp_server:\n server_name = str(mcp_server)\n return f\"{session_id}_{server_name}\" if session_id else None\n return None\n\n async def _get_tools(self):\n \"\"\"Get cached tools or update if necessary.\"\"\"\n mcp_server = getattr(self, \"mcp_server\", None)\n if not self._not_load_actions:\n tools, _ = await self.update_tool_list(mcp_server)\n return tools\n return []\n" }, + "is_refresh": false, "mcp_server": { "_input_type": "McpInput", "advanced": false, @@ -1136,6 +531,7 @@ "is_list": true, "list_add_label": "Add More", "name": "tools_metadata", + "override_skip": false, "placeholder": "", "real_time_refresh": true, "required": false, @@ -1147,6 +543,7 @@ "type": "tools", "value": [ { + "_uniqueId": "opensearch_url_ingestion_flow_opensearch_url_ingestion_flow_0", "args": { "input_value": { "anyOf": [ @@ -1171,6 +568,33 @@ "tags": [ "opensearch_url_ingestion_flow" ] + }, + { + "_uniqueId": "openrag_opensearch_agent_backu_openrag_opensearch_agent_backu_1", + "args": { + "input_value": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Message to be passed as input.", + "title": "Input Value" + } + }, + "description": "OpenRAG OpenSearch Agent", + "display_description": "OpenRAG OpenSearch Agent", + "display_name": "openrag_opensearch_agent_backu", + "name": "openrag_opensearch_agent_backu", + "readonly": false, + "status": false, + "tags": [ + "openrag_opensearch_agent_backu" + ] } ] }, @@ -1205,8 +629,8 @@ "width": 320 }, "position": { - "x": 725.4538018784724, - "y": 894.358559115783 + "x": 1508.8015756352295, + "y": 1384.557089807625 }, "selected": false, "type": "genericNode" @@ -1232,8 +656,8 @@ "width": 644 }, "position": { - "x": 19.942791510714386, - "y": 259.5061905471592 + "x": 998.5503708467761, + "y": 897.7285248967648 }, "resizing": false, "selected": false, @@ -1242,6 +666,8 @@ }, { "data": { + "description": "Get chat inputs from the Playground.", + "display_name": "Chat Input", "id": "ChatInput-ci8VE", "node": { "base_classes": [ @@ -1252,7 +678,7 @@ "custom_fields": {}, "description": "Get chat inputs from the Playground.", "display_name": "Chat Input", - "documentation": "https://docs.langflow.org/components-io#chat-input", + "documentation": "https://docs.langflow.org/chat-input-and-output", "edited": false, "field_order": [ "input_value", @@ -1267,17 +693,17 @@ "icon": "MessagesSquare", "legacy": false, "metadata": { - "code_hash": "0014a5b41817", + "code_hash": "7a26c54d89ed", "dependencies": { "dependencies": [ { "name": "lfx", - "version": "0.1.13.dev9" + "version": null } ], "total_dependencies": 1 }, - "module": "lfx.components.input_output.chat.ChatInput" + "module": "custom_components.chat_input" }, "minimized": true, "output_types": [], @@ -1287,8 +713,11 @@ "cache": true, "display_name": "Chat Message", "group_outputs": false, + "loop_types": null, "method": "message_response", "name": "message", + "options": null, + "required_inputs": null, "selected": "Message", "tool_mode": true, "types": [ @@ -1316,7 +745,7 @@ 
"show": true, "title_case": false, "type": "code", - "value": "from lfx.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom lfx.schema.message import Message\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-input\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"context_id\",\n display_name=\"Context ID\",\n info=\"The context ID of the chat. Adds an extra layer to the local memory.\",\n value=\"\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n # Ensure files is a list and filter out empty/None values\n files = self.files if self.files else []\n if files and not isinstance(files, list):\n files = [files]\n # Filter out None/empty values\n files = [f for f in files if f is not None and f != \"\"]\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n context_id=self.context_id,\n files=files,\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n" + "value": "from lfx.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom lfx.schema.message import Message\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n documentation: str = \"https://docs.langflow.org/chat-input-and-output\"\n icon = \"MessagesSquare\"\n name = 
\"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"context_id\",\n display_name=\"Context ID\",\n info=\"The context ID of the chat. Adds an extra layer to the local memory.\",\n value=\"\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n # Ensure files is a list and filter out empty/None values\n files = self.files if self.files else []\n if files and not isinstance(files, list):\n files = [files]\n # Filter out None/empty values\n files = [f for f in files if f is not None and f != \"\"]\n\n session_id = self.session_id or self.graph.session_id or \"\"\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=session_id,\n context_id=self.context_id,\n files=files,\n )\n if session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n" }, "context_id": { "_input_type": "MessageTextInput", @@ -1331,6 +760,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "context_id", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -1338,6 +768,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -1376,6 +807,7 @@ "list": true, "list_add_label": "Add More", "name": "files", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -1383,12 +815,14 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "file", "value": "" }, "input_value": { "_input_type": "MultilineInput", "advanced": false, + "ai_enabled": false, "copy_field": false, "display_name": "Input Text", "dynamic": false, @@ -1399,6 +833,7 @@ "load_from_db": false, "multiline": true, "name": "input_value", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -1406,6 +841,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -1424,6 +860,7 @@ "User" ], "options_metadata": [], + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -1431,6 
+868,7 @@ "toggle": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "str", "value": "User" }, @@ -1447,6 +885,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "sender_name", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -1454,6 +893,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "User" }, @@ -1470,6 +910,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "session_id", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -1477,6 +918,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -1489,12 +931,14 @@ "list": false, "list_add_label": "Add More", "name": "should_store_message", + "override_skip": false, "placeholder": "", "required": false, "show": true, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "bool", "value": true } @@ -1511,14 +955,16 @@ "width": 192 }, "position": { - "x": 1287.1568425342111, - "y": 1247.5760137961474 + "x": 1599.1877452584524, + "y": 2275.678637253258 }, "selected": false, "type": "genericNode" }, { "data": { + "description": "Display a chat message in the Playground.", + "display_name": "Chat Output", "id": "ChatOutput-gWl8E", "node": { "base_classes": [ @@ -1529,7 +975,7 @@ "custom_fields": {}, "description": "Display a chat message in the Playground.", "display_name": "Chat Output", - "documentation": "https://docs.langflow.org/components-io#chat-output", + "documentation": "https://docs.langflow.org/chat-input-and-output", "edited": false, "field_order": [ "input_value", @@ -1545,7 +991,7 @@ "icon": "MessagesSquare", "legacy": false, "metadata": { - "code_hash": "4848ad3e35d5", + "code_hash": "cae45e2d53f6", "dependencies": { "dependencies": [ { @@ -1554,16 +1000,16 @@ }, { "name": "fastapi", - "version": "0.119.1" + "version": "0.120.0" }, { "name": "lfx", - "version": "0.1.13.dev9" + "version": null } ], "total_dependencies": 3 }, - "module": "lfx.components.input_output.chat_output.ChatOutput" + "module": "custom_components.chat_output" }, "minimized": true, "output_types": [], @@ -1573,8 +1019,11 @@ "cache": true, "display_name": "Output Message", "group_outputs": false, + "loop_types": null, "method": "message_response", "name": "message", + "options": null, + "required_inputs": null, "selected": "Message", "tool_mode": true, "types": [ @@ -1595,12 +1044,14 @@ "list": false, "list_add_label": "Add More", "name": "clean_data", + "override_skip": false, "placeholder": "", "required": false, "show": true, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "bool", "value": true }, @@ -1620,7 +1071,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n 
MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"context_id\",\n display_name=\"Context ID\",\n info=\"The context ID of the chat. Adds an extra layer to the local memory.\",\n value=\"\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n advanced=True,\n info=\"Whether to clean data before converting to string.\",\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, _, display_name, source_id = self.get_properties_from_source_component()\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.context_id = self.context_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def 
_serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n clean_data: bool = getattr(self, \"clean_data\", False)\n return \"\\n\".join([safe_convert(item, clean_data=clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" + "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/chat-input-and-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. 
If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"context_id\",\n display_name=\"Context ID\",\n info=\"The context ID of the chat. Adds an extra layer to the local memory.\",\n value=\"\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n advanced=True,\n info=\"Whether to clean data before converting to string.\",\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, _, display_name, source_id = self.get_properties_from_source_component()\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message) and not self.is_connected_to_chat_input():\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id or self.graph.session_id or \"\"\n message.context_id = self.context_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n\n # Store message if needed\n if message.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | 
Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n clean_data: bool = getattr(self, \"clean_data\", False)\n return \"\\n\".join([safe_convert(item, clean_data=clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" }, "context_id": { "_input_type": "MessageTextInput", @@ -1635,6 +1086,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "context_id", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -1642,6 +1094,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -1658,6 +1111,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "data_template", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -1665,6 +1119,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "{text}" }, @@ -1682,11 +1137,13 @@ "list": false, "list_add_label": "Add More", "name": "input_value", + "override_skip": false, "placeholder": "", "required": true, "show": true, "title_case": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "other", "value": "" }, @@ -1705,6 +1162,7 @@ "User" ], "options_metadata": [], + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -1712,6 +1170,7 @@ "toggle": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "str", "value": "Machine" }, @@ -1728,6 +1187,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "sender_name", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -1735,6 +1195,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "AI" }, @@ -1751,6 +1212,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "session_id", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -1758,6 +1220,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -1770,12 +1233,14 @@ "list": false, "list_add_label": "Add More", "name": "should_store_message", + "override_skip": false, "placeholder": "", "required": false, "show": true, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "bool", "value": true } @@ -1785,487 +1250,15 @@ "showNode": false, "type": "ChatOutput" }, + "dragging": false, "id": "ChatOutput-gWl8E", "measured": { "height": 48, "width": 192 }, "position": { - "x": 2115.4119264295655, - "y": 614.3278230710916 - }, - "selected": false, - "type": "genericNode" - }, - { - "data": { - "id": "EmbeddingModel-oPi95", - "node": { - "base_classes": [ - "Embeddings" - ], - "beta": false, - "conditional_paths": [], - "custom_fields": {}, - "description": "Generate embeddings using a specified provider.", - "display_name": "Embedding Model", - "documentation": 
"https://docs.langflow.org/components-embedding-models", - "edited": false, - "field_order": [ - "provider", - "api_base", - "ollama_base_url", - "base_url_ibm_watsonx", - "model", - "api_key", - "project_id", - "dimensions", - "chunk_size", - "request_timeout", - "max_retries", - "show_progress_bar", - "model_kwargs", - "truncate_input_tokens", - "input_text" - ], - "frozen": false, - "icon": "binary", - "legacy": false, - "metadata": { - "code_hash": "c5e0a4535a27", - "dependencies": { - "dependencies": [ - { - "name": "requests", - "version": "2.32.5" - }, - { - "name": "ibm_watsonx_ai", - "version": "1.4.2" - }, - { - "name": "langchain_openai", - "version": "0.3.23" - }, - { - "name": "lfx", - "version": "0.2.0.dev19" - }, - { - "name": "langchain_ollama", - "version": "0.3.10" - }, - { - "name": "langchain_community", - "version": "0.3.21" - }, - { - "name": "langchain_ibm", - "version": "0.3.19" - } - ], - "total_dependencies": 7 - }, - "module": "custom_components.embedding_model" - }, - "minimized": false, - "output_types": [], - "outputs": [ - { - "allows_loop": false, - "cache": true, - "display_name": "Embedding Model", - "group_outputs": false, - "method": "build_embeddings", - "name": "embeddings", - "options": null, - "required_inputs": null, - "selected": "Embeddings", - "tool_mode": true, - "types": [ - "Embeddings" - ], - "value": "__UNDEFINED__" - } - ], - "pinned": false, - "template": { - "_type": "Component", - "api_base": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "API Base URL", - "dynamic": false, - "info": "Base URL for the API. Leave empty for default.", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "api_base", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "" - }, - "api_key": { - "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "OpenAI API Key", - "dynamic": false, - "info": "Model Provider API key", - "input_types": [], - "load_from_db": true, - "name": "api_key", - "password": true, - "placeholder": "", - "real_time_refresh": true, - "required": true, - "show": true, - "title_case": false, - "track_in_telemetry": false, - "type": "str", - "value": "OPENAI_API_KEY" - }, - "base_url_ibm_watsonx": { - "_input_type": "DropdownInput", - "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "watsonx API Endpoint", - "dynamic": false, - "external_options": {}, - "info": "The base URL of the API (IBM watsonx.ai only)", - "name": "base_url_ibm_watsonx", - "options": [ - "https://us-south.ml.cloud.ibm.com", - "https://eu-de.ml.cloud.ibm.com", - "https://eu-gb.ml.cloud.ibm.com", - "https://au-syd.ml.cloud.ibm.com", - "https://jp-tok.ml.cloud.ibm.com", - "https://ca-tor.ml.cloud.ibm.com" - ], - "options_metadata": [], - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": false, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "str", - "value": "https://us-south.ml.cloud.ibm.com" - }, - "chunk_size": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "Chunk Size", - "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "name": "chunk_size", - "placeholder": "", - "required": false, - "show": 
true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "int", - "value": 1000 - }, - "code": { - "advanced": true, - "dynamic": true, - "fileTypes": [], - "file_path": "", - "info": "", - "list": false, - "load_from_db": false, - "multiline": true, - "name": "code", - "password": false, - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "type": "code", - "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n return OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. 
\"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n return OllamaEmbeddings(\n model=model,\n base_url=transformed_base_url or \"http://localhost:11434\",\n **model_kwargs,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n from ibm_watsonx_ai import APIClient, Credentials\n\n credentials = Credentials(\n api_key=self.api_key,\n url=base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\",\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n return WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = 
self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" - }, - "dimensions": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "Dimensions", - "dynamic": false, - "info": "The number of dimensions the resulting output embeddings should have. 
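
Note: `fetch_ibm_models` in the component above is a plain REST call against the watsonx.ai model catalog and can be exercised outside Langflow. A minimal sketch using the same endpoint and filters as the component; the base URL is the component's default regional endpoint, and, as in the component, the listing request is issued without credentials.

    import requests

    WATSONX_URL = "https://us-south.ml.cloud.ibm.com"  # component default

    def fetch_embedding_model_ids(base_url: str = WATSONX_URL) -> list[str]:
        # Same endpoint and filters the component uses: embedding-capable
        # models that have not been withdrawn.
        response = requests.get(
            f"{base_url}/ml/v1/foundation_model_specs",
            params={
                "version": "2024-09-16",
                "filters": "function_embedding,!lifecycle_withdrawn:and",
            },
            timeout=10,
        )
        response.raise_for_status()
        return sorted(model["model_id"] for model in response.json().get("resources", []))

    print(fetch_embedding_model_ids())  # e.g. ['ibm/granite-embedding-107m-multilingual', ...]
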
Only supported by certain models.", - "list": false, - "list_add_label": "Add More", - "name": "dimensions", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "int", - "value": "" - }, - "input_text": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Include the original text in the output", - "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "name": "input_text", - "placeholder": "", - "required": false, - "show": false, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "bool", - "value": true - }, - "max_retries": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "Max Retries", - "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "name": "max_retries", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "int", - "value": 3 - }, - "model": { - "_input_type": "DropdownInput", - "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Model Name", - "dynamic": false, - "external_options": {}, - "info": "Select the embedding model to use", - "name": "model", - "options": [ - "text-embedding-3-small", - "text-embedding-3-large", - "text-embedding-ada-002" - ], - "options_metadata": [], - "placeholder": "", - "real_time_refresh": true, - "refresh_button": true, - "required": false, - "show": true, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "str", - "value": "text-embedding-3-small" - }, - "model_kwargs": { - "_input_type": "DictInput", - "advanced": true, - "display_name": "Model Kwargs", - "dynamic": false, - "info": "Additional keyword arguments to pass to the model.", - "list": false, - "list_add_label": "Add More", - "name": "model_kwargs", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "track_in_telemetry": false, - "type": "dict", - "value": {} - }, - "ollama_base_url": { - "_input_type": "MessageTextInput", - "advanced": false, - "display_name": "Ollama API URL", - "dynamic": false, - "info": "Endpoint of the Ollama API (Ollama only). 
Defaults to http://localhost:11434", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "ollama_base_url", - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": false, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "" - }, - "project_id": { - "_input_type": "MessageTextInput", - "advanced": false, - "display_name": "Project ID", - "dynamic": false, - "info": "IBM watsonx.ai Project ID (required for IBM watsonx.ai)", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "project_id", - "placeholder": "", - "required": false, - "show": false, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "" - }, - "provider": { - "_input_type": "DropdownInput", - "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Model Provider", - "dynamic": false, - "external_options": {}, - "info": "Select the embedding model provider", - "name": "provider", - "options": [ - "OpenAI", - "Ollama", - "IBM watsonx.ai" - ], - "options_metadata": [ - { - "icon": "OpenAI" - }, - { - "icon": "Ollama" - }, - { - "icon": "WatsonxAI" - } - ], - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": true, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "str", - "value": "OpenAI" - }, - "request_timeout": { - "_input_type": "FloatInput", - "advanced": true, - "display_name": "Request Timeout", - "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "name": "request_timeout", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "float", - "value": "" - }, - "show_progress_bar": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Show Progress Bar", - "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "name": "show_progress_bar", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "bool", - "value": false - }, - "truncate_input_tokens": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "Truncate Input Tokens", - "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "name": "truncate_input_tokens", - "placeholder": "", - "required": false, - "show": false, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "int", - "value": 200 - } - }, - "tool_mode": false - }, - "showNode": true, - "type": "EmbeddingModel" - }, - "dragging": false, - "id": "EmbeddingModel-oPi95", - "measured": { - "height": 369, - "width": 320 - }, - "position": { - "x": 729.3034344219965, - "y": 408.1587064380646 + "x": 2570.5017746875496, + "y": 1673.3586604488528 }, "selected": false, "type": "genericNode" @@ -2314,6 +1307,7 @@ ], "frozen": false, "icon": "bot", + "last_updated": "2025-11-26T00:05:16.025Z", "legacy": false, "metadata": { "code_hash": "d64b11c24a1c", @@ -2344,6 +1338,7 @@ "cache": true, "display_name": 
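
Note: both the removed and the added versions of this component normalize the Ollama base URL before building embeddings, including stripping a trailing "/v1", since that suffix selects Ollama's OpenAI-compatible API rather than the native one. A standalone sketch of just the suffix handling (the localhost rewriting done by lfx's transform_localhost_url is omitted here):

    import logging

    logger = logging.getLogger(__name__)

    def strip_openai_suffix(base_url: str) -> str:
        # Mirrors the component's check: a trailing "/v1" means the caller
        # pointed at Ollama's OpenAI-compatible endpoint by mistake.
        trimmed = base_url.rstrip("/")
        if trimmed.endswith("/v1"):
            logger.warning(
                "Detected '/v1' suffix in base URL; the native Ollama API is used, "
                "so the suffix is removed. See https://docs.ollama.com/openai#openai-compatibility"
            )
            return trimmed.removesuffix("/v1")
        return base_url

    assert strip_openai_suffix("http://localhost:11434/v1/") == "http://localhost:11434"
    assert strip_openai_suffix("http://localhost:11434") == "http://localhost:11434"
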
"Response", "group_outputs": false, + "loop_types": null, "method": "message_response", "name": "response", "options": null, @@ -2358,6 +1353,12 @@ ], "pinned": false, "template": { + "_frontend_node_flow_id": { + "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" + }, + "_frontend_node_folder_id": { + "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + }, "_type": "Component", "add_current_date_tool": { "_input_type": "BoolInput", @@ -2365,6 +1366,7 @@ "display_name": "Current Date", "dynamic": false, "info": "If true, will add a tool to the agent that returns the current date.", + "input_types": [], "list": false, "list_add_label": "Add More", "name": "add_current_date_tool", @@ -2471,6 +1473,7 @@ "input_types": [], "load_from_db": true, "name": "api_key", + "override_skip": false, "password": true, "placeholder": "", "real_time_refresh": true, @@ -2481,32 +1484,13 @@ "type": "str", "value": "OPENAI_API_KEY" }, - "base_url": { - "_input_type": "StrInput", - "advanced": false, - "display_name": "Base URL", - "dynamic": false, - "info": "The base URL of the API.", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "base_url", - "placeholder": "", - "required": true, - "show": false, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "" - }, "code": { "advanced": true, "dynamic": true, "fileTypes": [], "file_path": "", "info": "", + "input_types": [], "list": false, "load_from_db": false, "multiline": true, @@ -2575,6 +1559,7 @@ "display_name": "Handle Parse Errors", "dynamic": false, "info": "Should the Agent fix errors when reading user input for better processing?", + "input_types": [], "list": false, "list_add_label": "Add More", "name": "handle_parsing_errors", @@ -2612,12 +1597,35 @@ "type": "str", "value": "" }, + "is_refresh": false, + "json_mode": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "JSON Mode", + "dynamic": false, + "info": "If True, it will output JSON regardless of passing a schema.", + "input_types": [], + "list": false, + "list_add_label": "Add More", + "name": "json_mode", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": false + }, "max_iterations": { "_input_type": "IntInput", "advanced": true, "display_name": "Max Iterations", "dynamic": false, "info": "The maximum number of attempts the agent can make to complete its task before it stops.", + "input_types": [], "list": false, "list_add_label": "Add More", "name": "max_iterations", @@ -2631,34 +1639,17 @@ "type": "int", "value": 15 }, - "max_output_tokens": { - "_input_type": "IntInput", - "advanced": false, - "display_name": "Max Output Tokens", - "dynamic": false, - "info": "The maximum number of tokens to generate.", - "list": false, - "list_add_label": "Add More", - "name": "max_output_tokens", - "placeholder": "", - "required": false, - "show": false, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "int", - "value": "" - }, "max_retries": { "_input_type": "IntInput", "advanced": true, "display_name": "Max Retries", "dynamic": false, "info": "The maximum number of retries to make when generating.", + "input_types": [], "list": false, "list_add_label": "Add More", "name": "max_retries", + "override_skip": false, "placeholder": "", "required": false, 
"show": true, @@ -2675,9 +1666,11 @@ "display_name": "Max Tokens", "dynamic": false, "info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.", + "input_types": [], "list": false, "list_add_label": "Add More", "name": "max_tokens", + "override_skip": false, "placeholder": "", "range_spec": { "max": 128000, @@ -2700,9 +1693,11 @@ "display_name": "Model Kwargs", "dynamic": false, "info": "Additional keyword arguments to pass to the model.", + "input_types": [], "list": false, "list_add_label": "Add More", "name": "model_kwargs", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -2722,31 +1717,13 @@ "dynamic": false, "external_options": {}, "info": "To see the model names, first choose a provider. Then, enter your API key and click the refresh button next to the model name.", - "load_from_db": false, + "input_types": [], "name": "model_name", "options": [ - "gpt-4o-mini", - "gpt-4o", - "gpt-4.1", - "gpt-4.1-mini", - "gpt-4.1-nano", - "gpt-4-turbo", - "gpt-4-turbo-preview", - "gpt-4", - "gpt-3.5-turbo", - "gpt-5.1", - "gpt-5", - "gpt-5-mini", - "gpt-5-nano", - "gpt-5-chat-latest", - "o1", - "o3-mini", - "o3", - "o3-pro", - "o4-mini", - "o4-mini-high" + "gpt-4o" ], "options_metadata": [], + "override_skip": false, "placeholder": "", "real_time_refresh": false, "required": false, @@ -2765,6 +1742,7 @@ "display_name": "Number of Chat History Messages", "dynamic": false, "info": "Number of chat history messages to retrieve.", + "input_types": [], "list": false, "list_add_label": "Add More", "name": "n_messages", @@ -2784,10 +1762,12 @@ "display_name": "OpenAI API Base", "dynamic": false, "info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.", + "input_types": [], "list": false, "list_add_label": "Add More", "load_from_db": false, "name": "openai_api_base", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -2804,6 +1784,7 @@ "display_name": "Output Schema", "dynamic": false, "info": "Schema Validation: Define the structure and data types for structured output. 
No validation if no output schema.", + "input_types": [], "is_list": true, "list_add_label": "Add More", "name": "output_schema", @@ -2861,35 +1842,17 @@ "type": "table", "value": [] }, - "project_id": { - "_input_type": "StrInput", - "advanced": false, - "display_name": "Project ID", - "dynamic": false, - "info": "The project ID of the model.", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "project_id", - "placeholder": "", - "required": true, - "show": false, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "" - }, "seed": { "_input_type": "IntInput", "advanced": true, "display_name": "Seed", "dynamic": false, "info": "The seed controls the reproducibility of the job.", + "input_types": [], "list": false, "list_add_label": "Add More", "name": "seed", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -2932,11 +1895,13 @@ "display_name": "Temperature", "dynamic": false, "info": "", + "input_types": [], "max_label": "", "max_label_icon": "", "min_label": "", "min_label_icon": "", "name": "temperature", + "override_skip": false, "placeholder": "", "range_spec": { "max": 1, @@ -2961,9 +1926,11 @@ "display_name": "Timeout", "dynamic": false, "info": "The timeout for requests to OpenAI completion API.", + "input_types": [], "list": false, "list_add_label": "Add More", "name": "timeout", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -3001,6 +1968,7 @@ "display_name": "Verbose", "dynamic": false, "info": "", + "input_types": [], "list": false, "list_add_label": "Add More", "name": "verbose", @@ -3027,14 +1995,16 @@ "width": 320 }, "position": { - "x": 1629.578423203229, - "y": 451.946400444934 + "x": 2063.120880097103, + "y": 1543.154271545972 }, "selected": false, "type": "genericNode" }, { "data": { + "description": "Perform basic arithmetic operations on a given expression.", + "display_name": "Calculator", "id": "CalculatorComponent-KrlMH", "node": { "base_classes": [ @@ -3045,17 +2015,17 @@ "custom_fields": {}, "description": "Perform basic arithmetic operations on a given expression.", "display_name": "Calculator", - "documentation": "https://docs.langflow.org/components-helpers#calculator", + "documentation": "https://docs.langflow.org/calculator", "edited": false, "field_order": [ "expression" ], "frozen": false, "icon": "calculator", - "last_updated": "2025-11-24T18:02:41.468Z", + "last_updated": "2025-11-26T00:05:16.026Z", "legacy": false, "metadata": { - "code_hash": "5fcfa26be77d", + "code_hash": "acbe2603b034", "dependencies": { "dependencies": [ { @@ -3065,7 +2035,7 @@ ], "total_dependencies": 1 }, - "module": "lfx.components.helpers.calculator_core.CalculatorComponent" + "module": "custom_components.calculator" }, "minimized": false, "output_types": [], @@ -3076,6 +2046,7 @@ "display_name": "Toolset", "group_outputs": false, "hidden": null, + "loop_types": null, "method": "to_toolkit", "name": "component_as_tool", "options": null, @@ -3090,6 +2061,12 @@ ], "pinned": false, "template": { + "_frontend_node_flow_id": { + "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" + }, + "_frontend_node_folder_id": { + "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + }, "_type": "Component", "code": { "advanced": true, @@ -3107,7 +2084,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import ast\nimport operator\nfrom collections.abc import Callable\n\nfrom lfx.custom.custom_component.component 
import Component\nfrom lfx.inputs.inputs import MessageTextInput\nfrom lfx.io import Output\nfrom lfx.schema.data import Data\n\n\nclass CalculatorComponent(Component):\n display_name = \"Calculator\"\n description = \"Perform basic arithmetic operations on a given expression.\"\n documentation: str = \"https://docs.langflow.org/components-helpers#calculator\"\n icon = \"calculator\"\n\n # Cache operators dictionary as a class variable\n OPERATORS: dict[type[ast.operator], Callable] = {\n ast.Add: operator.add,\n ast.Sub: operator.sub,\n ast.Mult: operator.mul,\n ast.Div: operator.truediv,\n ast.Pow: operator.pow,\n }\n\n inputs = [\n MessageTextInput(\n name=\"expression\",\n display_name=\"Expression\",\n info=\"The arithmetic expression to evaluate (e.g., '4*4*(33/22)+12-20').\",\n tool_mode=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Data\", name=\"result\", type_=Data, method=\"evaluate_expression\"),\n ]\n\n def _eval_expr(self, node: ast.AST) -> float:\n \"\"\"Evaluate an AST node recursively.\"\"\"\n if isinstance(node, ast.Constant):\n if isinstance(node.value, int | float):\n return float(node.value)\n error_msg = f\"Unsupported constant type: {type(node.value).__name__}\"\n raise TypeError(error_msg)\n if isinstance(node, ast.Num): # For backwards compatibility\n if isinstance(node.n, int | float):\n return float(node.n)\n error_msg = f\"Unsupported number type: {type(node.n).__name__}\"\n raise TypeError(error_msg)\n\n if isinstance(node, ast.BinOp):\n op_type = type(node.op)\n if op_type not in self.OPERATORS:\n error_msg = f\"Unsupported binary operator: {op_type.__name__}\"\n raise TypeError(error_msg)\n\n left = self._eval_expr(node.left)\n right = self._eval_expr(node.right)\n return self.OPERATORS[op_type](left, right)\n\n error_msg = f\"Unsupported operation or expression type: {type(node).__name__}\"\n raise TypeError(error_msg)\n\n def evaluate_expression(self) -> Data:\n \"\"\"Evaluate the mathematical expression and return the result.\"\"\"\n try:\n tree = ast.parse(self.expression, mode=\"eval\")\n result = self._eval_expr(tree.body)\n\n formatted_result = f\"{float(result):.6f}\".rstrip(\"0\").rstrip(\".\")\n self.log(f\"Calculation result: {formatted_result}\")\n\n self.status = formatted_result\n return Data(data={\"result\": formatted_result})\n\n except ZeroDivisionError:\n error_message = \"Error: Division by zero\"\n self.status = error_message\n return Data(data={\"error\": error_message, \"input\": self.expression})\n\n except (SyntaxError, TypeError, KeyError, ValueError, AttributeError, OverflowError) as e:\n error_message = f\"Invalid expression: {e!s}\"\n self.status = error_message\n return Data(data={\"error\": error_message, \"input\": self.expression})\n\n def build(self):\n \"\"\"Return the main evaluation function.\"\"\"\n return self.evaluate_expression\n" + "value": "import ast\nimport operator\nfrom collections.abc import Callable\n\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.inputs.inputs import MessageTextInput\nfrom lfx.io import Output\nfrom lfx.schema.data import Data\n\n\nclass CalculatorComponent(Component):\n display_name = \"Calculator\"\n description = \"Perform basic arithmetic operations on a given expression.\"\n documentation: str = \"https://docs.langflow.org/calculator\"\n icon = \"calculator\"\n\n # Cache operators dictionary as a class variable\n OPERATORS: dict[type[ast.operator], Callable] = {\n ast.Add: operator.add,\n ast.Sub: operator.sub,\n ast.Mult: operator.mul,\n ast.Div: 
operator.truediv,\n ast.Pow: operator.pow,\n }\n\n inputs = [\n MessageTextInput(\n name=\"expression\",\n display_name=\"Expression\",\n info=\"The arithmetic expression to evaluate (e.g., '4*4*(33/22)+12-20').\",\n tool_mode=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Data\", name=\"result\", type_=Data, method=\"evaluate_expression\"),\n ]\n\n def _eval_expr(self, node: ast.AST) -> float:\n \"\"\"Evaluate an AST node recursively.\"\"\"\n if isinstance(node, ast.Constant):\n if isinstance(node.value, int | float):\n return float(node.value)\n error_msg = f\"Unsupported constant type: {type(node.value).__name__}\"\n raise TypeError(error_msg)\n if isinstance(node, ast.Num): # For backwards compatibility\n if isinstance(node.n, int | float):\n return float(node.n)\n error_msg = f\"Unsupported number type: {type(node.n).__name__}\"\n raise TypeError(error_msg)\n\n if isinstance(node, ast.BinOp):\n op_type = type(node.op)\n if op_type not in self.OPERATORS:\n error_msg = f\"Unsupported binary operator: {op_type.__name__}\"\n raise TypeError(error_msg)\n\n left = self._eval_expr(node.left)\n right = self._eval_expr(node.right)\n return self.OPERATORS[op_type](left, right)\n\n error_msg = f\"Unsupported operation or expression type: {type(node).__name__}\"\n raise TypeError(error_msg)\n\n def evaluate_expression(self) -> Data:\n \"\"\"Evaluate the mathematical expression and return the result.\"\"\"\n try:\n tree = ast.parse(self.expression, mode=\"eval\")\n result = self._eval_expr(tree.body)\n\n formatted_result = f\"{float(result):.6f}\".rstrip(\"0\").rstrip(\".\")\n self.log(f\"Calculation result: {formatted_result}\")\n\n self.status = formatted_result\n return Data(data={\"result\": formatted_result})\n\n except ZeroDivisionError:\n error_message = \"Error: Division by zero\"\n self.status = error_message\n return Data(data={\"error\": error_message, \"input\": self.expression})\n\n except (SyntaxError, TypeError, KeyError, ValueError, AttributeError, OverflowError) as e:\n error_message = f\"Invalid expression: {e!s}\"\n self.status = error_message\n return Data(data={\"error\": error_message, \"input\": self.expression})\n\n def build(self):\n \"\"\"Return the main evaluation function.\"\"\"\n return self.evaluate_expression\n" }, "expression": { "_input_type": "MessageTextInput", @@ -3122,6 +2099,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "expression", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -3129,9 +2107,11 @@ "tool_mode": true, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, + "is_refresh": false, "tools_metadata": { "_input_type": "ToolsInput", "advanced": false, @@ -3141,6 +2121,7 @@ "is_list": true, "list_add_label": "Add More", "name": "tools_metadata", + "override_skip": false, "placeholder": "", "real_time_refresh": true, "required": false, @@ -3152,7 +2133,6 @@ "type": "tools", "value": [ { - "_uniqueId": "evaluate_expression_evaluate_expression_0", "args": { "expression": { "default": "", @@ -3186,24 +2166,2293 @@ "width": 320 }, "position": { - "x": 740.6385299138288, - "y": 1287.3185590984713 + "x": 1498.151476724705, + "y": 1719.4446700170724 + }, + "selected": false, + "type": "genericNode" + }, + { + "data": { + "description": "Generate embeddings using a specified provider.", + "display_name": "Embedding Model", + "id": "EmbeddingModel-aIP4U", + "node": { + "base_classes": [ + "Embeddings" + ], + "beta": false, + 
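
Note: the Calculator component's core is a whitelist-based AST walk: only numeric constants and five binary operators are accepted, so names, calls, and attribute access raise instead of executing. A condensed, runnable version of that evaluation loop, using the same operator whitelist as the component:

    import ast
    import operator

    # Same operator whitelist as the component.
    OPERATORS = {
        ast.Add: operator.add,
        ast.Sub: operator.sub,
        ast.Mult: operator.mul,
        ast.Div: operator.truediv,
        ast.Pow: operator.pow,
    }

    def evaluate(expression: str) -> float:
        def walk(node: ast.AST) -> float:
            if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
                return float(node.value)
            if isinstance(node, ast.BinOp) and type(node.op) in OPERATORS:
                return OPERATORS[type(node.op)](walk(node.left), walk(node.right))
            raise TypeError(f"Unsupported expression: {type(node).__name__}")
        return walk(ast.parse(expression, mode="eval").body)

    print(evaluate("4*4*(33/22)+12-20"))  # 16.0, the expression from the input's own info text
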
"conditional_paths": [], + "custom_fields": {}, + "description": "Generate embeddings using a specified provider.", + "display_name": "Embedding Model", + "documentation": "https://docs.langflow.org/components-embedding-models", + "edited": false, + "field_order": [ + "provider", + "api_base", + "ollama_base_url", + "base_url_ibm_watsonx", + "model", + "api_key", + "project_id", + "dimensions", + "chunk_size", + "request_timeout", + "max_retries", + "show_progress_bar", + "model_kwargs", + "truncate_input_tokens", + "input_text" + ], + "frozen": false, + "icon": "binary", + "last_updated": "2025-11-26T00:05:16.026Z", + "legacy": false, + "metadata": { + "code_hash": "9e44c83a5058", + "dependencies": { + "dependencies": [ + { + "name": "requests", + "version": "2.32.5" + }, + { + "name": "ibm_watsonx_ai", + "version": "1.4.2" + }, + { + "name": "langchain_openai", + "version": "0.3.23" + }, + { + "name": "lfx", + "version": null + }, + { + "name": "langchain_ollama", + "version": "0.3.10" + }, + { + "name": "langchain_community", + "version": "0.3.21" + }, + { + "name": "langchain_ibm", + "version": "0.3.19" + } + ], + "total_dependencies": 7 + }, + "module": "custom_components.embedding_model" + }, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Embedding Model", + "group_outputs": false, + "loop_types": null, + "method": "build_embeddings", + "name": "embeddings", + "options": null, + "required_inputs": null, + "selected": "Embeddings", + "tool_mode": true, + "types": [ + "Embeddings" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_frontend_node_flow_id": { + "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" + }, + "_frontend_node_folder_id": { + "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + }, + "_type": "Component", + "api_base": { + "_input_type": "MessageTextInput", + "advanced": true, + "display_name": "API Base URL", + "dynamic": false, + "info": "Base URL for the API. 
Leave empty for default.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "api_base", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "api_key": { + "_input_type": "SecretStrInput", + "advanced": false, + "display_name": "OpenAI API Key", + "dynamic": false, + "info": "Model Provider API key", + "input_types": [], + "load_from_db": true, + "name": "api_key", + "override_skip": false, + "password": true, + "placeholder": "", + "real_time_refresh": true, + "required": true, + "show": true, + "title_case": false, + "track_in_telemetry": false, + "type": "str", + "value": "OPENAI_API_KEY" + }, + "base_url_ibm_watsonx": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "watsonx API Endpoint", + "dynamic": false, + "external_options": {}, + "info": "The base URL of the API (IBM watsonx.ai only)", + "name": "base_url_ibm_watsonx", + "options": [ + "https://us-south.ml.cloud.ibm.com", + "https://eu-de.ml.cloud.ibm.com", + "https://eu-gb.ml.cloud.ibm.com", + "https://au-syd.ml.cloud.ibm.com", + "https://jp-tok.ml.cloud.ibm.com", + "https://ca-tor.ml.cloud.ibm.com" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": false, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "https://us-south.ml.cloud.ibm.com" + }, + "chunk_size": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Chunk Size", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "chunk_size", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 1000 + }, + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = 
\"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and 
transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n 
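
Note: the rewritten async build_embeddings above no longer returns a bare embeddings object. For every provider it wraps the currently selected instance together with one ready-made instance per available model in EmbeddingsWithModels (imported from lfx.base.embeddings.embeddings_class in the nightly build). The shape of that pattern can be sketched with a simple container; this is an illustration under assumed types, not the lfx class itself:

    from dataclasses import dataclass, field

    @dataclass
    class EmbeddingsWithModels:  # illustrative stand-in for the lfx class
        embeddings: object                       # the selected model's instance
        available_models: dict[str, object] = field(default_factory=dict)

    def build_bundle(selected: str, all_models: list[str], make):
        # `make` constructs one embeddings instance per model name, as the
        # component does for OpenAI, Ollama, and IBM watsonx.ai alike.
        return EmbeddingsWithModels(
            embeddings=make(selected),
            available_models={name: make(name) for name in all_models},
        )

    bundle = build_bundle(
        "text-embedding-3-small",
        ["text-embedding-3-small", "text-embedding-3-large", "text-embedding-ada-002"],
        make=lambda name: f"<embeddings:{name}>",  # placeholder factory
    )
    print(sorted(bundle.available_models))
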
build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await 
is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + }, + "dimensions": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Dimensions", + "dynamic": false, + "info": "The number of dimensions the resulting output embeddings should have. Only supported by certain models.", + "list": false, + "list_add_label": "Add More", + "name": "dimensions", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": "" + }, + "input_text": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Include the original text in the output", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "input_text", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, + "is_refresh": false, + "max_retries": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Max Retries", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "max_retries", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 3 + }, + "model": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Model Name", + "dynamic": false, + "external_options": {}, + "info": "Select the embedding model to use", + "name": "model", + "options": [ + "text-embedding-3-small", + "text-embedding-3-large", + "text-embedding-ada-002" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "refresh_button": true, + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "text-embedding-3-small" + }, + "model_kwargs": { + "_input_type": "DictInput", + "advanced": true, + "display_name": "Model Kwargs", + "dynamic": false, + "info": "Additional keyword arguments to pass to the model.", + "list": false, + "list_add_label": "Add More", + "name": "model_kwargs", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "track_in_telemetry": false, + "type": "dict", + "value": {} + }, + "ollama_base_url": { + "_input_type": "MessageTextInput", + "advanced": false, + "display_name": "Ollama API URL", + "dynamic": false, + "info": "Endpoint of the Ollama API (Ollama only). 
Defaults to http://localhost:11434", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "ollama_base_url", + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "project_id": { + "_input_type": "MessageTextInput", + "advanced": false, + "display_name": "Project ID", + "dynamic": false, + "info": "IBM watsonx.ai Project ID (required for IBM watsonx.ai)", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "project_id", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "provider": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Model Provider", + "dynamic": false, + "external_options": {}, + "info": "Select the embedding model provider", + "name": "provider", + "options": [ + "OpenAI", + "Ollama", + "IBM watsonx.ai" + ], + "options_metadata": [ + { + "icon": "OpenAI" + }, + { + "icon": "Ollama" + }, + { + "icon": "WatsonxAI" + } + ], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "OpenAI" + }, + "request_timeout": { + "_input_type": "FloatInput", + "advanced": true, + "display_name": "Request Timeout", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "request_timeout", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "float", + "value": "" + }, + "show_progress_bar": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Show Progress Bar", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "show_progress_bar", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": false + }, + "truncate_input_tokens": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Truncate Input Tokens", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "truncate_input_tokens", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 200 + } + }, + "tool_mode": false + }, + "showNode": true, + "type": "EmbeddingModel" + }, + "dragging": false, + "id": "EmbeddingModel-aIP4U", + "measured": { + "height": 369, + "width": 320 + }, + "position": { + "x": 500.6819779163044, + "y": 1881.18804656446 + }, + "selected": false, + "type": "genericNode" + }, + { + "data": { + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE", + "node": { + 
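
Note: the new OpenSearch component's field_order exposes engine, space_type, ef_construction, m, and a per-model vector field; in OpenSearch terms these map onto a knn_vector mapping with an HNSW method, one vector field per embedding model named by the helper functions shown further below. A hedged sketch of what such an index mapping can look like; the endpoint, index name, dimension, and method values here are placeholder assumptions, not values taken from the flow:

    from opensearchpy import OpenSearch

    client = OpenSearch("http://localhost:9200")  # placeholder endpoint

    # One knn_vector field per embedding model, named chunk_embedding_{normalized model name}.
    mapping = {
        "settings": {"index": {"knn": True}},
        "mappings": {
            "properties": {
                "chunk_embedding_text_embedding_3_small": {
                    "type": "knn_vector",
                    "dimension": 1536,  # assumed for text-embedding-3-small
                    "method": {
                        "name": "hnsw",
                        "engine": "nmslib",       # assumed engine choice
                        "space_type": "l2",       # assumed space type
                        "parameters": {"ef_construction": 512, "m": 16},
                    },
                },
                "text": {"type": "text"},  # keyword side of the hybrid search
            }
        },
    }

    client.indices.create(index="documents", body=mapping)
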
"base_classes": [ + "Data", + "DataFrame", + "VectorStore" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.", + "display_name": "OpenSearch (Multi-Model Multi-Embedding)", + "documentation": "", + "edited": false, + "field_order": [ + "docs_metadata", + "opensearch_url", + "index_name", + "engine", + "space_type", + "ef_construction", + "m", + "num_candidates", + "ingest_data", + "search_query", + "should_cache_vector_store", + "embedding", + "embedding_model_name", + "vector_field", + "number_of_results", + "filter_expression", + "auth_mode", + "username", + "password", + "jwt_token", + "jwt_header", + "bearer_prefix", + "use_ssl", + "verify_certs" + ], + "frozen": false, + "icon": "OpenSearch", + "last_updated": "2025-11-26T00:05:16.028Z", + "legacy": false, + "metadata": { + "code_hash": "8c78d799fef4", + "dependencies": { + "dependencies": [ + { + "name": "opensearchpy", + "version": "2.8.0" + }, + { + "name": "lfx", + "version": null + } + ], + "total_dependencies": 2 + }, + "module": "lfx.components.elastic.opensearch_multimodal.OpenSearchVectorStoreComponentMultimodalMultiEmbedding" + }, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Toolset", + "group_outputs": false, + "hidden": null, + "loop_types": null, + "method": "to_toolkit", + "name": "component_as_tool", + "options": null, + "required_inputs": null, + "selected": "Tool", + "tool_mode": true, + "types": [ + "Tool" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_frontend_node_flow_id": { + "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" + }, + "_frontend_node_folder_id": { + "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + }, + "_type": "Component", + "auth_mode": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Authentication Mode", + "dynamic": false, + "external_options": {}, + "info": "Authentication method: 'basic' for username/password authentication, or 'jwt' for JSON Web Token (Bearer) authentication.", + "name": "auth_mode", + "options": [ + "basic", + "jwt" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "jwt" + }, + "bearer_prefix": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Prefix 'Bearer '", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "bearer_prefix", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy 
import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. 
It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
\"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. 
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n self.log(self.ingest_data)\n client = self.build_client()\n logger.warning(f\"Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def 
_add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n docs = self.ingest_data or []\n if not docs:\n self.log(\"No documents to ingest.\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n if not embeddings_list:\n msg = \"At least one embedding is required to embed documents.\"\n raise ValueError(msg)\n\n self.log(f\"Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = 
[]\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in 
self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings (threaded for concurrency) with retries\n def embed_chunk(chunk_text: str) -> list[float]:\n return selected_embedding.embed_documents([chunk_text])[0]\n\n vectors: list[list[float]] | None = None\n last_exception: Exception | None = None\n delay = 1.0\n attempts = 0\n max_attempts = 3\n\n while attempts < max_attempts:\n attempts += 1\n try:\n max_workers = min(max(len(texts), 1), 8)\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n vectors = [None] * len(texts)\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n break\n except Exception as exc:\n last_exception = exc\n if attempts >= max_attempts:\n logger.error(\n f\"Embedding generation failed for model {embedding_model} after retries\",\n error=str(exc),\n )\n raise\n logger.warning(\n \"Threaded embedding generation failed for model %s (attempt %s/%s), retrying in %.1fs\",\n embedding_model,\n attempts,\n max_attempts,\n delay,\n )\n time.sleep(delay)\n delay = min(delay * 2, 8.0)\n\n if vectors is None:\n raise RuntimeError(\n f\"Embedding generation failed for {embedding_model}: {last_exception}\"\n if last_exception\n else f\"Embedding generation failed for {embedding_model}\"\n )\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n 
ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique 
embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": {\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n result = client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n return [self._get_embedding_model_name()]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. 
Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None) # distinct name: don't shadow the detected-models list\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={emb_available_models}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(f\"Embedding object {idx} provides {len(emb_available_models)} models via available_models dict\")\n for model_name_key, dedicated_embedding in emb_available_models.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model 
'{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. \"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n else:\n # No matching embedding found for this model\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. 
\"\n f\"This model will be skipped. Available models: {list(embedding_by_model.keys())}\"\n )\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n\n if not query_embeddings:\n msg = \"Failed to generate embeddings for any model\"\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. 
\"\n \"Returning empty search results.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found 
{len(hits)} results\")\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n raw = self.search(self.search_query or \"\")\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n self.log(self.ingest_data)\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n" + }, + "docs_metadata": { + "_input_type": "TableInput", + "advanced": false, + "display_name": "Document Metadata", + "dynamic": false, + "info": "Additional metadata key-value pairs to be added to all ingested documents. Useful for tagging documents with source information, categories, or other custom attributes.", + "input_types": [ + "Data" + ], + "is_list": true, + "list_add_label": "Add More", + "name": "docs_metadata", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "table_icon": "Table", + "table_schema": [ + { + "description": "Key name", + "display_name": "Key", + "formatter": "text", + "name": "key", + "type": "str" + }, + { + "description": "Value of the metadata", + "display_name": "Value", + "formatter": "text", + "name": "value", + "type": "str" + } + ], + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "trigger_icon": "Table", + "trigger_text": "Open table", + "type": "table", + "value": [] + }, + "ef_construction": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "EF Construction", + "dynamic": false, + "info": "Size of the dynamic candidate list during index construction. 
Higher values improve recall but increase indexing time and memory usage.", + "list": false, + "list_add_label": "Add More", + "name": "ef_construction", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 512 + }, + "embedding": { + "_input_type": "HandleInput", + "advanced": false, + "display_name": "Embedding", + "dynamic": false, + "info": "", + "input_types": [ + "Embeddings" + ], + "list": true, + "list_add_label": "Add More", + "name": "embedding", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "other", + "value": "" + }, + "embedding_model_name": { + "_input_type": "StrInput", + "advanced": false, + "display_name": "Embedding Model Name", + "dynamic": false, + "info": "Name of the embedding model to use for ingestion. This selects which embedding from the list will be used to embed documents. Matches on deployment, model, model_id, or model_name. For duplicate deployments, use combined format: 'deployment:model' (e.g., 'text-embedding-ada-002:text-embedding-3-large'). Leave empty to use the first embedding. Error message will show all available identifiers.", + "list": false, + "list_add_label": "Add More", + "load_from_db": true, + "name": "embedding_model_name", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "SELECTED_EMBEDDING_MODEL" + }, + "engine": { + "_input_type": "DropdownInput", + "advanced": true, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Vector Engine", + "dynamic": false, + "external_options": {}, + "info": "Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.", + "name": "engine", + "options": [ + "jvector", + "nmslib", + "faiss", + "lucene" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "jvector" + }, + "filter_expression": { + "_input_type": "MultilineInput", + "advanced": false, + "ai_enabled": false, + "copy_field": false, + "display_name": "Search Filters (JSON)", + "dynamic": false, + "info": "Optional JSON configuration for search filtering, result limits, and score thresholds.\n\nFormat 1 - Explicit filters:\n{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, {\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\n\nFormat 2 - Context-style mapping:\n{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\n\nUse __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "multiline": true, + "name": "filter_expression", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "index_name": { + "_input_type": "StrInput", + "advanced": false, + "display_name": "Index Name", + "dynamic": false, + "info": "The OpenSearch index name where documents will be stored and searched. Will be created automatically if it doesn't exist.", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "index_name", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "documents" + }, + "ingest_data": { + "_input_type": "HandleInput", + "advanced": false, + "display_name": "Ingest Data", + "dynamic": false, + "info": "", + "input_types": [ + "Data", + "DataFrame" + ], + "list": true, + "list_add_label": "Add More", + "name": "ingest_data", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "other", + "value": "" + }, + "is_refresh": false, + "jwt_header": { + "_input_type": "StrInput", + "advanced": true, + "display_name": "JWT Header Name", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "jwt_header", + "override_skip": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "Authorization" + }, + "jwt_token": { + "_input_type": "SecretStrInput", + "advanced": false, + "display_name": "JWT Token", + "dynamic": false, + "info": "Valid JSON Web Token for authentication. 
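The two formats accepted by the Search Filters (JSON) field above can be normalized into plain bool-filter clauses. A sketch under stated assumptions: normalize_filters is a hypothetical helper, and the context-key-to-field mapping (data_sources to filename, document_types to mimetype, owners to owner) follows the component's aggregation names:

# Hedged sketch: normalize either filter format into OpenSearch filter clauses.
IMPOSSIBLE = "__IMPOSSIBLE_VALUE__"
CONTEXT_FIELDS = {"data_sources": "filename", "document_types": "mimetype", "owners": "owner"}

def normalize_filters(filter_obj: dict) -> tuple[list[dict], int | None, float]:
    clauses = list(filter_obj.get("filter", []))  # Format 1: explicit clauses
    for key, field in CONTEXT_FIELDS.items():     # Format 2: context-style lists
        values = [v for v in filter_obj.get(key, []) if v != IMPOSSIBLE]
        if values:
            clauses.append({"terms": {field: values}})
    return clauses, filter_obj.get("limit"), filter_obj.get("score_threshold", 0)

clauses, limit, threshold = normalize_filters(
    {"data_sources": ["file.pdf"], "owners": [IMPOSSIBLE], "limit": 10}
)
# clauses -> [{"terms": {"filename": ["file.pdf"]}}], limit -> 10, threshold -> 0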
Will be sent in the Authorization header (with optional 'Bearer ' prefix).", + "input_types": [], + "load_from_db": true, + "name": "jwt_token", + "override_skip": false, + "password": true, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "track_in_telemetry": false, + "type": "str", + "value": "JWT" + }, + "m": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "M Parameter", + "dynamic": false, + "info": "Number of bidirectional connections for each vector in the HNSW graph. Higher values improve search quality but increase memory usage and indexing time.", + "list": false, + "list_add_label": "Add More", + "name": "m", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 16 + }, + "num_candidates": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Candidate Pool Size", + "dynamic": false, + "info": "Number of approximate neighbors to consider for each KNN query. Some OpenSearch deployments do not support this parameter; set to 0 to disable.", + "list": false, + "list_add_label": "Add More", + "name": "num_candidates", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 1000 + }, + "number_of_results": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Default Result Limit", + "dynamic": false, + "info": "Default maximum number of search results to return when no limit is specified in the filter expression.", + "list": false, + "list_add_label": "Add More", + "name": "number_of_results", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 10 + }, + "opensearch_url": { + "_input_type": "StrInput", + "advanced": false, + "display_name": "OpenSearch URL", + "dynamic": false, + "info": "The connection URL for your OpenSearch cluster (e.g., http://localhost:9200 for local development or your cloud endpoint).", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "opensearch_url", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "https://opensearch:9200" + }, + "password": { + "_input_type": "SecretStrInput", + "advanced": false, + "display_name": "OpenSearch Password", + "dynamic": false, + "info": "", + "input_types": [], + "load_from_db": false, + "name": "password", + "override_skip": false, + "password": true, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "search_query": { + "_input_type": "QueryInput", + "advanced": false, + "display_name": "Search Query", + "dynamic": false, + "info": "Enter a query to run a similarity search.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "search_query", + "override_skip": false, + "placeholder": "Enter a query...", + "required": false, + "show": true, + "title_case": false, + "tool_mode": true, + 
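The KNN tuning fields in this component (engine, M Parameter, EF Construction, and the Distance Metric described further below) correspond to a standard OpenSearch knn_vector mapping. A hedged sketch: the dimension of 1024 is a placeholder, and jvector availability depends on the OpenSearch distribution, as the Vector Engine help text notes:

# Hedged sketch of an index mapping matching the KNN settings above.
from opensearchpy import OpenSearch

client = OpenSearch(
    "https://opensearch:9200",
    http_auth=("admin", "admin"),  # placeholder credentials
    use_ssl=True,
    verify_certs=False,
)
mapping = {
    "settings": {"index": {"knn": True}},
    "mappings": {
        "properties": {
            "chunk_embedding": {
                "type": "knn_vector",
                "dimension": 1024,  # must match the embedding model's output size
                "method": {
                    "name": "hnsw",
                    "engine": "jvector",  # or nmslib / faiss / lucene
                    "space_type": "l2",   # distance metric
                    "parameters": {"m": 16, "ef_construction": 512},
                },
            }
        }
    },
}
client.indices.create(index="documents", body=mapping)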
"trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "query", + "value": "" + }, + "should_cache_vector_store": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Cache Vector Store", + "dynamic": false, + "info": "If True, the vector store will be cached for the current build of the component. This is useful for components that have multiple output methods and want to share the same vector store.", + "list": false, + "list_add_label": "Add More", + "name": "should_cache_vector_store", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, + "space_type": { + "_input_type": "DropdownInput", + "advanced": true, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Distance Metric", + "dynamic": false, + "external_options": {}, + "info": "Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, 'cosinesimil' for cosine similarity, 'innerproduct' for dot product.", + "name": "space_type", + "options": [ + "l2", + "l1", + "cosinesimil", + "linf", + "innerproduct" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "l2" + }, + "tools_metadata": { + "_input_type": "ToolsInput", + "advanced": false, + "display_name": "Actions", + "dynamic": false, + "info": "Modify tool names and descriptions to help agents understand when to use each tool.", + "is_list": true, + "list_add_label": "Add More", + "name": "tools_metadata", + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "tools", + "value": [ + { + "_uniqueId": "search_documents_search_documents_0", + "args": { + "search_query": { + "default": "", + "description": "Enter a query to run a similarity search.", + "title": "Search Query", + "type": "string" + } + }, + "description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.", + "display_description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.", + "display_name": "search_documents", + "name": "search_documents", + "readonly": false, + "status": true, + "tags": [ + "search_documents" + ] + }, + { + "_uniqueId": "as_dataframe_as_dataframe_1", + "args": { + "search_query": { + "default": "", + "description": "Enter a query to run a similarity search.", + "title": "Search Query", + "type": "string" + } + }, + "description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.", + "display_description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.", + "display_name": "as_dataframe", + "name": "as_dataframe", + "readonly": false, + "status": false, + "tags": [ + "as_dataframe" + ] + }, + { + "_uniqueId": "as_vector_store_as_vector_store_2", + "args": { + "search_query": { + "default": "", + "description": "Enter a query to run a similarity search.", + "title": "Search Query", + "type": "string" + } + }, + "description": "Store and search documents using 
OpenSearch with multi-model hybrid semantic and keyword search.", + "display_description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.", + "display_name": "as_vector_store", + "name": "as_vector_store", + "readonly": false, + "status": false, + "tags": [ + "as_vector_store" + ] + } + ] + }, + "use_ssl": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Use SSL/TLS", + "dynamic": false, + "info": "Enable SSL/TLS encryption for secure connections to OpenSearch.", + "list": false, + "list_add_label": "Add More", + "name": "use_ssl", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, + "username": { + "_input_type": "StrInput", + "advanced": false, + "display_name": "Username", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "username", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "admin" + }, + "vector_field": { + "_input_type": "StrInput", + "advanced": true, + "display_name": "Legacy Vector Field Name", + "dynamic": false, + "info": "Legacy field name for backward compatibility. New documents use dynamic fields (chunk_embedding_{model_name}) based on the embedding_model_name.", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "vector_field", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "chunk_embedding" + }, + "verify_certs": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Verify SSL Certificates", + "dynamic": false, + "info": "Verify SSL certificates when connecting. 
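The Legacy Vector Field Name help text above describes dynamic per-model fields of the form chunk_embedding_{model_name}. One plausible derivation is sketched below; the slugification rule is an assumption rather than the component's confirmed behavior:

# Hedged sketch of the dynamic embedding-field naming: the sanitization
# (non-alphanumerics collapsed to underscores) is an assumption.
import re

def embedding_field_for(model_name: str, legacy_field: str = "chunk_embedding") -> str:
    if not model_name:
        return legacy_field  # fall back to the legacy field
    slug = re.sub(r"[^a-zA-Z0-9]+", "_", model_name).strip("_").lower()
    return f"chunk_embedding_{slug}"

assert embedding_field_for("bge-large:latest") == "chunk_embedding_bge_large_latest"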
Disable for self-signed certificates in development environments.", + "list": false, + "list_add_label": "Add More", + "name": "verify_certs", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": false + } + }, + "tool_mode": true + }, + "selected_output": "dataframe", + "showNode": true, + "type": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding" + }, + "dragging": false, + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE", + "measured": { + "height": 902, + "width": 320 + }, + "position": { + "x": 1098.7085719475467, + "y": 1410.4984401198574 + }, + "selected": true, + "type": "genericNode" + }, + { + "data": { + "description": "Generate embeddings using a specified provider.", + "display_name": "Embedding Model", + "id": "EmbeddingModel-J6YgA", + "node": { + "base_classes": [ + "Embeddings" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Generate embeddings using a specified provider.", + "display_name": "Embedding Model", + "documentation": "https://docs.langflow.org/components-embedding-models", + "edited": false, + "field_order": [ + "provider", + "api_base", + "ollama_base_url", + "base_url_ibm_watsonx", + "model", + "api_key", + "project_id", + "dimensions", + "chunk_size", + "request_timeout", + "max_retries", + "show_progress_bar", + "model_kwargs", + "truncate_input_tokens", + "input_text" + ], + "frozen": false, + "icon": "binary", + "last_updated": "2025-11-26T00:05:16.028Z", + "legacy": false, + "metadata": { + "code_hash": "9e44c83a5058", + "dependencies": { + "dependencies": [ + { + "name": "requests", + "version": "2.32.5" + }, + { + "name": "ibm_watsonx_ai", + "version": "1.4.2" + }, + { + "name": "langchain_openai", + "version": "0.3.23" + }, + { + "name": "lfx", + "version": null + }, + { + "name": "langchain_ollama", + "version": "0.3.10" + }, + { + "name": "langchain_community", + "version": "0.3.21" + }, + { + "name": "langchain_ibm", + "version": "0.3.19" + } + ], + "total_dependencies": 7 + }, + "module": "custom_components.embedding_model" + }, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Embedding Model", + "group_outputs": false, + "loop_types": null, + "method": "build_embeddings", + "name": "embeddings", + "options": null, + "required_inputs": null, + "selected": "Embeddings", + "tool_mode": true, + "types": [ + "Embeddings" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_frontend_node_flow_id": { + "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" + }, + "_frontend_node_folder_id": { + "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + }, + "_type": "Component", + "api_base": { + "_input_type": "MessageTextInput", + "advanced": true, + "display_name": "API Base URL", + "dynamic": false, + "info": "Base URL for the API. 
Leave empty for default.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "api_base", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "api_key": { + "_input_type": "SecretStrInput", + "advanced": false, + "display_name": "API Key (Optional)", + "dynamic": false, + "info": "Model Provider API key", + "input_types": [], + "load_from_db": true, + "name": "api_key", + "override_skip": false, + "password": true, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": false, + "title_case": false, + "track_in_telemetry": false, + "type": "str", + "value": "OPENAI_API_KEY" + }, + "base_url_ibm_watsonx": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "watsonx API Endpoint", + "dynamic": false, + "external_options": {}, + "info": "The base URL of the API (IBM watsonx.ai only)", + "name": "base_url_ibm_watsonx", + "options": [ + "https://us-south.ml.cloud.ibm.com", + "https://eu-de.ml.cloud.ibm.com", + "https://eu-gb.ml.cloud.ibm.com", + "https://au-syd.ml.cloud.ibm.com", + "https://jp-tok.ml.cloud.ibm.com", + "https://ca-tor.ml.cloud.ibm.com" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": false, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "https://us-south.ml.cloud.ibm.com" + }, + "chunk_size": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Chunk Size", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "chunk_size", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 1000 + }, + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL 
= \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and 
transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n 
build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await 
is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + }, + "dimensions": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Dimensions", + "dynamic": false, + "info": "The number of dimensions the resulting output embeddings should have. Only supported by certain models.", + "list": false, + "list_add_label": "Add More", + "name": "dimensions", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": "" + }, + "input_text": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Include the original text in the output", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "input_text", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, + "is_refresh": false, + "max_retries": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Max Retries", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "max_retries", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 3 + }, + "model": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Model Name", + "dynamic": false, + "external_options": {}, + "info": "Select the embedding model to use", + "name": "model", + "options": [ + "bge-large:latest", + "qwen3-embedding:4b" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "refresh_button": true, + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "bge-large:latest" + }, + "model_kwargs": { + "_input_type": "DictInput", + "advanced": true, + "display_name": "Model Kwargs", + "dynamic": false, + "info": "Additional keyword arguments to pass to the model.", + "list": false, + "list_add_label": "Add More", + "name": "model_kwargs", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "track_in_telemetry": false, + "type": "dict", + "value": {} + }, + "ollama_base_url": { + "_input_type": "MessageTextInput", + "advanced": false, + "display_name": "Ollama API URL", + "dynamic": false, + "info": "Endpoint of the Ollama API (Ollama only). 
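The IBM watsonx.ai branch added to the embedding component above reduces to the following construction. This sketch mirrors the embedded code directly; only the API key, project ID, and model ID values are placeholders:

# Sketch of the watsonx.ai embedding setup: truncate long inputs and
# optionally echo the original text back in the response.
from ibm_watsonx_ai import APIClient, Credentials
from ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames
from langchain_ibm import WatsonxEmbeddings

credentials = Credentials(api_key="WATSONX_API_KEY", url="https://us-south.ml.cloud.ibm.com")
api_client = APIClient(credentials)

params = {
    EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: 200,  # clip long inputs
    EmbedTextParamsMetaNames.RETURN_OPTIONS: {"input_text": True},  # echo source text
}
embeddings = WatsonxEmbeddings(
    model_id="ibm/granite-embedding-278m-multilingual",
    params=params,
    watsonx_client=api_client,
    project_id="WATSONX_PROJECT_ID",
)
vector = embeddings.embed_query("hello world")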
Defaults to http://localhost:11434", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": true, + "name": "ollama_base_url", + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "OLLAMA_BASE_URL" + }, + "project_id": { + "_input_type": "MessageTextInput", + "advanced": false, + "display_name": "Project ID", + "dynamic": false, + "info": "IBM watsonx.ai Project ID (required for IBM watsonx.ai)", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "project_id", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "provider": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Model Provider", + "dynamic": false, + "external_options": {}, + "info": "Select the embedding model provider", + "name": "provider", + "options": [ + "OpenAI", + "Ollama", + "IBM watsonx.ai" + ], + "options_metadata": [ + { + "icon": "OpenAI" + }, + { + "icon": "Ollama" + }, + { + "icon": "WatsonxAI" + } + ], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "selected_metadata": { + "icon": "Ollama" + }, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "Ollama" + }, + "request_timeout": { + "_input_type": "FloatInput", + "advanced": true, + "display_name": "Request Timeout", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "request_timeout", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "float", + "value": "" + }, + "show_progress_bar": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Show Progress Bar", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "show_progress_bar", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": false + }, + "truncate_input_tokens": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Truncate Input Tokens", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "truncate_input_tokens", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 200 + } + }, + "tool_mode": false + }, + "showNode": true, + "type": "EmbeddingModel" + }, + "id": "EmbeddingModel-J6YgA", + "measured": { + "height": 369, + "width": 320 + }, + "position": { + "x": 494.1867639968285, + "y": 1470.1965849152762 + }, + "selected": false, + "type": "genericNode" + }, + { + "data": { + "description": "Generate embeddings using a 
specified provider.", + "display_name": "Embedding Model", + "id": "EmbeddingModel-zLHKs", + "node": { + "base_classes": [ + "Embeddings" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Generate embeddings using a specified provider.", + "display_name": "Embedding Model", + "documentation": "https://docs.langflow.org/components-embedding-models", + "edited": false, + "field_order": [ + "provider", + "api_base", + "ollama_base_url", + "base_url_ibm_watsonx", + "model", + "api_key", + "project_id", + "dimensions", + "chunk_size", + "request_timeout", + "max_retries", + "show_progress_bar", + "model_kwargs", + "truncate_input_tokens", + "input_text" + ], + "frozen": false, + "icon": "binary", + "last_updated": "2025-11-26T00:05:16.029Z", + "legacy": false, + "metadata": { + "code_hash": "9e44c83a5058", + "dependencies": { + "dependencies": [ + { + "name": "requests", + "version": "2.32.5" + }, + { + "name": "ibm_watsonx_ai", + "version": "1.4.2" + }, + { + "name": "langchain_openai", + "version": "0.3.23" + }, + { + "name": "lfx", + "version": null + }, + { + "name": "langchain_ollama", + "version": "0.3.10" + }, + { + "name": "langchain_community", + "version": "0.3.21" + }, + { + "name": "langchain_ibm", + "version": "0.3.19" + } + ], + "total_dependencies": 7 + }, + "module": "custom_components.embedding_model" + }, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Embedding Model", + "group_outputs": false, + "loop_types": null, + "method": "build_embeddings", + "name": "embeddings", + "options": null, + "required_inputs": null, + "selected": "Embeddings", + "tool_mode": true, + "types": [ + "Embeddings" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_frontend_node_flow_id": { + "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" + }, + "_frontend_node_folder_id": { + "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + }, + "_type": "Component", + "api_base": { + "_input_type": "MessageTextInput", + "advanced": true, + "display_name": "API Base URL", + "dynamic": false, + "info": "Base URL for the API. 
Leave empty for default.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "api_base", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "api_key": { + "_input_type": "SecretStrInput", + "advanced": false, + "display_name": "IBM watsonx.ai API Key", + "dynamic": false, + "info": "Model Provider API key", + "input_types": [], + "load_from_db": true, + "name": "api_key", + "override_skip": false, + "password": true, + "placeholder": "", + "real_time_refresh": true, + "required": true, + "show": true, + "title_case": false, + "track_in_telemetry": false, + "type": "str", + "value": "WATSONX_API_KEY" + }, + "base_url_ibm_watsonx": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "watsonx API Endpoint", + "dynamic": false, + "external_options": {}, + "info": "The base URL of the API (IBM watsonx.ai only)", + "name": "base_url_ibm_watsonx", + "options": [ + "https://us-south.ml.cloud.ibm.com", + "https://eu-de.ml.cloud.ibm.com", + "https://eu-gb.ml.cloud.ibm.com", + "https://au-syd.ml.cloud.ibm.com", + "https://jp-tok.ml.cloud.ibm.com", + "https://ca-tor.ml.cloud.ibm.com" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "https://us-south.ml.cloud.ibm.com" + }, + "chunk_size": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Chunk Size", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "chunk_size", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 1000 + }, + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = 
\"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and 
transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n 
build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await 
is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + }, + "dimensions": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Dimensions", + "dynamic": false, + "info": "The number of dimensions the resulting output embeddings should have. Only supported by certain models.", + "list": false, + "list_add_label": "Add More", + "name": "dimensions", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": "" + }, + "input_text": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Include the original text in the output", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "input_text", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, + "is_refresh": false, + "max_retries": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Max Retries", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "max_retries", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 3 + }, + "model": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Model Name", + "dynamic": false, + "external_options": {}, + "info": "Select the embedding model to use", + "name": "model", + "options": [ + "ibm/granite-embedding-278m-multilingual", + "ibm/slate-125m-english-rtrvr-v2", + "ibm/slate-30m-english-rtrvr-v2", + "intfloat/multilingual-e5-large", + "sentence-transformers/all-minilm-l6-v2" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "refresh_button": true, + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "ibm/granite-embedding-278m-multilingual" + }, + "model_kwargs": { + "_input_type": "DictInput", + "advanced": true, + "display_name": "Model Kwargs", + "dynamic": false, + "info": "Additional keyword arguments to pass to the model.", + "list": false, + "list_add_label": "Add More", + "name": "model_kwargs", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "track_in_telemetry": false, + "type": "dict", + "value": {} + }, + "ollama_base_url": { + "_input_type": "MessageTextInput", + "advanced": false, + "display_name": "Ollama API URL", + "dynamic": false, + "info": "Endpoint of the Ollama API (Ollama only). 
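To see the provider switch above in action, a hypothetical driver would look like the following. The names `component` and `cfg` are illustrative (a constructed embedding component instance and its build-config dict), and the calls must run inside an async context.

    # Switching the provider shows/hides the watsonx-specific fields.
    cfg = await component.update_build_config(cfg, "IBM watsonx.ai", field_name="provider")
    assert cfg["project_id"]["show"] and cfg["truncate_input_tokens"]["show"]
    cfg = await component.update_build_config(cfg, "OpenAI", field_name="provider")
    assert cfg["base_url_ibm_watsonx"]["show"] is False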
Defaults to http://localhost:11434", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": true, + "name": "ollama_base_url", + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "OLLAMA_BASE_URL" + }, + "project_id": { + "_input_type": "MessageTextInput", + "advanced": false, + "display_name": "Project ID", + "dynamic": false, + "info": "IBM watsonx.ai Project ID (required for IBM watsonx.ai)", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": true, + "name": "project_id", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "WATSONX_PROJECT_ID" + }, + "provider": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Model Provider", + "dynamic": false, + "external_options": {}, + "info": "Select the embedding model provider", + "name": "provider", + "options": [ + "OpenAI", + "Ollama", + "IBM watsonx.ai" + ], + "options_metadata": [ + { + "icon": "OpenAI" + }, + { + "icon": "Ollama" + }, + { + "icon": "WatsonxAI" + } + ], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "selected_metadata": { + "icon": "WatsonxAI" + }, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "IBM watsonx.ai" + }, + "request_timeout": { + "_input_type": "FloatInput", + "advanced": true, + "display_name": "Request Timeout", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "request_timeout", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "float", + "value": "" + }, + "show_progress_bar": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Show Progress Bar", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "show_progress_bar", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": false + }, + "truncate_input_tokens": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Truncate Input Tokens", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "truncate_input_tokens", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 200 + } + }, + "tool_mode": false + }, + "showNode": true, + "type": "EmbeddingModel" + }, + "dragging": false, + "id": "EmbeddingModel-zLHKs", + "measured": { + "height": 534, + "width": 320 + }, + "position": { + "x": 488.01930107919225, + "y": 914.6994738821654 }, "selected": false, "type": "genericNode" } ], "viewport": { - "x": 
91.72060325190853, - "y": -32.331644762411656, - "zoom": 0.6206869895512208 + "x": -62.28000558582107, + "y": -442.0218158718001, + "zoom": 0.5391627896946471 } }, "description": "OpenRAG OpenSearch Agent", "endpoint_name": null, "id": "1098eea1-6649-4e1d-aed1-b77249fb8dd0", "is_component": false, - "last_tested_version": "1.7.0.dev19", + "last_tested_version": "1.7.0", "name": "OpenRAG OpenSearch Agent", "tags": [ "assistants", diff --git a/flows/openrag_nudges.json b/flows/openrag_nudges.json index 702353a3..32c4e0c6 100644 --- a/flows/openrag_nudges.json +++ b/flows/openrag_nudges.json @@ -29,35 +29,6 @@ "target": "Prompt Template-Wo6kR", "targetHandle": "{œfieldNameœ:œdocsœ,œidœ:œPrompt Template-Wo6kRœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}" }, - { - "animated": false, - "className": "", - "data": { - "sourceHandle": { - "dataType": "OpenSearchVectorStoreComponent", - "id": "OpenSearch-iYfjf", - "name": "dataframe", - "output_types": [ - "DataFrame" - ] - }, - "targetHandle": { - "fieldName": "input_data", - "id": "ParserComponent-tZs7s", - "inputTypes": [ - "DataFrame", - "Data" - ], - "type": "other" - } - }, - "id": "xy-edge__OpenSearch-iYfjf{œdataTypeœ:œOpenSearchVectorStoreComponentœ,œidœ:œOpenSearch-iYfjfœ,œnameœ:œdataframeœ,œoutput_typesœ:[œDataFrameœ]}-ParserComponent-tZs7s{œfieldNameœ:œinput_dataœ,œidœ:œParserComponent-tZs7sœ,œinputTypesœ:[œDataFrameœ,œDataœ],œtypeœ:œotherœ}", - "selected": false, - "source": "OpenSearch-iYfjf", - "sourceHandle": "{œdataTypeœ:œOpenSearchVectorStoreComponentœ,œidœ:œOpenSearch-iYfjfœ,œnameœ:œdataframeœ,œoutput_typesœ:[œDataFrameœ]}", - "target": "ParserComponent-tZs7s", - "targetHandle": "{œfieldNameœ:œinput_dataœ,œidœ:œParserComponent-tZs7sœ,œinputTypesœ:[œDataFrameœ,œDataœ],œtypeœ:œotherœ}" - }, { "animated": false, "className": "", @@ -86,34 +57,6 @@ "target": "Prompt Template-Wo6kR", "targetHandle": "{œfieldNameœ:œpromptœ,œidœ:œPrompt Template-Wo6kRœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}" }, - { - "animated": false, - "className": "", - "data": { - "sourceHandle": { - "dataType": "EmbeddingModel", - "id": "EmbeddingModel-26o1e", - "name": "embeddings", - "output_types": [ - "Embeddings" - ] - }, - "targetHandle": { - "fieldName": "embedding", - "id": "OpenSearch-iYfjf", - "inputTypes": [ - "Embeddings" - ], - "type": "other" - } - }, - "id": "xy-edge__EmbeddingModel-26o1e{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-26o1eœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}-OpenSearch-iYfjf{œfieldNameœ:œembeddingœ,œidœ:œOpenSearch-iYfjfœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}", - "selected": false, - "source": "EmbeddingModel-26o1e", - "sourceHandle": "{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-26o1eœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}", - "target": "OpenSearch-iYfjf", - "targetHandle": "{œfieldNameœ:œembeddingœ,œidœ:œOpenSearch-iYfjfœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}" - }, { "animated": false, "className": "", @@ -172,6 +115,90 @@ "target": "ChatOutput-axewE", "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-axewEœ,œinputTypesœ:[œDataœ,œDataFrameœ,œMessageœ],œtypeœ:œotherœ}" }, + { + "animated": false, + "className": "", + "data": { + "sourceHandle": { + "dataType": "EmbeddingModel", + "id": "EmbeddingModel-ooLFP", + "name": "embeddings", + "output_types": [ + "Embeddings" + ] + }, + "targetHandle": { + "fieldName": "embedding", + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3", + "inputTypes": [ + "Embeddings" + ], + "type": "other" + } + }, + "id": 
"xy-edge__EmbeddingModel-ooLFP{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-ooLFPœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}-OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3œ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}", + "selected": false, + "source": "EmbeddingModel-ooLFP", + "sourceHandle": "{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-ooLFPœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}", + "target": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3", + "targetHandle": "{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3œ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}" + }, + { + "animated": false, + "className": "", + "data": { + "sourceHandle": { + "dataType": "EmbeddingModel", + "id": "EmbeddingModel-EzcW6", + "name": "embeddings", + "output_types": [ + "Embeddings" + ] + }, + "targetHandle": { + "fieldName": "embedding", + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3", + "inputTypes": [ + "Embeddings" + ], + "type": "other" + } + }, + "id": "xy-edge__EmbeddingModel-EzcW6{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-EzcW6œ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}-OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3œ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}", + "selected": false, + "source": "EmbeddingModel-EzcW6", + "sourceHandle": "{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-EzcW6œ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}", + "target": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3", + "targetHandle": "{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3œ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}" + }, + { + "animated": false, + "className": "", + "data": { + "sourceHandle": { + "dataType": "EmbeddingModel", + "id": "EmbeddingModel-cONxU", + "name": "embeddings", + "output_types": [ + "Embeddings" + ] + }, + "targetHandle": { + "fieldName": "embedding", + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3", + "inputTypes": [ + "Embeddings" + ], + "type": "other" + } + }, + "id": "xy-edge__EmbeddingModel-cONxU{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-cONxUœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}-OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3œ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}", + "selected": false, + "source": "EmbeddingModel-cONxU", + "sourceHandle": "{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-cONxUœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}", + "target": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3", + "targetHandle": "{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3œ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}" + }, { "animated": false, "className": "", @@ -186,645 +213,51 @@ }, "targetHandle": { "fieldName": "filter_expression", - "id": "OpenSearch-iYfjf", + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3", "inputTypes": [ "Message" ], "type": "str" } }, - "id": 
"xy-edge__TextInput-4cEHx{œdataTypeœ:œTextInputœ,œidœ:œTextInput-4cEHxœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-OpenSearch-iYfjf{œfieldNameœ:œfilter_expressionœ,œidœ:œOpenSearch-iYfjfœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "id": "xy-edge__TextInput-4cEHx{œdataTypeœ:œTextInputœ,œidœ:œTextInput-4cEHxœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3{œfieldNameœ:œfilter_expressionœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3œ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", "selected": false, "source": "TextInput-4cEHx", "sourceHandle": "{œdataTypeœ:œTextInputœ,œidœ:œTextInput-4cEHxœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", - "target": "OpenSearch-iYfjf", - "targetHandle": "{œfieldNameœ:œfilter_expressionœ,œidœ:œOpenSearch-iYfjfœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}" + "target": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3", + "targetHandle": "{œfieldNameœ:œfilter_expressionœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3œ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}" + }, + { + "animated": false, + "className": "", + "data": { + "sourceHandle": { + "dataType": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding", + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3", + "name": "dataframe", + "output_types": [ + "DataFrame" + ] + }, + "targetHandle": { + "fieldName": "input_data", + "id": "ParserComponent-tZs7s", + "inputTypes": [ + "DataFrame", + "Data" + ], + "type": "other" + } + }, + "id": "xy-edge__OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3{œdataTypeœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3œ,œnameœ:œdataframeœ,œoutput_typesœ:[œDataFrameœ]}-ParserComponent-tZs7s{œfieldNameœ:œinput_dataœ,œidœ:œParserComponent-tZs7sœ,œinputTypesœ:[œDataFrameœ,œDataœ],œtypeœ:œotherœ}", + "selected": false, + "source": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3", + "sourceHandle": "{œdataTypeœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3œ,œnameœ:œdataframeœ,œoutput_typesœ:[œDataFrameœ]}", + "target": "ParserComponent-tZs7s", + "targetHandle": "{œfieldNameœ:œinput_dataœ,œidœ:œParserComponent-tZs7sœ,œinputTypesœ:[œDataFrameœ,œDataœ],œtypeœ:œotherœ}" } ], "nodes": [ - { - "data": { - "id": "OpenSearch-iYfjf", - "node": { - "base_classes": [ - "Data", - "DataFrame", - "VectorStore" - ], - "beta": false, - "conditional_paths": [], - "custom_fields": {}, - "description": "Store and search documents using OpenSearch with hybrid semantic and keyword search capabilities.", - "display_name": "OpenSearch", - "documentation": "", - "edited": true, - "field_order": [ - "docs_metadata", - "opensearch_url", - "index_name", - "engine", - "space_type", - "ef_construction", - "m", - "ingest_data", - "search_query", - "should_cache_vector_store", - "embedding", - "vector_field", - "number_of_results", - "filter_expression", - "auth_mode", - "username", - "password", - "jwt_token", - "jwt_header", - "bearer_prefix", - "use_ssl", - "verify_certs" - ], - "frozen": false, - "icon": "OpenSearch", - "legacy": false, - "metadata": { - "code_hash": "c81b23acb81a", - "dependencies": { - "dependencies": [ - { - "name": "opensearchpy", - "version": "2.8.0" - }, - { - "name": "lfx", - "version": null - } - ], - "total_dependencies": 2 - }, - "module": "custom_components.opensearch" - }, - "minimized": false, - 
"output_types": [], - "outputs": [ - { - "allows_loop": false, - "cache": true, - "display_name": "Search Results", - "group_outputs": false, - "hidden": null, - "method": "search_documents", - "name": "search_results", - "options": null, - "required_inputs": null, - "selected": "Data", - "tool_mode": true, - "types": [ - "Data" - ], - "value": "__UNDEFINED__" - }, - { - "allows_loop": false, - "cache": true, - "display_name": "DataFrame", - "group_outputs": false, - "hidden": null, - "method": "as_dataframe", - "name": "dataframe", - "options": null, - "required_inputs": null, - "selected": "DataFrame", - "tool_mode": true, - "types": [ - "DataFrame" - ], - "value": "__UNDEFINED__" - }, - { - "allows_loop": false, - "cache": true, - "display_name": "Vector Store Connection", - "group_outputs": false, - "hidden": false, - "method": "as_vector_store", - "name": "vectorstoreconnection", - "options": null, - "required_inputs": null, - "selected": "VectorStore", - "tool_mode": true, - "types": [ - "VectorStore" - ], - "value": "__UNDEFINED__" - } - ], - "pinned": false, - "template": { - "_type": "Component", - "auth_mode": { - "_input_type": "DropdownInput", - "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Authentication Mode", - "dynamic": false, - "external_options": {}, - "info": "Authentication method: 'basic' for username/password authentication, or 'jwt' for JSON Web Token (Bearer) authentication.", - "load_from_db": false, - "name": "auth_mode", - "options": [ - "basic", - "jwt" - ], - "options_metadata": [], - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": true, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "jwt" - }, - "bearer_prefix": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Prefix 'Bearer '", - "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "name": "bearer_prefix", - "placeholder": "", - "required": false, - "show": false, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "bool", - "value": true - }, - "code": { - "advanced": true, - "dynamic": true, - "fileTypes": [], - "file_path": "", - "info": "", - "list": false, - "load_from_db": false, - "multiline": true, - "name": "code", - "password": false, - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "type": "code", - "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom typing import Any, List, Optional\n\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., 
\"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponent(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. 
\"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"]\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. \"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"]),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model being used (e.g., 'text-embedding-3-small'). \"\n \"Used to create dynamic vector field names and track which model embedded each document. \"\n \"Auto-detected from embedding component if not specified.\"\n ),\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=False,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=False,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=True,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. \"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from embedding component\n if hasattr(self, \"embedding\") and self.embedding:\n if hasattr(self.embedding, \"model\"):\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_name\"):\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'model' or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\n \"type\": \"keyword\"\n },\n \"embedding_dimensions\": {\n \"type\": \"integer\"\n }\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n raise ValueError(\n f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n )\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication 
configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n self.log(self.ingest_data)\n client = self.build_client()\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n docs = self.ingest_data or []\n if not docs:\n self.log(\"No documents to ingest.\")\n return\n\n # Get embedding model name\n embedding_model = self._get_embedding_model_name()\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n self.log(f\"Using embedding model: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] 
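For reference, _build_auth_kwargs() above yields one of two shapes, which build_client() splats into the opensearch-py constructor. A minimal sketch of the resulting JWT-mode client, with an illustrative token:

    from opensearchpy import OpenSearch

    client = OpenSearch(
        hosts=["http://localhost:9200"],
        use_ssl=True,
        verify_certs=False,          # component default, for self-signed dev certs
        ssl_assert_hostname=False,
        ssl_show_warn=False,
        headers={"Authorization": "Bearer <token>"},  # JWT mode; basic mode passes http_auth=(user, pwd) instead
    )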
Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Generate embeddings (threaded for concurrency) with retries\n def embed_chunk(chunk_text: str) -> list[float]:\n return self.embedding.embed_documents([chunk_text])[0]\n\n vectors: Optional[List[List[float]]] = None\n last_exception: Optional[Exception] = None\n delay = 1.0\n attempts = 0\n\n while attempts < 3:\n attempts += 1\n try:\n max_workers = min(max(len(texts), 1), 8)\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n vectors = [None] * len(texts)\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n break\n except Exception as exc:\n last_exception = exc\n if attempts >= 3:\n logger.error(\n \"Embedding generation failed after retries\",\n error=str(exc),\n )\n raise\n logger.warning(\n \"Threaded embedding generation failed (attempt %s/%s), retrying in %.1fs\",\n attempts,\n 3,\n delay,\n )\n time.sleep(delay)\n delay = min(delay * 2, 8.0)\n\n if vectors is None:\n raise RuntimeError(\n f\"Embedding generation failed: {last_exception}\" if last_exception else \"Embedding generation failed\"\n )\n\n if not vectors:\n self.log(\"No vectors generated from documents.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(\n f\"Failed to create index '{self.index_name}': {creation_error}\"\n )\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use 
dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\n \"size\": 0,\n \"aggs\": {\n \"embedding_models\": {\n \"terms\": {\n \"field\": \"embedding_model\",\n \"size\": 10\n }\n }\n }\n }\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\n \"bool\": {\n 
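A hand-worked example of the Format B coercion described above (derived from the code, not an official fixture):

    # Input filter_expression:
    #   {"data_sources": ["report.pdf"], "owners": []}
    # Resulting clauses:
    #   [{"term": {"filename": "report.pdf"}},
    #    {"term": {"owner": "__IMPOSSIBLE_VALUE__"}}]  # empty list -> match-nothing placeholder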
\"filter\": filter_clauses\n }\n }\n\n result = client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n return models\n except Exception as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n return [self._get_embedding_model_name()]\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except Exception as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(\n f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\"\n )\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return True\n\n return False\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. 
Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models in parallel\n query_embeddings = {}\n\n # Note: Langflow is synchronous, so we can't use true async here\n # But we log the intent for parallel processing\n logger.info(f\"Generating embeddings for {len(available_models)} models\")\n\n original_model_attr = getattr(self.embedding, \"model\", None)\n original_deployment_attr = getattr(self.embedding, \"deployment\", None)\n original_dimensions_attr = getattr(self.embedding, \"dimensions\", None)\n\n for model_name in available_models:\n try:\n # In a real async environment, these would run in parallel\n # For now, they run sequentially\n if hasattr(self.embedding, \"model\"):\n setattr(self.embedding, \"model\", model_name)\n if hasattr(self.embedding, \"deployment\"):\n setattr(self.embedding, \"deployment\", model_name)\n if hasattr(self.embedding, \"dimensions\"):\n setattr(self.embedding, \"dimensions\", None)\n vec = self.embedding.embed_query(q)\n query_embeddings[model_name] = vec\n logger.info(f\"Generated embedding for model: {model_name}\")\n except Exception as e:\n logger.error(f\"Failed to generate embedding for {model_name}: {e}\")\n\n if hasattr(self.embedding, \"model\"):\n setattr(self.embedding, \"model\", original_model_attr)\n if hasattr(self.embedding, \"deployment\"):\n setattr(self.embedding, \"deployment\", original_deployment_attr)\n if hasattr(self.embedding, \"dimensions\"):\n setattr(self.embedding, \"dimensions\", original_dimensions_attr)\n\n if not query_embeddings:\n msg = \"Failed to generate embeddings for any model\"\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, 
embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n continue\n\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\n \"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)],\n \"minimum_should_match\": 1\n }\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(\n f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models\"\n )\n\n try:\n resp = client.search(\n index=self.index_name, body=body, params={\"terminate_after\": 0}\n )\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates 
parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n raw = self.search(self.search_query or \"\")\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n 
build_config[\"bearer_prefix\"][\"required\"] = False\n\n if is_basic:\n build_config[\"jwt_token\"][\"value\"] = \"\"\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n" - }, - "docs_metadata": { - "_input_type": "TableInput", - "advanced": false, - "display_name": "Document Metadata", - "dynamic": false, - "info": "Additional metadata key-value pairs to be added to all ingested documents. Useful for tagging documents with source information, categories, or other custom attributes.", - "input_types": [ - "Data" - ], - "is_list": true, - "list_add_label": "Add More", - "name": "docs_metadata", - "placeholder": "", - "required": false, - "show": true, - "table_icon": "Table", - "table_schema": [ - { - "description": "Key name", - "display_name": "Key", - "formatter": "text", - "name": "key", - "type": "str" - }, - { - "description": "Value of the metadata", - "display_name": "Value", - "formatter": "text", - "name": "value", - "type": "str" - } - ], - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "trigger_icon": "Table", - "trigger_text": "Open table", - "type": "table", - "value": [] - }, - "ef_construction": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "EF Construction", - "dynamic": false, - "info": "Size of the dynamic candidate list during index construction. Higher values improve recall but increase indexing time and memory usage.", - "list": false, - "list_add_label": "Add More", - "name": "ef_construction", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "int", - "value": 512 - }, - "embedding": { - "_input_type": "HandleInput", - "advanced": false, - "display_name": "Embedding", - "dynamic": false, - "info": "", - "input_types": [ - "Embeddings" - ], - "list": false, - "list_add_label": "Add More", - "name": "embedding", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "trace_as_metadata": true, - "type": "other", - "value": "" - }, - "engine": { - "_input_type": "DropdownInput", - "advanced": true, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Vector Engine", - "dynamic": false, - "external_options": {}, - "info": "Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.", - "name": "engine", - "options": [ - "jvector", - "nmslib", - "faiss", - "lucene" - ], - "options_metadata": [], - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "jvector" - }, - "filter_expression": { - "_input_type": "MultilineInput", - "advanced": false, - "copy_field": false, - "display_name": "Search Filters (JSON)", - "dynamic": false, - "info": "Optional JSON configuration for search filtering, result limits, and score thresholds.\n\nFormat 1 - Explicit filters:\n{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, {\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\n\nFormat 2 - Context-style mapping:\n{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\n\nUse __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "multiline": true, - "name": "filter_expression", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, - "index_name": { - "_input_type": "StrInput", - "advanced": false, - "display_name": "Index Name", - "dynamic": false, - "info": "The OpenSearch index name where documents will be stored and searched. Will be created automatically if it doesn't exist.", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "index_name", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "documents" - }, - "ingest_data": { - "_input_type": "HandleInput", - "advanced": false, - "display_name": "Ingest Data", - "dynamic": false, - "info": "", - "input_types": [ - "Data", - "DataFrame" - ], - "list": true, - "list_add_label": "Add More", - "name": "ingest_data", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "trace_as_metadata": true, - "type": "other", - "value": "" - }, - "jwt_header": { - "_input_type": "StrInput", - "advanced": true, - "display_name": "JWT Header Name", - "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "jwt_header", - "placeholder": "", - "required": false, - "show": false, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "Authorization" - }, - "jwt_token": { - "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "JWT Token", - "dynamic": false, - "info": "Valid JSON Web Token for authentication. Will be sent in the Authorization header (with optional 'Bearer ' prefix).", - "input_types": [], - "load_from_db": true, - "name": "jwt_token", - "password": true, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "str", - "value": "JWT" - }, - "m": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "M Parameter", - "dynamic": false, - "info": "Number of bidirectional connections for each vector in the HNSW graph. 
Higher values improve search quality but increase memory usage and indexing time.", - "list": false, - "list_add_label": "Add More", - "name": "m", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "int", - "value": 16 - }, - "number_of_results": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "Default Result Limit", - "dynamic": false, - "info": "Default maximum number of search results to return when no limit is specified in the filter expression.", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "number_of_results", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "int", - "value": 4 - }, - "opensearch_url": { - "_input_type": "StrInput", - "advanced": false, - "display_name": "OpenSearch URL", - "dynamic": false, - "info": "The connection URL for your OpenSearch cluster (e.g., http://localhost:9200 for local development or your cloud endpoint).", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "opensearch_url", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "https://opensearch:9200" - }, - "password": { - "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "OpenSearch Password", - "dynamic": false, - "info": "", - "input_types": [], - "load_from_db": false, - "name": "password", - "password": true, - "placeholder": "", - "required": false, - "show": false, - "title_case": false, - "type": "str", - "value": "o8h@xwLus123o" - }, - "search_query": { - "_input_type": "QueryInput", - "advanced": false, - "display_name": "Search Query", - "dynamic": false, - "info": "Enter a query to run a similarity search.", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "search_query", - "placeholder": "Enter a query...", - "required": false, - "show": true, - "title_case": false, - "tool_mode": true, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "query", - "value": "" - }, - "should_cache_vector_store": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Cache Vector Store", - "dynamic": false, - "info": "If True, the vector store will be cached for the current build of the component. This is useful for components that have multiple output methods and want to share the same vector store.", - "list": false, - "list_add_label": "Add More", - "name": "should_cache_vector_store", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "bool", - "value": true - }, - "space_type": { - "_input_type": "DropdownInput", - "advanced": true, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Distance Metric", - "dynamic": false, - "external_options": {}, - "info": "Distance metric for calculating vector similarity. 
'l2' (Euclidean) is most common, 'cosinesimil' for cosine similarity, 'innerproduct' for dot product.", - "name": "space_type", - "options": [ - "l2", - "l1", - "cosinesimil", - "linf", - "innerproduct" - ], - "options_metadata": [], - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "l2" - }, - "use_ssl": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Use SSL/TLS", - "dynamic": false, - "info": "Enable SSL/TLS encryption for secure connections to OpenSearch.", - "list": false, - "list_add_label": "Add More", - "name": "use_ssl", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "bool", - "value": true - }, - "username": { - "_input_type": "StrInput", - "advanced": false, - "display_name": "Username", - "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "username", - "placeholder": "", - "required": false, - "show": false, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "admin" - }, - "vector_field": { - "_input_type": "StrInput", - "advanced": true, - "display_name": "Vector Field Name", - "dynamic": false, - "info": "Name of the field in OpenSearch documents that stores the vector embeddings for similarity search.", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "vector_field", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "chunk_embedding" - }, - "verify_certs": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Verify SSL Certificates", - "dynamic": false, - "info": "Verify SSL certificates when connecting. 
Disable for self-signed certificates in development environments.", - "list": false, - "list_add_label": "Add More", - "name": "verify_certs", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "bool", - "value": false - } - }, - "tool_mode": false - }, - "selected_output": "dataframe", - "showNode": true, - "type": "OpenSearchVectorStoreComponent" - }, - "dragging": false, - "id": "OpenSearch-iYfjf", - "measured": { - "height": 822, - "width": 320 - }, - "position": { - "x": 876.8213370559117, - "y": 136.89961010992386 - }, - "selected": false, - "type": "genericNode" - }, { "data": { "id": "Prompt Template-Wo6kR", @@ -1006,8 +439,8 @@ "width": 320 }, "position": { - "x": 1669.0365272581178, - "y": 712.1086273287026 + "x": 1321.0365272581178, + "y": 1260.1086273287026 }, "selected": false, "type": "genericNode" @@ -1026,7 +459,7 @@ "custom_fields": {}, "description": "Extracts text using a template.", "display_name": "Parser", - "documentation": "https://docs.langflow.org/components-processing#parser", + "documentation": "https://docs.langflow.org/parser", "edited": false, "field_order": [ "input_data", @@ -1038,7 +471,7 @@ "icon": "braces", "legacy": false, "metadata": { - "code_hash": "17514953c7e8", + "code_hash": "3cda25c3f7b5", "dependencies": { "dependencies": [ { @@ -1058,6 +491,7 @@ "cache": true, "display_name": "Parsed Text", "group_outputs": false, + "loop_types": null, "method": "parse_combined_text", "name": "parsed_text", "options": null, @@ -1089,7 +523,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from lfx.custom.custom_component.component import Component\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, HandleInput, MessageTextInput, MultilineInput, TabInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.template.field.base import Output\n\n\nclass ParserComponent(Component):\n display_name = \"Parser\"\n description = \"Extracts text using a template.\"\n documentation: str = \"https://docs.langflow.org/components-processing#parser\"\n icon = \"braces\"\n\n inputs = [\n HandleInput(\n name=\"input_data\",\n display_name=\"Data or DataFrame\",\n input_types=[\"DataFrame\", \"Data\"],\n info=\"Accepts either a DataFrame or a Data object.\",\n required=True,\n ),\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Parser\", \"Stringify\"],\n value=\"Parser\",\n info=\"Convert into raw string instead of using a template.\",\n real_time_refresh=True,\n ),\n MultilineInput(\n name=\"pattern\",\n display_name=\"Template\",\n info=(\n \"Use variables within curly brackets to extract column values for DataFrames \"\n \"or key values for Data.\"\n \"For example: `Name: {Name}, Age: {Age}, Country: {Country}`\"\n ),\n value=\"Text: {text}\", # Example default\n dynamic=True,\n show=True,\n required=True,\n ),\n MessageTextInput(\n name=\"sep\",\n display_name=\"Separator\",\n advanced=True,\n value=\"\\n\",\n info=\"String used to separate rows/items.\",\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"Parsed Text\",\n name=\"parsed_text\",\n info=\"Formatted text output.\",\n method=\"parse_combined_text\",\n ),\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n \"\"\"Dynamically hide/show `template` and enforce requirement based on `stringify`.\"\"\"\n if field_name == \"mode\":\n 
build_config[\"pattern\"][\"show\"] = self.mode == \"Parser\"\n build_config[\"pattern\"][\"required\"] = self.mode == \"Parser\"\n if field_value:\n clean_data = BoolInput(\n name=\"clean_data\",\n display_name=\"Clean Data\",\n info=(\n \"Enable to clean the data by removing empty rows and lines \"\n \"in each cell of the DataFrame/ Data object.\"\n ),\n value=True,\n advanced=True,\n required=False,\n )\n build_config[\"clean_data\"] = clean_data.to_dict()\n else:\n build_config.pop(\"clean_data\", None)\n\n return build_config\n\n def _clean_args(self):\n \"\"\"Prepare arguments based on input type.\"\"\"\n input_data = self.input_data\n\n match input_data:\n case list() if all(isinstance(item, Data) for item in input_data):\n msg = \"List of Data objects is not supported.\"\n raise ValueError(msg)\n case DataFrame():\n return input_data, None\n case Data():\n return None, input_data\n case dict() if \"data\" in input_data:\n try:\n if \"columns\" in input_data: # Likely a DataFrame\n return DataFrame.from_dict(input_data), None\n # Likely a Data object\n return None, Data(**input_data)\n except (TypeError, ValueError, KeyError) as e:\n msg = f\"Invalid structured input provided: {e!s}\"\n raise ValueError(msg) from e\n case _:\n msg = f\"Unsupported input type: {type(input_data)}. Expected DataFrame or Data.\"\n raise ValueError(msg)\n\n def parse_combined_text(self) -> Message:\n \"\"\"Parse all rows/items into a single text or convert input to string if `stringify` is enabled.\"\"\"\n # Early return for stringify option\n if self.mode == \"Stringify\":\n return self.convert_to_string()\n\n df, data = self._clean_args()\n\n lines = []\n if df is not None:\n for _, row in df.iterrows():\n formatted_text = self.pattern.format(**row.to_dict())\n lines.append(formatted_text)\n elif data is not None:\n # Use format_map with a dict that returns default_value for missing keys\n class DefaultDict(dict):\n def __missing__(self, key):\n return data.default_value or \"\"\n\n formatted_text = self.pattern.format_map(DefaultDict(data.data))\n lines.append(formatted_text)\n\n combined_text = self.sep.join(lines)\n self.status = combined_text\n return Message(text=combined_text)\n\n def convert_to_string(self) -> Message:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n result = \"\"\n if isinstance(self.input_data, list):\n result = \"\\n\".join([safe_convert(item, clean_data=self.clean_data or False) for item in self.input_data])\n else:\n result = safe_convert(self.input_data or False)\n self.log(f\"Converted to string with length: {len(result)}\")\n\n message = Message(text=result)\n self.status = message\n return message\n" + "value": "from lfx.custom.custom_component.component import Component\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, HandleInput, MessageTextInput, MultilineInput, TabInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.template.field.base import Output\n\n\nclass ParserComponent(Component):\n display_name = \"Parser\"\n description = \"Extracts text using a template.\"\n documentation: str = \"https://docs.langflow.org/parser\"\n icon = \"braces\"\n\n inputs = [\n HandleInput(\n name=\"input_data\",\n display_name=\"Data or DataFrame\",\n input_types=[\"DataFrame\", \"Data\"],\n info=\"Accepts either a DataFrame or a Data object.\",\n required=True,\n ),\n TabInput(\n name=\"mode\",\n display_name=\"Mode\",\n options=[\"Parser\", 
\"Stringify\"],\n value=\"Parser\",\n info=\"Convert into raw string instead of using a template.\",\n real_time_refresh=True,\n ),\n MultilineInput(\n name=\"pattern\",\n display_name=\"Template\",\n info=(\n \"Use variables within curly brackets to extract column values for DataFrames \"\n \"or key values for Data.\"\n \"For example: `Name: {Name}, Age: {Age}, Country: {Country}`\"\n ),\n value=\"Text: {text}\", # Example default\n dynamic=True,\n show=True,\n required=True,\n ),\n MessageTextInput(\n name=\"sep\",\n display_name=\"Separator\",\n advanced=True,\n value=\"\\n\",\n info=\"String used to separate rows/items.\",\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"Parsed Text\",\n name=\"parsed_text\",\n info=\"Formatted text output.\",\n method=\"parse_combined_text\",\n ),\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n \"\"\"Dynamically hide/show `template` and enforce requirement based on `stringify`.\"\"\"\n if field_name == \"mode\":\n build_config[\"pattern\"][\"show\"] = self.mode == \"Parser\"\n build_config[\"pattern\"][\"required\"] = self.mode == \"Parser\"\n if field_value:\n clean_data = BoolInput(\n name=\"clean_data\",\n display_name=\"Clean Data\",\n info=(\n \"Enable to clean the data by removing empty rows and lines \"\n \"in each cell of the DataFrame/ Data object.\"\n ),\n value=True,\n advanced=True,\n required=False,\n )\n build_config[\"clean_data\"] = clean_data.to_dict()\n else:\n build_config.pop(\"clean_data\", None)\n\n return build_config\n\n def _clean_args(self):\n \"\"\"Prepare arguments based on input type.\"\"\"\n input_data = self.input_data\n\n match input_data:\n case list() if all(isinstance(item, Data) for item in input_data):\n msg = \"List of Data objects is not supported.\"\n raise ValueError(msg)\n case DataFrame():\n return input_data, None\n case Data():\n return None, input_data\n case dict() if \"data\" in input_data:\n try:\n if \"columns\" in input_data: # Likely a DataFrame\n return DataFrame.from_dict(input_data), None\n # Likely a Data object\n return None, Data(**input_data)\n except (TypeError, ValueError, KeyError) as e:\n msg = f\"Invalid structured input provided: {e!s}\"\n raise ValueError(msg) from e\n case _:\n msg = f\"Unsupported input type: {type(input_data)}. 
Expected DataFrame or Data.\"\n raise ValueError(msg)\n\n def parse_combined_text(self) -> Message:\n \"\"\"Parse all rows/items into a single text or convert input to string if `stringify` is enabled.\"\"\"\n # Early return for stringify option\n if self.mode == \"Stringify\":\n return self.convert_to_string()\n\n df, data = self._clean_args()\n\n lines = []\n if df is not None:\n for _, row in df.iterrows():\n formatted_text = self.pattern.format(**row.to_dict())\n lines.append(formatted_text)\n elif data is not None:\n # Use format_map with a dict that returns default_value for missing keys\n class DefaultDict(dict):\n def __missing__(self, key):\n return data.default_value or \"\"\n\n formatted_text = self.pattern.format_map(DefaultDict(data.data))\n lines.append(formatted_text)\n\n combined_text = self.sep.join(lines)\n self.status = combined_text\n return Message(text=combined_text)\n\n def convert_to_string(self) -> Message:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n result = \"\"\n if isinstance(self.input_data, list):\n result = \"\\n\".join([safe_convert(item, clean_data=self.clean_data or False) for item in self.input_data])\n else:\n result = safe_convert(self.input_data or False)\n self.log(f\"Converted to string with length: {len(result)}\")\n\n message = Message(text=result)\n self.status = message\n return message\n" }, "input_data": { "_input_type": "HandleInput", @@ -1104,11 +538,13 @@ "list": false, "list_add_label": "Add More", "name": "input_data", + "override_skip": false, "placeholder": "", "required": true, "show": true, "title_case": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "other", "value": "" }, @@ -1123,6 +559,7 @@ "Parser", "Stringify" ], + "override_skip": false, "placeholder": "", "real_time_refresh": true, "required": false, @@ -1130,12 +567,14 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "tab", "value": "Parser" }, "pattern": { "_input_type": "MultilineInput", "advanced": false, + "ai_enabled": false, "copy_field": false, "display_name": "Template", "dynamic": true, @@ -1148,6 +587,7 @@ "load_from_db": false, "multiline": true, "name": "pattern", + "override_skip": false, "placeholder": "", "required": true, "show": true, @@ -1155,6 +595,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "{text}" }, @@ -1171,6 +612,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "sep", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -1178,6 +620,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "\n" } @@ -1194,14 +637,16 @@ "width": 320 }, "position": { - "x": 1282.0613788430787, - "y": 564.2200355777322 + "x": 854.0613788430787, + "y": 1204.2200355777322 }, "selected": false, "type": "genericNode" }, { "data": { + "description": "Get chat inputs from the Playground.", + "display_name": "Chat Input", "id": "ChatInput-7W1BE", "node": { "base_classes": [ @@ -1212,7 +657,7 @@ "custom_fields": {}, "description": "Get chat inputs from the Playground.", "display_name": "Chat Input", - "documentation": "https://docs.langflow.org/components-io#chat-input", + "documentation": "https://docs.langflow.org/chat-input-and-output", "edited": false, "field_order": [ "input_value", @@ -1227,17 +672,17 @@ "icon": "MessagesSquare", "legacy": false, "metadata": 
{ - "code_hash": "0014a5b41817", + "code_hash": "7a26c54d89ed", "dependencies": { "dependencies": [ { "name": "lfx", - "version": "0.1.13.dev9" + "version": null } ], "total_dependencies": 1 }, - "module": "lfx.components.input_output.chat.ChatInput" + "module": "custom_components.chat_input" }, "minimized": true, "output_types": [], @@ -1247,8 +692,11 @@ "cache": true, "display_name": "Chat Message", "group_outputs": false, + "loop_types": null, "method": "message_response", "name": "message", + "options": null, + "required_inputs": null, "selected": "Message", "tool_mode": true, "types": [ @@ -1276,7 +724,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from lfx.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom lfx.schema.message import Message\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-input\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"context_id\",\n display_name=\"Context ID\",\n info=\"The context ID of the chat. 
Adds an extra layer to the local memory.\",\n value=\"\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n # Ensure files is a list and filter out empty/None values\n files = self.files if self.files else []\n if files and not isinstance(files, list):\n files = [files]\n # Filter out None/empty values\n files = [f for f in files if f is not None and f != \"\"]\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n context_id=self.context_id,\n files=files,\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n" + "value": "from lfx.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom lfx.schema.message import Message\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n documentation: str = \"https://docs.langflow.org/chat-input-and-output\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"context_id\",\n display_name=\"Context ID\",\n info=\"The context ID of the chat. 
Adds an extra layer to the local memory.\",\n value=\"\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n # Ensure files is a list and filter out empty/None values\n files = self.files if self.files else []\n if files and not isinstance(files, list):\n files = [files]\n # Filter out None/empty values\n files = [f for f in files if f is not None and f != \"\"]\n\n session_id = self.session_id or self.graph.session_id or \"\"\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=session_id,\n context_id=self.context_id,\n files=files,\n )\n if session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n" }, "context_id": { "_input_type": "MessageTextInput", @@ -1291,6 +739,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "context_id", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -1298,6 +747,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -1336,6 +786,7 @@ "list": true, "list_add_label": "Add More", "name": "files", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -1343,12 +794,14 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "file", "value": "" }, "input_value": { "_input_type": "MultilineInput", "advanced": false, + "ai_enabled": false, "copy_field": false, "display_name": "Input Text", "dynamic": false, @@ -1359,6 +812,7 @@ "load_from_db": false, "multiline": true, "name": "input_value", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -1366,6 +820,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -1384,6 +839,7 @@ "User" ], "options_metadata": [], + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -1391,6 +847,7 @@ "toggle": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "str", "value": "User" }, @@ -1407,6 +864,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "sender_name", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -1414,6 +872,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "User" }, @@ -1430,6 +889,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "session_id", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -1437,6 +897,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -1449,12 +910,14 @@ "list": false, "list_add_label": "Add More", "name": "should_store_message", + "override_skip": false, "placeholder": "", "required": false, "show": true, "title_case": false, "tool_mode": false, 
"trace_as_metadata": true, + "track_in_telemetry": true, "type": "bool", "value": true } @@ -1471,14 +934,16 @@ "width": 192 }, "position": { - "x": 1288.6037146218582, - "y": 1074.0144323522718 + "x": 980.6037146218582, + "y": 1642.0144323522718 }, "selected": false, "type": "genericNode" }, { "data": { + "description": "Display a chat message in the Playground.", + "display_name": "Chat Output", "id": "ChatOutput-axewE", "node": { "base_classes": [ @@ -1489,7 +954,7 @@ "custom_fields": {}, "description": "Display a chat message in the Playground.", "display_name": "Chat Output", - "documentation": "https://docs.langflow.org/components-io#chat-output", + "documentation": "https://docs.langflow.org/chat-input-and-output", "edited": false, "field_order": [ "input_value", @@ -1505,7 +970,7 @@ "icon": "MessagesSquare", "legacy": false, "metadata": { - "code_hash": "4848ad3e35d5", + "code_hash": "cae45e2d53f6", "dependencies": { "dependencies": [ { @@ -1514,16 +979,16 @@ }, { "name": "fastapi", - "version": "0.119.1" + "version": "0.120.0" }, { "name": "lfx", - "version": "0.1.13.dev9" + "version": null } ], "total_dependencies": 3 }, - "module": "lfx.components.input_output.chat_output.ChatOutput" + "module": "custom_components.chat_output" }, "minimized": true, "output_types": [], @@ -1533,8 +998,11 @@ "cache": true, "display_name": "Output Message", "group_outputs": false, + "loop_types": null, "method": "message_response", "name": "message", + "options": null, + "required_inputs": null, "selected": "Message", "tool_mode": true, "types": [ @@ -1555,12 +1023,14 @@ "list": false, "list_add_label": "Add More", "name": "clean_data", + "override_skip": false, "placeholder": "", "required": false, "show": true, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "bool", "value": true }, @@ -1580,7 +1050,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n 
advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"context_id\",\n display_name=\"Context ID\",\n info=\"The context ID of the chat. Adds an extra layer to the local memory.\",\n value=\"\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n advanced=True,\n info=\"Whether to clean data before converting to string.\",\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, _, display_name, source_id = self.get_properties_from_source_component()\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.context_id = self.context_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise 
TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n clean_data: bool = getattr(self, \"clean_data\", False)\n return \"\\n\".join([safe_convert(item, clean_data=clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" + "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/chat-input-and-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"context_id\",\n display_name=\"Context ID\",\n info=\"The context ID of the chat. Adds an extra layer to the local memory.\",\n value=\"\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n advanced=True,\n info=\"Whether to clean data before converting to string.\",\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, _, display_name, source_id = self.get_properties_from_source_component()\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message) and not self.is_connected_to_chat_input():\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id or self.graph.session_id or \"\"\n message.context_id = self.context_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n\n # Store message if needed\n if message.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n 
clean_data: bool = getattr(self, \"clean_data\", False)\n return \"\\n\".join([safe_convert(item, clean_data=clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" }, "context_id": { "_input_type": "MessageTextInput", @@ -1595,6 +1065,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "context_id", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -1602,6 +1073,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -1618,6 +1090,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "data_template", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -1625,6 +1098,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "{text}" }, @@ -1642,11 +1116,13 @@ "list": false, "list_add_label": "Add More", "name": "input_value", + "override_skip": false, "placeholder": "", "required": true, "show": true, "title_case": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "other", "value": "" }, @@ -1665,6 +1141,7 @@ "User" ], "options_metadata": [], + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -1672,6 +1149,7 @@ "toggle": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "str", "value": "Machine" }, @@ -1688,6 +1166,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "sender_name", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -1695,6 +1174,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "AI" }, @@ -1711,6 +1191,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "session_id", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -1718,6 +1199,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -1730,12 +1212,14 @@ "list": false, "list_add_label": "Add More", "name": "should_store_message", + "override_skip": false, "placeholder": "", "required": false, "show": true, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "bool", "value": true } @@ -1752,481 +1236,8 @@ "width": 192 }, "position": { - "x": 2682.464573109284, - "y": 850.0186854555118 - }, - "selected": false, - "type": "genericNode" - }, - { - "data": { - "id": "EmbeddingModel-26o1e", - "node": { - "base_classes": [ - "Embeddings" - ], - "beta": false, - "conditional_paths": [], - "custom_fields": {}, - "description": "Generate embeddings using a specified provider.", - "display_name": "Embedding Model", - "documentation": "https://docs.langflow.org/components-embedding-models", - "edited": false, - "field_order": [ - "provider", - "api_base", - "ollama_base_url", - "base_url_ibm_watsonx", - "model", - "api_key", - "project_id", - "dimensions", - "chunk_size", - "request_timeout", - "max_retries", - "show_progress_bar", - "model_kwargs", - "truncate_input_tokens", - "input_text" - ], - "frozen": false, - "icon": "binary", - "legacy": false, - "metadata": { - "code_hash": "c5e0a4535a27", - "dependencies": { - "dependencies": [ - { - "name": "requests", - "version": "2.32.5" - }, - { - 
"name": "ibm_watsonx_ai", - "version": "1.4.2" - }, - { - "name": "langchain_openai", - "version": "0.3.23" - }, - { - "name": "lfx", - "version": "0.2.0.dev19" - }, - { - "name": "langchain_ollama", - "version": "0.3.10" - }, - { - "name": "langchain_community", - "version": "0.3.21" - }, - { - "name": "langchain_ibm", - "version": "0.3.19" - } - ], - "total_dependencies": 7 - }, - "module": "custom_components.embedding_model" - }, - "minimized": false, - "output_types": [], - "outputs": [ - { - "allows_loop": false, - "cache": true, - "display_name": "Embedding Model", - "group_outputs": false, - "method": "build_embeddings", - "name": "embeddings", - "options": null, - "required_inputs": null, - "selected": "Embeddings", - "tool_mode": true, - "types": [ - "Embeddings" - ], - "value": "__UNDEFINED__" - } - ], - "pinned": false, - "template": { - "_type": "Component", - "api_base": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "API Base URL", - "dynamic": false, - "info": "Base URL for the API. Leave empty for default.", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "api_base", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "" - }, - "api_key": { - "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "OpenAI API Key", - "dynamic": false, - "info": "Model Provider API key", - "input_types": [], - "load_from_db": true, - "name": "api_key", - "password": true, - "placeholder": "", - "real_time_refresh": true, - "required": true, - "show": true, - "title_case": false, - "track_in_telemetry": false, - "type": "str", - "value": "OPENAI_API_KEY" - }, - "base_url_ibm_watsonx": { - "_input_type": "DropdownInput", - "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "watsonx API Endpoint", - "dynamic": false, - "external_options": {}, - "info": "The base URL of the API (IBM watsonx.ai only)", - "name": "base_url_ibm_watsonx", - "options": [ - "https://us-south.ml.cloud.ibm.com", - "https://eu-de.ml.cloud.ibm.com", - "https://eu-gb.ml.cloud.ibm.com", - "https://au-syd.ml.cloud.ibm.com", - "https://jp-tok.ml.cloud.ibm.com", - "https://ca-tor.ml.cloud.ibm.com" - ], - "options_metadata": [], - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": false, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "str", - "value": "https://us-south.ml.cloud.ibm.com" - }, - "chunk_size": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "Chunk Size", - "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "name": "chunk_size", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "int", - "value": 1000 - }, - "code": { - "advanced": true, - "dynamic": true, - "fileTypes": [], - "file_path": "", - "info": "", - "list": false, - "load_from_db": false, - "multiline": true, - "name": "code", - "password": false, - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "type": "code", - "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom 
langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n return OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. 
\"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n return OllamaEmbeddings(\n model=model,\n base_url=transformed_base_url or \"http://localhost:11434\",\n **model_kwargs,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n from ibm_watsonx_ai import APIClient, Credentials\n\n credentials = Credentials(\n api_key=self.api_key,\n url=base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\",\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n return WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = 
self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" - }, - "dimensions": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "Dimensions", - "dynamic": false, - "info": "The number of dimensions the resulting output embeddings should have. 
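Both the removed component above and its replacement later in this hunk drive field visibility through update_build_config: the provider dropdown is declared with real_time_refresh=True, so changing it re-runs update_build_config, which flips the "show" flag on provider-specific inputs. The watsonx-only fields added by this patch (truncate_input_tokens and input_text) stay hidden for OpenAI and Ollama and appear only for IBM watsonx.ai. A minimal sketch of that toggle pattern, using a plain dict in place of lfx's dotdict; the field names match the component's inputs, but the harness itself is illustrative only:

    # Sketch of the show-flag toggling performed by update_build_config.
    def toggle_provider_fields(build_config: dict, provider: str) -> dict:
        watsonx = provider == "IBM watsonx.ai"
        for name in ("base_url_ibm_watsonx", "project_id",
                     "truncate_input_tokens", "input_text"):
            build_config[name]["show"] = watsonx
        build_config["ollama_base_url"]["show"] = provider == "Ollama"
        return build_config

    config = {
        name: {"show": False}
        for name in ("base_url_ibm_watsonx", "project_id", "truncate_input_tokens",
                     "input_text", "ollama_base_url")
    }
    toggle_provider_fields(config, "IBM watsonx.ai")
    assert config["truncate_input_tokens"]["show"] and not config["ollama_base_url"]["show"]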
Only supported by certain models.", - "list": false, - "list_add_label": "Add More", - "name": "dimensions", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "int", - "value": "" - }, - "input_text": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Include the original text in the output", - "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "name": "input_text", - "placeholder": "", - "required": false, - "show": false, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "bool", - "value": true - }, - "max_retries": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "Max Retries", - "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "name": "max_retries", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "int", - "value": 3 - }, - "model": { - "_input_type": "DropdownInput", - "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Model Name", - "dynamic": false, - "external_options": {}, - "info": "Select the embedding model to use", - "name": "model", - "options": [ - "text-embedding-3-small", - "text-embedding-3-large", - "text-embedding-ada-002" - ], - "options_metadata": [], - "placeholder": "", - "real_time_refresh": true, - "refresh_button": true, - "required": false, - "show": true, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "str", - "value": "text-embedding-3-small" - }, - "model_kwargs": { - "_input_type": "DictInput", - "advanced": true, - "display_name": "Model Kwargs", - "dynamic": false, - "info": "Additional keyword arguments to pass to the model.", - "list": false, - "list_add_label": "Add More", - "name": "model_kwargs", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "track_in_telemetry": false, - "type": "dict", - "value": {} - }, - "ollama_base_url": { - "_input_type": "MessageTextInput", - "advanced": false, - "display_name": "Ollama API URL", - "dynamic": false, - "info": "Endpoint of the Ollama API (Ollama only). 
Defaults to http://localhost:11434", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "ollama_base_url", - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": false, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "" - }, - "project_id": { - "_input_type": "MessageTextInput", - "advanced": false, - "display_name": "Project ID", - "dynamic": false, - "info": "IBM watsonx.ai Project ID (required for IBM watsonx.ai)", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "project_id", - "placeholder": "", - "required": false, - "show": false, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "" - }, - "provider": { - "_input_type": "DropdownInput", - "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Model Provider", - "dynamic": false, - "external_options": {}, - "info": "Select the embedding model provider", - "name": "provider", - "options": [ - "OpenAI", - "Ollama", - "IBM watsonx.ai" - ], - "options_metadata": [ - { - "icon": "OpenAI" - }, - { - "icon": "Ollama" - }, - { - "icon": "WatsonxAI" - } - ], - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": true, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "str", - "value": "OpenAI" - }, - "request_timeout": { - "_input_type": "FloatInput", - "advanced": true, - "display_name": "Request Timeout", - "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "name": "request_timeout", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "float", - "value": "" - }, - "show_progress_bar": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Show Progress Bar", - "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "name": "show_progress_bar", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "bool", - "value": false - }, - "truncate_input_tokens": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "Truncate Input Tokens", - "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "name": "truncate_input_tokens", - "placeholder": "", - "required": false, - "show": false, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "int", - "value": 200 - } - }, - "tool_mode": false - }, - "showNode": true, - "type": "EmbeddingModel" - }, - "dragging": false, - "id": "EmbeddingModel-26o1e", - "measured": { - "height": 369, - "width": 320 - }, - "position": { - "x": 458.53651935928946, - "y": 353.80712243149895 + "x": 2262.464573109284, + "y": 1514.0186854555118 }, "selected": false, "type": "genericNode" @@ -2260,7 +1271,7 @@ ], "frozen": false, "icon": "brain-circuit", - "last_updated": "2025-11-24T18:03:45.208Z", + "last_updated": "2025-11-26T00:04:41.267Z", "legacy": false, "metadata": { "code_hash": "694ffc4b17b8", @@ 
-2313,6 +1324,7 @@ "cache": true, "display_name": "Model Response", "group_outputs": false, + "loop_types": null, "method": "text_response", "name": "text_output", "options": null, @@ -2329,6 +1341,7 @@ "cache": true, "display_name": "Language Model", "group_outputs": false, + "loop_types": null, "method": "build_model", "name": "model_output", "options": null, @@ -2344,6 +1357,12 @@ "pinned": false, "priority": 0, "template": { + "_frontend_node_flow_id": { + "value": "ebc01d31-1976-46ce-a385-b0240327226c" + }, + "_frontend_node_folder_id": { + "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + }, "_type": "Component", "api_key": { "_input_type": "SecretStrInput", @@ -2434,6 +1453,7 @@ "type": "str", "value": "" }, + "is_refresh": false, "model_name": { "_input_type": "DropdownInput", "advanced": false, @@ -2445,25 +1465,7 @@ "info": "Select the model to use", "name": "model_name", "options": [ - "gpt-4o-mini", - "gpt-4o", - "gpt-4.1", - "gpt-4.1-mini", - "gpt-4.1-nano", - "gpt-4-turbo", - "gpt-4-turbo-preview", - "gpt-4", - "gpt-3.5-turbo", - "gpt-5", - "gpt-5-mini", - "gpt-5-nano", - "gpt-5-chat-latest", - "o1", - "o3-mini", - "o3", - "o3-pro", - "o4-mini", - "o4-mini-high" + "gpt-4o" ], "options_metadata": [], "placeholder": "", @@ -2476,7 +1478,7 @@ "tool_mode": false, "trace_as_metadata": true, "type": "str", - "value": "gpt-4o-mini" + "value": "gpt-4o" }, "ollama_base_url": { "_input_type": "MessageTextInput", @@ -2651,8 +1653,8 @@ "width": 320 }, "position": { - "x": 2151.7120459180214, - "y": 419.8653051925172 + "x": 1791.7120459180214, + "y": 1163.865305192517 }, "selected": false, "type": "genericNode" @@ -2764,24 +1766,2244 @@ "width": 320 }, "position": { - "x": 460.5802529801176, - "y": 838.8308634009717 + "x": -381.0467835456134, + "y": 1685.1023245166507 + }, + "selected": false, + "type": "genericNode" + }, + { + "data": { + "description": "Generate embeddings using a specified provider.", + "display_name": "Embedding Model", + "id": "EmbeddingModel-ooLFP", + "node": { + "base_classes": [ + "Embeddings" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Generate embeddings using a specified provider.", + "display_name": "Embedding Model", + "documentation": "https://docs.langflow.org/components-embedding-models", + "edited": false, + "field_order": [ + "provider", + "api_base", + "ollama_base_url", + "base_url_ibm_watsonx", + "model", + "api_key", + "project_id", + "dimensions", + "chunk_size", + "request_timeout", + "max_retries", + "show_progress_bar", + "model_kwargs", + "truncate_input_tokens", + "input_text" + ], + "frozen": false, + "icon": "binary", + "last_updated": "2025-11-26T00:04:41.270Z", + "legacy": false, + "metadata": { + "code_hash": "9e44c83a5058", + "dependencies": { + "dependencies": [ + { + "name": "requests", + "version": "2.32.5" + }, + { + "name": "ibm_watsonx_ai", + "version": "1.4.2" + }, + { + "name": "langchain_openai", + "version": "0.3.23" + }, + { + "name": "lfx", + "version": null + }, + { + "name": "langchain_ollama", + "version": "0.3.10" + }, + { + "name": "langchain_community", + "version": "0.3.21" + }, + { + "name": "langchain_ibm", + "version": "0.3.19" + } + ], + "total_dependencies": 7 + }, + "module": "custom_components.embedding_model" + }, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Embedding Model", + "group_outputs": false, + "loop_types": null, + "method": "build_embeddings", + "name": "embeddings", + "options": 
null, + "required_inputs": null, + "selected": "Embeddings", + "tool_mode": true, + "types": [ + "Embeddings" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_frontend_node_flow_id": { + "value": "ebc01d31-1976-46ce-a385-b0240327226c" + }, + "_frontend_node_folder_id": { + "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + }, + "_type": "Component", + "api_base": { + "_input_type": "MessageTextInput", + "advanced": true, + "display_name": "API Base URL", + "dynamic": false, + "info": "Base URL for the API. Leave empty for default.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "api_base", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "api_key": { + "_input_type": "SecretStrInput", + "advanced": false, + "display_name": "OpenAI API Key", + "dynamic": false, + "info": "Model Provider API key", + "input_types": [], + "load_from_db": true, + "name": "api_key", + "override_skip": false, + "password": true, + "placeholder": "", + "real_time_refresh": true, + "required": true, + "show": true, + "title_case": false, + "track_in_telemetry": false, + "type": "str", + "value": "OPENAI_API_KEY" + }, + "base_url_ibm_watsonx": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "watsonx API Endpoint", + "dynamic": false, + "external_options": {}, + "info": "The base URL of the API (IBM watsonx.ai only)", + "name": "base_url_ibm_watsonx", + "options": [ + "https://us-south.ml.cloud.ibm.com", + "https://eu-de.ml.cloud.ibm.com", + "https://eu-gb.ml.cloud.ibm.com", + "https://au-syd.ml.cloud.ibm.com", + "https://jp-tok.ml.cloud.ibm.com", + "https://ca-tor.ml.cloud.ibm.com" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": false, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "https://us-south.ml.cloud.ibm.com" + }, + "chunk_size": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Chunk Size", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "chunk_size", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 1000 + }, + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n 
WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and 
transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n 
build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await 
is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + }, + "dimensions": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Dimensions", + "dynamic": false, + "info": "The number of dimensions the resulting output embeddings should have. Only supported by certain models.", + "list": false, + "list_add_label": "Add More", + "name": "dimensions", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": "" + }, + "input_text": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Include the original text in the output", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "input_text", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, + "is_refresh": false, + "max_retries": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Max Retries", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "max_retries", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 3 + }, + "model": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Model Name", + "dynamic": false, + "external_options": {}, + "info": "Select the embedding model to use", + "name": "model", + "options": [ + "text-embedding-3-small", + "text-embedding-3-large", + "text-embedding-ada-002" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "refresh_button": true, + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "text-embedding-3-small" + }, + "model_kwargs": { + "_input_type": "DictInput", + "advanced": true, + "display_name": "Model Kwargs", + "dynamic": false, + "info": "Additional keyword arguments to pass to the model.", + "list": false, + "list_add_label": "Add More", + "name": "model_kwargs", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "track_in_telemetry": false, + "type": "dict", + "value": {} + }, + "ollama_base_url": { + "_input_type": "MessageTextInput", + "advanced": false, + "display_name": "Ollama API URL", + "dynamic": false, + "info": "Endpoint of the Ollama API (Ollama only). 
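The rewritten build_embeddings is now async and no longer returns a bare embeddings object: for every provider it builds a primary instance plus one dedicated instance per available model, and wraps both in EmbeddingsWithModels (imported above from lfx.base.embeddings.embeddings_class). A sketch of the shape this implies; the dataclass below is a hypothetical stand-in for the real lfx container, of which the component only uses these two slots:

    from dataclasses import dataclass, field

    @dataclass
    class EmbeddingsWithModels:  # stand-in; the real class lives in lfx
        embeddings: object                                     # primary instance
        available_models: dict = field(default_factory=dict)   # model name -> instance

    def build(primary: str, names: list[str], make) -> EmbeddingsWithModels:
        return EmbeddingsWithModels(
            embeddings=make(primary),
            available_models={name: make(name) for name in names},
        )

    bundle = build("text-embedding-3-small",
                   ["text-embedding-3-small", "text-embedding-3-large"],
                   lambda name: f"<OpenAIEmbeddings model={name}>")
    assert "text-embedding-3-large" in bundle.available_models

This is what lets the multi-embedding OpenSearch component added below match each embedding field it detects in an index to a dedicated instance of the model that produced it.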
Defaults to http://localhost:11434", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "ollama_base_url", + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "project_id": { + "_input_type": "MessageTextInput", + "advanced": false, + "display_name": "Project ID", + "dynamic": false, + "info": "IBM watsonx.ai Project ID (required for IBM watsonx.ai)", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "project_id", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "provider": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Model Provider", + "dynamic": false, + "external_options": {}, + "info": "Select the embedding model provider", + "name": "provider", + "options": [ + "OpenAI", + "Ollama", + "IBM watsonx.ai" + ], + "options_metadata": [ + { + "icon": "OpenAI" + }, + { + "icon": "Ollama" + }, + { + "icon": "WatsonxAI" + } + ], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "OpenAI" + }, + "request_timeout": { + "_input_type": "FloatInput", + "advanced": true, + "display_name": "Request Timeout", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "request_timeout", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "float", + "value": "" + }, + "show_progress_bar": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Show Progress Bar", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "show_progress_bar", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": false + }, + "truncate_input_tokens": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Truncate Input Tokens", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "truncate_input_tokens", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 200 + } + }, + "tool_mode": false + }, + "showNode": true, + "type": "EmbeddingModel" + }, + "dragging": false, + "id": "EmbeddingModel-ooLFP", + "measured": { + "height": 369, + "width": 320 + }, + "position": { + "x": -1091.341314577015, + "y": 1237.2170349466728 + }, + "selected": false, + "type": "genericNode" + }, + { + "data": { + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3", + "node": { + 
"base_classes": [ + "Data", + "DataFrame", + "VectorStore" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.", + "display_name": "OpenSearch (Multi-Model Multi-Embedding)", + "documentation": "", + "edited": false, + "field_order": [ + "docs_metadata", + "opensearch_url", + "index_name", + "engine", + "space_type", + "ef_construction", + "m", + "num_candidates", + "ingest_data", + "search_query", + "should_cache_vector_store", + "embedding", + "embedding_model_name", + "vector_field", + "number_of_results", + "filter_expression", + "auth_mode", + "username", + "password", + "jwt_token", + "jwt_header", + "bearer_prefix", + "use_ssl", + "verify_certs" + ], + "frozen": false, + "icon": "OpenSearch", + "last_updated": "2025-11-25T23:38:50.335Z", + "legacy": false, + "metadata": { + "code_hash": "8c78d799fef4", + "dependencies": { + "dependencies": [ + { + "name": "opensearchpy", + "version": "2.8.0" + }, + { + "name": "lfx", + "version": null + } + ], + "total_dependencies": 2 + }, + "module": "lfx.components.elastic.opensearch_multimodal.OpenSearchVectorStoreComponentMultimodalMultiEmbedding" + }, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Search Results", + "group_outputs": false, + "loop_types": null, + "method": "search_documents", + "name": "search_results", + "options": null, + "required_inputs": null, + "tool_mode": true, + "types": [ + "Data" + ], + "value": "__UNDEFINED__" + }, + { + "allows_loop": false, + "cache": true, + "display_name": "DataFrame", + "group_outputs": false, + "loop_types": null, + "method": "as_dataframe", + "name": "dataframe", + "options": null, + "required_inputs": null, + "selected": "DataFrame", + "tool_mode": true, + "types": [ + "DataFrame" + ], + "value": "__UNDEFINED__" + }, + { + "allows_loop": false, + "cache": true, + "display_name": "Vector Store Connection", + "group_outputs": false, + "hidden": false, + "loop_types": null, + "method": "as_vector_store", + "name": "vectorstoreconnection", + "options": null, + "required_inputs": null, + "tool_mode": true, + "types": [ + "VectorStore" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_frontend_node_flow_id": { + "value": "72c3d17c-2dac-4a73-b48a-6518473d7830" + }, + "_frontend_node_folder_id": { + "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + }, + "_type": "Component", + "auth_mode": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Authentication Mode", + "dynamic": false, + "external_options": {}, + "info": "Authentication method: 'basic' for username/password authentication, or 'jwt' for JSON Web Token (Bearer) authentication.", + "name": "auth_mode", + "options": [ + "basic", + "jwt" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "jwt" + }, + "bearer_prefix": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Prefix 'Bearer '", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "bearer_prefix", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + 
"title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. 
It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
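The "multi-model hybrid search" the class docstring describes combines one KNN clause per detected embedding field under dis_max with a keyword clause on top. The component's actual query builder is not shown in this hunk, so the payload below is only one plausible shape for such a query, written as a Python dict with toy vectors and illustrative field names:

    import json

    # Illustrative only: dis_max keeps the best-scoring KNN clause per document,
    # and bool/should adds keyword matching alongside it.
    hybrid_query = {
        "size": 10,
        "query": {
            "bool": {
                "should": [
                    {"dis_max": {"queries": [
                        {"knn": {"chunk_embedding_text_embedding_3_small":
                                 {"vector": [0.1, 0.2], "k": 10}}},
                        {"knn": {"chunk_embedding_granite_embedding_107m":
                                 {"vector": [0.3, 0.4], "k": 10}}},
                    ]}},
                    {"match": {"text": "what is langflow?"}},
                ]
            }
        },
    }
    print(json.dumps(hybrid_query, indent=2))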
\"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
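filter_expression accepts the two JSON shapes spelled out in its info string. For quick reference, here they are as standalone payloads; the file names and owners are illustrative values only:

    import json

    # Format 1: explicit OpenSearch filter clauses plus limit / score threshold.
    explicit = {
        "filter": [
            {"term": {"filename": "doc.pdf"}},
            {"terms": {"owner": ["user1", "user2"]}},
        ],
        "limit": 10,
        "score_threshold": 1.6,
    }

    # Format 2: context-style mapping; __IMPOSSIBLE_VALUE__ marks a filter to be ignored.
    context_style = {
        "data_sources": ["file.pdf"],
        "document_types": ["application/pdf"],
        "owners": ["__IMPOSSIBLE_VALUE__"],
    }

    print(json.dumps(explicit), json.dumps(context_style))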
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n logger.aerror(msg)\n raise ValueError(msg)\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. 
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n self.log(self.ingest_data)\n client = self.build_client()\n logger.warning(f\"Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def 
_add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n docs = self.ingest_data or []\n if not docs:\n self.log(\"No documents to ingest.\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n if not embeddings_list:\n msg = \"At least one embedding is required to embed documents.\"\n raise ValueError(msg)\n\n self.log(f\"Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = 
[]\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in 
self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings (threaded for concurrency) with retries\n def embed_chunk(chunk_text: str) -> list[float]:\n return selected_embedding.embed_documents([chunk_text])[0]\n\n vectors: list[list[float]] | None = None\n last_exception: Exception | None = None\n delay = 1.0\n attempts = 0\n max_attempts = 3\n\n while attempts < max_attempts:\n attempts += 1\n try:\n max_workers = min(max(len(texts), 1), 8)\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n vectors = [None] * len(texts)\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n break\n except Exception as exc:\n last_exception = exc\n if attempts >= max_attempts:\n logger.error(\n f\"Embedding generation failed for model {embedding_model} after retries\",\n error=str(exc),\n )\n raise\n logger.warning(\n \"Threaded embedding generation failed for model %s (attempt %s/%s), retrying in %.1fs\",\n embedding_model,\n attempts,\n max_attempts,\n delay,\n )\n time.sleep(delay)\n delay = min(delay * 2, 8.0)\n\n if vectors is None:\n raise RuntimeError(\n f\"Embedding generation failed for {embedding_model}: {last_exception}\"\n if last_exception\n else f\"Embedding generation failed for {embedding_model}\"\n )\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n 
ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique 
embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": {\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n result = client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n return [self._get_embedding_model_name()]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. 
Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models and isinstance(available_models, dict):\n logger.info(f\"Embedding object {idx} provides {len(available_models)} models via available_models dict\")\n for model_name_key, dedicated_embedding in available_models.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model 
'{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. \"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n else:\n # No matching embedding found for this model\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. 
\"\n f\"This model will be skipped. Available models: {list(embedding_by_model.keys())}\"\n )\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n\n if not query_embeddings:\n msg = \"Failed to generate embeddings for any model\"\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. 
\"\n \"Returning empty search results.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found 
{len(hits)} results\")\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n raw = self.search(self.search_query or \"\")\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n self.log(self.ingest_data)\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n" + }, + "docs_metadata": { + "_input_type": "TableInput", + "advanced": false, + "display_name": "Document Metadata", + "dynamic": false, + "info": "Additional metadata key-value pairs to be added to all ingested documents. Useful for tagging documents with source information, categories, or other custom attributes.", + "input_types": [ + "Data" + ], + "is_list": true, + "list_add_label": "Add More", + "name": "docs_metadata", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "table_icon": "Table", + "table_schema": [ + { + "description": "Key name", + "display_name": "Key", + "formatter": "text", + "name": "key", + "type": "str" + }, + { + "description": "Value of the metadata", + "display_name": "Value", + "formatter": "text", + "name": "value", + "type": "str" + } + ], + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "trigger_icon": "Table", + "trigger_text": "Open table", + "type": "table", + "value": [] + }, + "ef_construction": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "EF Construction", + "dynamic": false, + "info": "Size of the dynamic candidate list during index construction. 
Higher values improve recall but increase indexing time and memory usage.", + "list": false, + "list_add_label": "Add More", + "name": "ef_construction", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 512 + }, + "embedding": { + "_input_type": "HandleInput", + "advanced": false, + "display_name": "Embedding", + "dynamic": false, + "info": "", + "input_types": [ + "Embeddings" + ], + "list": true, + "list_add_label": "Add More", + "name": "embedding", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "other", + "value": "" + }, + "embedding_model_name": { + "_input_type": "StrInput", + "advanced": false, + "display_name": "Embedding Model Name", + "dynamic": false, + "info": "Name of the embedding model to use for ingestion. This selects which embedding from the list will be used to embed documents. Matches on deployment, model, model_id, or model_name. For duplicate deployments, use combined format: 'deployment:model' (e.g., 'text-embedding-ada-002:text-embedding-3-large'). Leave empty to use the first embedding. Error message will show all available identifiers.", + "list": false, + "list_add_label": "Add More", + "load_from_db": true, + "name": "embedding_model_name", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "SELECTED_EMBEDDING_MODEL" + }, + "engine": { + "_input_type": "DropdownInput", + "advanced": true, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Vector Engine", + "dynamic": false, + "external_options": {}, + "info": "Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.", + "name": "engine", + "options": [ + "jvector", + "nmslib", + "faiss", + "lucene" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "jvector" + }, + "filter_expression": { + "_input_type": "MultilineInput", + "advanced": false, + "ai_enabled": false, + "copy_field": false, + "display_name": "Search Filters (JSON)", + "dynamic": false, + "info": "Optional JSON configuration for search filtering, result limits, and score thresholds.\n\nFormat 1 - Explicit filters:\n{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, {\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\n\nFormat 2 - Context-style mapping:\n{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\n\nUse __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "multiline": true, + "name": "filter_expression", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "index_name": { + "_input_type": "StrInput", + "advanced": false, + "display_name": "Index Name", + "dynamic": false, + "info": "The OpenSearch index name where documents will be stored and searched. Will be created automatically if it doesn't exist.", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "index_name", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "documents" + }, + "ingest_data": { + "_input_type": "HandleInput", + "advanced": false, + "display_name": "Ingest Data", + "dynamic": false, + "info": "", + "input_types": [ + "Data", + "DataFrame" + ], + "list": true, + "list_add_label": "Add More", + "name": "ingest_data", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "other", + "value": "" + }, + "is_refresh": false, + "jwt_header": { + "_input_type": "StrInput", + "advanced": true, + "display_name": "JWT Header Name", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "jwt_header", + "override_skip": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "Authorization" + }, + "jwt_token": { + "_input_type": "SecretStrInput", + "advanced": false, + "display_name": "JWT Token", + "dynamic": false, + "info": "Valid JSON Web Token for authentication. 
Will be sent in the Authorization header (with optional 'Bearer ' prefix).", + "input_types": [], + "load_from_db": true, + "name": "jwt_token", + "override_skip": false, + "password": true, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "track_in_telemetry": false, + "type": "str", + "value": "JWT" + }, + "m": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "M Parameter", + "dynamic": false, + "info": "Number of bidirectional connections for each vector in the HNSW graph. Higher values improve search quality but increase memory usage and indexing time.", + "list": false, + "list_add_label": "Add More", + "name": "m", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 16 + }, + "num_candidates": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Candidate Pool Size", + "dynamic": false, + "info": "Number of approximate neighbors to consider for each KNN query. Some OpenSearch deployments do not support this parameter; set to 0 to disable.", + "list": false, + "list_add_label": "Add More", + "name": "num_candidates", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 1000 + }, + "number_of_results": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Default Result Limit", + "dynamic": false, + "info": "Default maximum number of search results to return when no limit is specified in the filter expression.", + "list": false, + "list_add_label": "Add More", + "name": "number_of_results", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 10 + }, + "opensearch_url": { + "_input_type": "StrInput", + "advanced": false, + "display_name": "OpenSearch URL", + "dynamic": false, + "info": "The connection URL for your OpenSearch cluster (e.g., http://localhost:9200 for local development or your cloud endpoint).", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "opensearch_url", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "https://opensearch:9200" + }, + "password": { + "_input_type": "SecretStrInput", + "advanced": false, + "display_name": "OpenSearch Password", + "dynamic": false, + "info": "", + "input_types": [], + "load_from_db": false, + "name": "password", + "override_skip": false, + "password": true, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "search_query": { + "_input_type": "QueryInput", + "advanced": false, + "display_name": "Search Query", + "dynamic": false, + "info": "Enter a query to run a similarity search.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "search_query", + "override_skip": false, + "placeholder": "Enter a query...", + "required": false, + "show": true, + "title_case": false, + "tool_mode": true, + 
"trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "query", + "value": "" + }, + "should_cache_vector_store": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Cache Vector Store", + "dynamic": false, + "info": "If True, the vector store will be cached for the current build of the component. This is useful for components that have multiple output methods and want to share the same vector store.", + "list": false, + "list_add_label": "Add More", + "name": "should_cache_vector_store", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, + "space_type": { + "_input_type": "DropdownInput", + "advanced": true, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Distance Metric", + "dynamic": false, + "external_options": {}, + "info": "Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, 'cosinesimil' for cosine similarity, 'innerproduct' for dot product.", + "name": "space_type", + "options": [ + "l2", + "l1", + "cosinesimil", + "linf", + "innerproduct" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "l2" + }, + "use_ssl": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Use SSL/TLS", + "dynamic": false, + "info": "Enable SSL/TLS encryption for secure connections to OpenSearch.", + "list": false, + "list_add_label": "Add More", + "name": "use_ssl", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, + "username": { + "_input_type": "StrInput", + "advanced": false, + "display_name": "Username", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "username", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "admin" + }, + "vector_field": { + "_input_type": "StrInput", + "advanced": true, + "display_name": "Legacy Vector Field Name", + "dynamic": false, + "info": "Legacy field name for backward compatibility. New documents use dynamic fields (chunk_embedding_{model_name}) based on the embedding_model_name.", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "vector_field", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "chunk_embedding" + }, + "verify_certs": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Verify SSL Certificates", + "dynamic": false, + "info": "Verify SSL certificates when connecting. 
Disable for self-signed certificates in development environments.", + "list": false, + "list_add_label": "Add More", + "name": "verify_certs", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": false + } + }, + "tool_mode": false + }, + "selected_output": "dataframe", + "showNode": true, + "type": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding" + }, + "dragging": false, + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3", + "measured": { + "height": 904, + "width": 320 + }, + "position": { + "x": 387.88180968996585, + "y": 879.9328678310967 + }, + "selected": true, + "type": "genericNode" + }, + { + "data": { + "description": "Generate embeddings using a specified provider.", + "display_name": "Embedding Model", + "id": "EmbeddingModel-EzcW6", + "node": { + "base_classes": [ + "Embeddings" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Generate embeddings using a specified provider.", + "display_name": "Embedding Model", + "documentation": "https://docs.langflow.org/components-embedding-models", + "edited": false, + "field_order": [ + "provider", + "api_base", + "ollama_base_url", + "base_url_ibm_watsonx", + "model", + "api_key", + "project_id", + "dimensions", + "chunk_size", + "request_timeout", + "max_retries", + "show_progress_bar", + "model_kwargs", + "truncate_input_tokens", + "input_text" + ], + "frozen": false, + "icon": "binary", + "last_updated": "2025-11-26T00:04:41.271Z", + "legacy": false, + "metadata": { + "code_hash": "9e44c83a5058", + "dependencies": { + "dependencies": [ + { + "name": "requests", + "version": "2.32.5" + }, + { + "name": "ibm_watsonx_ai", + "version": "1.4.2" + }, + { + "name": "langchain_openai", + "version": "0.3.23" + }, + { + "name": "lfx", + "version": null + }, + { + "name": "langchain_ollama", + "version": "0.3.10" + }, + { + "name": "langchain_community", + "version": "0.3.21" + }, + { + "name": "langchain_ibm", + "version": "0.3.19" + } + ], + "total_dependencies": 7 + }, + "module": "custom_components.embedding_model" + }, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Embedding Model", + "group_outputs": false, + "loop_types": null, + "method": "build_embeddings", + "name": "embeddings", + "options": null, + "required_inputs": null, + "selected": "Embeddings", + "tool_mode": true, + "types": [ + "Embeddings" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_frontend_node_flow_id": { + "value": "ebc01d31-1976-46ce-a385-b0240327226c" + }, + "_frontend_node_folder_id": { + "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + }, + "_type": "Component", + "api_base": { + "_input_type": "MessageTextInput", + "advanced": true, + "display_name": "API Base URL", + "dynamic": false, + "info": "Base URL for the API. 
Leave empty for default.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "api_base", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "api_key": { + "_input_type": "SecretStrInput", + "advanced": false, + "display_name": "API Key (Optional)", + "dynamic": false, + "info": "Model Provider API key", + "input_types": [], + "load_from_db": true, + "name": "api_key", + "override_skip": false, + "password": true, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": false, + "title_case": false, + "track_in_telemetry": false, + "type": "str", + "value": "OPENAI_API_KEY" + }, + "base_url_ibm_watsonx": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "watsonx API Endpoint", + "dynamic": false, + "external_options": {}, + "info": "The base URL of the API (IBM watsonx.ai only)", + "name": "base_url_ibm_watsonx", + "options": [ + "https://us-south.ml.cloud.ibm.com", + "https://eu-de.ml.cloud.ibm.com", + "https://eu-gb.ml.cloud.ibm.com", + "https://au-syd.ml.cloud.ibm.com", + "https://jp-tok.ml.cloud.ibm.com", + "https://ca-tor.ml.cloud.ibm.com" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": false, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "https://us-south.ml.cloud.ibm.com" + }, + "chunk_size": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Chunk Size", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "chunk_size", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 1000 + }, + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL 
= \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and 
transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n 
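# Selecting OpenAI re-labels and requires the API key, then hides every\n            # provider-specific input that does not apply (the Ollama URL, the watsonx\n            # endpoint/project fields, and the new truncate_input_tokens / input_text).\n            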
build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await 
is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + }, + "dimensions": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Dimensions", + "dynamic": false, + "info": "The number of dimensions the resulting output embeddings should have. Only supported by certain models.", + "list": false, + "list_add_label": "Add More", + "name": "dimensions", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": "" + }, + "input_text": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Include the original text in the output", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "input_text", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, + "is_refresh": false, + "max_retries": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Max Retries", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "max_retries", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 3 + }, + "model": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Model Name", + "dynamic": false, + "external_options": {}, + "info": "Select the embedding model to use", + "name": "model", + "options": [ + "bge-large:latest", + "qwen3-embedding:4b" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "refresh_button": true, + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "" + }, + "model_kwargs": { + "_input_type": "DictInput", + "advanced": true, + "display_name": "Model Kwargs", + "dynamic": false, + "info": "Additional keyword arguments to pass to the model.", + "list": false, + "list_add_label": "Add More", + "name": "model_kwargs", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "track_in_telemetry": false, + "type": "dict", + "value": {} + }, + "ollama_base_url": { + "_input_type": "MessageTextInput", + "advanced": false, + "display_name": "Ollama API URL", + "dynamic": false, + "info": "Endpoint of the Ollama API (Ollama only). 
Defaults to http://localhost:11434", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": true, + "name": "ollama_base_url", + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "OLLAMA_BASE_URL" + }, + "project_id": { + "_input_type": "MessageTextInput", + "advanced": false, + "display_name": "Project ID", + "dynamic": false, + "info": "IBM watsonx.ai Project ID (required for IBM watsonx.ai)", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "project_id", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "provider": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Model Provider", + "dynamic": false, + "external_options": {}, + "info": "Select the embedding model provider", + "name": "provider", + "options": [ + "OpenAI", + "Ollama", + "IBM watsonx.ai" + ], + "options_metadata": [ + { + "icon": "OpenAI" + }, + { + "icon": "Ollama" + }, + { + "icon": "WatsonxAI" + } + ], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "selected_metadata": { + "icon": "Ollama" + }, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "Ollama" + }, + "request_timeout": { + "_input_type": "FloatInput", + "advanced": true, + "display_name": "Request Timeout", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "request_timeout", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "float", + "value": "" + }, + "show_progress_bar": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Show Progress Bar", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "show_progress_bar", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": false + }, + "truncate_input_tokens": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Truncate Input Tokens", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "truncate_input_tokens", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 200 + } + }, + "tool_mode": false + }, + "showNode": true, + "type": "EmbeddingModel" + }, + "dragging": false, + "id": "EmbeddingModel-EzcW6", + "measured": { + "height": 369, + "width": 320 + }, + "position": { + "x": -742.3027218520097, + "y": 1224.367844475079 + }, + "selected": false, + "type": "genericNode" + }, + { + "data": { + "description": "Generate 
embeddings using a specified provider.", + "display_name": "Embedding Model", + "id": "EmbeddingModel-cONxU", + "node": { + "base_classes": [ + "Embeddings" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Generate embeddings using a specified provider.", + "display_name": "Embedding Model", + "documentation": "https://docs.langflow.org/components-embedding-models", + "edited": false, + "field_order": [ + "provider", + "api_base", + "ollama_base_url", + "base_url_ibm_watsonx", + "model", + "api_key", + "project_id", + "dimensions", + "chunk_size", + "request_timeout", + "max_retries", + "show_progress_bar", + "model_kwargs", + "truncate_input_tokens", + "input_text" + ], + "frozen": false, + "icon": "binary", + "last_updated": "2025-11-26T00:04:41.272Z", + "legacy": false, + "metadata": { + "code_hash": "9e44c83a5058", + "dependencies": { + "dependencies": [ + { + "name": "requests", + "version": "2.32.5" + }, + { + "name": "ibm_watsonx_ai", + "version": "1.4.2" + }, + { + "name": "langchain_openai", + "version": "0.3.23" + }, + { + "name": "lfx", + "version": null + }, + { + "name": "langchain_ollama", + "version": "0.3.10" + }, + { + "name": "langchain_community", + "version": "0.3.21" + }, + { + "name": "langchain_ibm", + "version": "0.3.19" + } + ], + "total_dependencies": 7 + }, + "module": "custom_components.embedding_model" + }, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Embedding Model", + "group_outputs": false, + "loop_types": null, + "method": "build_embeddings", + "name": "embeddings", + "options": null, + "required_inputs": null, + "selected": "Embeddings", + "tool_mode": true, + "types": [ + "Embeddings" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_frontend_node_flow_id": { + "value": "ebc01d31-1976-46ce-a385-b0240327226c" + }, + "_frontend_node_folder_id": { + "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + }, + "_type": "Component", + "api_base": { + "_input_type": "MessageTextInput", + "advanced": true, + "display_name": "API Base URL", + "dynamic": false, + "info": "Base URL for the API. 
Leave empty for default.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "api_base", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "api_key": { + "_input_type": "SecretStrInput", + "advanced": false, + "display_name": "IBM watsonx.ai API Key", + "dynamic": false, + "info": "Model Provider API key", + "input_types": [], + "load_from_db": true, + "name": "api_key", + "override_skip": false, + "password": true, + "placeholder": "", + "real_time_refresh": true, + "required": true, + "show": true, + "title_case": false, + "track_in_telemetry": false, + "type": "str", + "value": "WATSONX_API_KEY" + }, + "base_url_ibm_watsonx": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "watsonx API Endpoint", + "dynamic": false, + "external_options": {}, + "info": "The base URL of the API (IBM watsonx.ai only)", + "name": "base_url_ibm_watsonx", + "options": [ + "https://us-south.ml.cloud.ibm.com", + "https://eu-de.ml.cloud.ibm.com", + "https://eu-gb.ml.cloud.ibm.com", + "https://au-syd.ml.cloud.ibm.com", + "https://jp-tok.ml.cloud.ibm.com", + "https://ca-tor.ml.cloud.ibm.com" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "https://us-south.ml.cloud.ibm.com" + }, + "chunk_size": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Chunk Size", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "chunk_size", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 1000 + }, + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = 
\"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and 
transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n 
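# Each provider branch below flips \"show\" flags so only the active\n            # provider's inputs render; the IBM watsonx.ai branch additionally surfaces\n            # truncate_input_tokens and input_text for the embed-text return options.\n            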
build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await 
is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + }, + "dimensions": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Dimensions", + "dynamic": false, + "info": "The number of dimensions the resulting output embeddings should have. Only supported by certain models.", + "list": false, + "list_add_label": "Add More", + "name": "dimensions", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": "" + }, + "input_text": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Include the original text in the output", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "input_text", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, + "is_refresh": false, + "max_retries": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Max Retries", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "max_retries", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 3 + }, + "model": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Model Name", + "dynamic": false, + "external_options": {}, + "info": "Select the embedding model to use", + "name": "model", + "options": [ + "ibm/granite-embedding-278m-multilingual", + "ibm/slate-125m-english-rtrvr-v2", + "ibm/slate-30m-english-rtrvr-v2", + "intfloat/multilingual-e5-large", + "sentence-transformers/all-minilm-l6-v2" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "refresh_button": true, + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "ibm/granite-embedding-278m-multilingual" + }, + "model_kwargs": { + "_input_type": "DictInput", + "advanced": true, + "display_name": "Model Kwargs", + "dynamic": false, + "info": "Additional keyword arguments to pass to the model.", + "list": false, + "list_add_label": "Add More", + "name": "model_kwargs", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "track_in_telemetry": false, + "type": "dict", + "value": {} + }, + "ollama_base_url": { + "_input_type": "MessageTextInput", + "advanced": false, + "display_name": "Ollama API URL", + "dynamic": false, + "info": "Endpoint of the Ollama API (Ollama only). 
Defaults to http://localhost:11434", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": true, + "name": "ollama_base_url", + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "OLLAMA_BASE_URL" + }, + "project_id": { + "_input_type": "MessageTextInput", + "advanced": false, + "display_name": "Project ID", + "dynamic": false, + "info": "IBM watsonx.ai Project ID (required for IBM watsonx.ai)", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": true, + "name": "project_id", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "WATSONX_PROJECT_ID" + }, + "provider": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Model Provider", + "dynamic": false, + "external_options": {}, + "info": "Select the embedding model provider", + "name": "provider", + "options": [ + "OpenAI", + "Ollama", + "IBM watsonx.ai" + ], + "options_metadata": [ + { + "icon": "OpenAI" + }, + { + "icon": "Ollama" + }, + { + "icon": "WatsonxAI" + } + ], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "selected_metadata": { + "icon": "WatsonxAI" + }, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "IBM watsonx.ai" + }, + "request_timeout": { + "_input_type": "FloatInput", + "advanced": true, + "display_name": "Request Timeout", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "request_timeout", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "float", + "value": "" + }, + "show_progress_bar": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Show Progress Bar", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "show_progress_bar", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": false + }, + "truncate_input_tokens": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Truncate Input Tokens", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "truncate_input_tokens", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 200 + } + }, + "tool_mode": false + }, + "showNode": true, + "type": "EmbeddingModel" + }, + "dragging": false, + "id": "EmbeddingModel-cONxU", + "measured": { + "height": 534, + "width": 320 + }, + "position": { + "x": -376.6522664393833, + "y": 1053.725431820861 }, "selected": false, "type": "genericNode" } ], "viewport": { - "x": 
-206.89994136968403, - "y": 74.41941219497068, - "zoom": 0.6126010241500153 + "x": 411.76655513325625, + "y": -427.61404452215936, + "zoom": 0.6865723281252197 } }, "description": "OpenRAG OpenSearch Nudges generator, based on the OpenSearch documents and the chat history.", "endpoint_name": null, "id": "ebc01d31-1976-46ce-a385-b0240327226c", "is_component": false, - "last_tested_version": "1.7.0.dev19", + "last_tested_version": "1.7.0", "name": "OpenRAG OpenSearch Nudges", "tags": [ "assistants", diff --git a/flows/openrag_url_mcp.json b/flows/openrag_url_mcp.json index 0333faf1..a04b926e 100644 --- a/flows/openrag_url_mcp.json +++ b/flows/openrag_url_mcp.json @@ -1,35 +1,6 @@ { "data": { "edges": [ - { - "animated": false, - "className": "", - "data": { - "sourceHandle": { - "dataType": "SplitText", - "id": "SplitText-QIKhg", - "name": "dataframe", - "output_types": [ - "DataFrame" - ] - }, - "targetHandle": { - "fieldName": "ingest_data", - "id": "OpenSearchHybrid-Ve6bS", - "inputTypes": [ - "Data", - "DataFrame" - ], - "type": "other" - } - }, - "id": "xy-edge__SplitText-QIKhg{œdataTypeœ:œSplitTextœ,œidœ:œSplitText-QIKhgœ,œnameœ:œdataframeœ,œoutput_typesœ:[œDataFrameœ]}-OpenSearchHybrid-Ve6bS{œfieldNameœ:œingest_dataœ,œidœ:œOpenSearchHybrid-Ve6bSœ,œinputTypesœ:[œDataœ,œDataFrameœ],œtypeœ:œotherœ}", - "selected": false, - "source": "SplitText-QIKhg", - "sourceHandle": "{œdataTypeœ:œSplitTextœ,œidœ:œSplitText-QIKhgœ,œnameœ:œdataframeœ,œoutput_typesœ:[œDataFrameœ]}", - "target": "OpenSearchHybrid-Ve6bS", - "targetHandle": "{œfieldNameœ:œingest_dataœ,œidœ:œOpenSearchHybrid-Ve6bSœ,œinputTypesœ:[œDataœ,œDataFrameœ],œtypeœ:œotherœ}" - }, { "animated": false, "className": "", @@ -144,34 +115,6 @@ "target": "DataFrameOperations-RhKoe", "targetHandle": "{œfieldNameœ:œdfœ,œidœ:œDataFrameOperations-RhKoeœ,œinputTypesœ:[œDataFrameœ],œtypeœ:œotherœ}" }, - { - "animated": false, - "className": "", - "data": { - "sourceHandle": { - "dataType": "ChatInput", - "id": "ChatInput-sskrk", - "name": "message", - "output_types": [ - "Message" - ] - }, - "targetHandle": { - "fieldName": "new_column_value", - "id": "DataFrameOperations-hqIoy", - "inputTypes": [ - "Message" - ], - "type": "str" - } - }, - "id": "xy-edge__ChatInput-sskrk{œdataTypeœ:œChatInputœ,œidœ:œChatInput-sskrkœ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}-DataFrameOperations-hqIoy{œfieldNameœ:œnew_column_valueœ,œidœ:œDataFrameOperations-hqIoyœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", - "selected": false, - "source": "ChatInput-sskrk", - "sourceHandle": "{œdataTypeœ:œChatInputœ,œidœ:œChatInput-sskrkœ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}", - "target": "DataFrameOperations-hqIoy", - "targetHandle": "{œfieldNameœ:œnew_column_valueœ,œidœ:œDataFrameOperations-hqIoyœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}" - }, { "animated": false, "className": "", @@ -232,7 +175,173 @@ }, { "animated": false, - "className": "", + "data": { + "sourceHandle": { + "dataType": "SecretInput", + "id": "SecretInput-lr9k6", + "name": "text", + "output_types": [ + "Message" + ] + }, + "targetHandle": { + "fieldName": "dynamic_connector_type", + "id": "AdvancedDynamicFormBuilder-ziCu4", + "inputTypes": [ + "Text", + "Message" + ], + "type": "str" + } + }, + "id": "xy-edge__SecretInput-lr9k6{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-lr9k6œ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-ziCu4{œfieldNameœ:œdynamic_connector_typeœ,œidœ:œAdvancedDynamicFormBuilder-ziCu4œ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}", + "selected": false, + 
"source": "SecretInput-lr9k6", + "sourceHandle": "{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-lr9k6œ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", + "target": "AdvancedDynamicFormBuilder-ziCu4", + "targetHandle": "{œfieldNameœ:œdynamic_connector_typeœ,œidœ:œAdvancedDynamicFormBuilder-ziCu4œ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}" + }, + { + "animated": false, + "data": { + "sourceHandle": { + "dataType": "SecretInput", + "id": "SecretInput-KYwsB", + "name": "text", + "output_types": [ + "Message" + ] + }, + "targetHandle": { + "fieldName": "dynamic_owner", + "id": "AdvancedDynamicFormBuilder-ziCu4", + "inputTypes": [ + "Text", + "Message" + ], + "type": "str" + } + }, + "id": "xy-edge__SecretInput-KYwsB{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-KYwsBœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-ziCu4{œfieldNameœ:œdynamic_ownerœ,œidœ:œAdvancedDynamicFormBuilder-ziCu4œ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}", + "selected": false, + "source": "SecretInput-KYwsB", + "sourceHandle": "{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-KYwsBœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", + "target": "AdvancedDynamicFormBuilder-ziCu4", + "targetHandle": "{œfieldNameœ:œdynamic_ownerœ,œidœ:œAdvancedDynamicFormBuilder-ziCu4œ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}" + }, + { + "animated": false, + "data": { + "sourceHandle": { + "dataType": "SecretInput", + "id": "SecretInput-pYHMH", + "name": "text", + "output_types": [ + "Message" + ] + }, + "targetHandle": { + "fieldName": "dynamic_owner_email", + "id": "AdvancedDynamicFormBuilder-ziCu4", + "inputTypes": [ + "Text", + "Message" + ], + "type": "str" + } + }, + "id": "xy-edge__SecretInput-pYHMH{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-pYHMHœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-ziCu4{œfieldNameœ:œdynamic_owner_emailœ,œidœ:œAdvancedDynamicFormBuilder-ziCu4œ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}", + "selected": false, + "source": "SecretInput-pYHMH", + "sourceHandle": "{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-pYHMHœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", + "target": "AdvancedDynamicFormBuilder-ziCu4", + "targetHandle": "{œfieldNameœ:œdynamic_owner_emailœ,œidœ:œAdvancedDynamicFormBuilder-ziCu4œ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}" + }, + { + "animated": false, + "data": { + "sourceHandle": { + "dataType": "SecretInput", + "id": "SecretInput-aoBVB", + "name": "text", + "output_types": [ + "Message" + ] + }, + "targetHandle": { + "fieldName": "dynamic_owner_name", + "id": "AdvancedDynamicFormBuilder-ziCu4", + "inputTypes": [ + "Text", + "Message" + ], + "type": "str" + } + }, + "id": "xy-edge__SecretInput-aoBVB{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-aoBVBœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-ziCu4{œfieldNameœ:œdynamic_owner_nameœ,œidœ:œAdvancedDynamicFormBuilder-ziCu4œ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}", + "selected": false, + "source": "SecretInput-aoBVB", + "sourceHandle": "{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-aoBVBœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", + "target": "AdvancedDynamicFormBuilder-ziCu4", + "targetHandle": "{œfieldNameœ:œdynamic_owner_nameœ,œidœ:œAdvancedDynamicFormBuilder-ziCu4œ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}" + }, + { + "animated": false, + "data": { + "sourceHandle": { + "dataType": "AdvancedDynamicFormBuilder", + "id": "AdvancedDynamicFormBuilder-ziCu4", + "name": "form_data", + "output_types": [ + "Data" + ] + }, + "targetHandle": { + "fieldName": "docs_metadata", + 
"id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-PMGGV", + "inputTypes": [ + "Data" + ], + "type": "table" + } + }, + "id": "xy-edge__AdvancedDynamicFormBuilder-ziCu4{œdataTypeœ:œAdvancedDynamicFormBuilderœ,œidœ:œAdvancedDynamicFormBuilder-ziCu4œ,œnameœ:œform_dataœ,œoutput_typesœ:[œDataœ]}-OpenSearchVectorStoreComponentMultimodalMultiEmbedding-PMGGV{œfieldNameœ:œdocs_metadataœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-PMGGVœ,œinputTypesœ:[œDataœ],œtypeœ:œtableœ}", + "selected": false, + "source": "AdvancedDynamicFormBuilder-ziCu4", + "sourceHandle": "{œdataTypeœ:œAdvancedDynamicFormBuilderœ,œidœ:œAdvancedDynamicFormBuilder-ziCu4œ,œnameœ:œform_dataœ,œoutput_typesœ:[œDataœ]}", + "target": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-PMGGV", + "targetHandle": "{œfieldNameœ:œdocs_metadataœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-PMGGVœ,œinputTypesœ:[œDataœ],œtypeœ:œtableœ}" + }, + { + "animated": false, + "data": { + "sourceHandle": { + "dataType": "SplitText", + "id": "SplitText-QIKhg", + "name": "dataframe", + "output_types": [ + "DataFrame" + ] + }, + "targetHandle": { + "fieldName": "ingest_data", + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-PMGGV", + "inputTypes": [ + "Data", + "DataFrame" + ], + "type": "other" + } + }, + "id": "xy-edge__SplitText-QIKhg{œdataTypeœ:œSplitTextœ,œidœ:œSplitText-QIKhgœ,œnameœ:œdataframeœ,œoutput_typesœ:[œDataFrameœ]}-OpenSearchVectorStoreComponentMultimodalMultiEmbedding-PMGGV{œfieldNameœ:œingest_dataœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-PMGGVœ,œinputTypesœ:[œDataœ,œDataFrameœ],œtypeœ:œotherœ}", + "selected": false, + "source": "SplitText-QIKhg", + "sourceHandle": "{œdataTypeœ:œSplitTextœ,œidœ:œSplitText-QIKhgœ,œnameœ:œdataframeœ,œoutput_typesœ:[œDataFrameœ]}", + "target": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-PMGGV", + "targetHandle": "{œfieldNameœ:œingest_dataœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-PMGGVœ,œinputTypesœ:[œDataœ,œDataFrameœ],œtypeœ:œotherœ}" + }, + { + "animated": false, "data": { "sourceHandle": { "dataType": "EmbeddingModel", @@ -244,19 +353,73 @@ }, "targetHandle": { "fieldName": "embedding", - "id": "OpenSearchHybrid-Ve6bS", + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-PMGGV", "inputTypes": [ "Embeddings" ], "type": "other" } }, - "id": "xy-edge__EmbeddingModel-XjV5v{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-XjV5vœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}-OpenSearchHybrid-Ve6bS{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchHybrid-Ve6bSœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}", + "id": "xy-edge__EmbeddingModel-XjV5v{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-XjV5vœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}-OpenSearchVectorStoreComponentMultimodalMultiEmbedding-PMGGV{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-PMGGVœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}", "selected": false, "source": "EmbeddingModel-XjV5v", "sourceHandle": "{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-XjV5vœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}", - "target": "OpenSearchHybrid-Ve6bS", - "targetHandle": "{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchHybrid-Ve6bSœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}" + "target": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-PMGGV", + "targetHandle": 
"{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-PMGGVœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}" + }, + { + "animated": false, + "data": { + "sourceHandle": { + "dataType": "EmbeddingModel", + "id": "EmbeddingModel-muH88", + "name": "embeddings", + "output_types": [ + "Embeddings" + ] + }, + "targetHandle": { + "fieldName": "embedding", + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-PMGGV", + "inputTypes": [ + "Embeddings" + ], + "type": "other" + } + }, + "id": "xy-edge__EmbeddingModel-muH88{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-muH88œ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}-OpenSearchVectorStoreComponentMultimodalMultiEmbedding-PMGGV{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-PMGGVœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}", + "selected": false, + "source": "EmbeddingModel-muH88", + "sourceHandle": "{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-muH88œ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}", + "target": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-PMGGV", + "targetHandle": "{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-PMGGVœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}" + }, + { + "animated": false, + "data": { + "sourceHandle": { + "dataType": "EmbeddingModel", + "id": "EmbeddingModel-Rp0iI", + "name": "embeddings", + "output_types": [ + "Embeddings" + ] + }, + "targetHandle": { + "fieldName": "embedding", + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-PMGGV", + "inputTypes": [ + "Embeddings" + ], + "type": "other" + } + }, + "id": "xy-edge__EmbeddingModel-Rp0iI{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-Rp0iIœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}-OpenSearchVectorStoreComponentMultimodalMultiEmbedding-PMGGV{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-PMGGVœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}", + "selected": false, + "source": "EmbeddingModel-Rp0iI", + "sourceHandle": "{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-Rp0iIœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}", + "target": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-PMGGV", + "targetHandle": "{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-PMGGVœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}" } ], "nodes": [ @@ -503,647 +666,6 @@ "type": "genericNode", "width": 320 }, - { - "data": { - "id": "OpenSearchHybrid-Ve6bS", - "node": { - "base_classes": [ - "Data", - "DataFrame", - "VectorStore" - ], - "beta": false, - "conditional_paths": [], - "custom_fields": {}, - "description": "Store and search documents using OpenSearch with hybrid semantic and keyword search capabilities.", - "display_name": "OpenSearch", - "documentation": "", - "edited": true, - "field_order": [ - "docs_metadata", - "opensearch_url", - "index_name", - "engine", - "space_type", - "ef_construction", - "m", - "ingest_data", - "search_query", - "should_cache_vector_store", - "embedding", - "vector_field", - "number_of_results", - "filter_expression", - "auth_mode", - "username", - "password", - "jwt_token", - "jwt_header", - "bearer_prefix", - "use_ssl", - "verify_certs" - ], - "frozen": false, - "icon": "OpenSearch", - "legacy": false, - "lf_version": "1.6.0", - "metadata": { - "code_hash": "08d808984c3d", - "dependencies": { - "dependencies": [ - { - "name": "opensearchpy", - "version": "2.8.0" - }, - { - "name": "lfx", - "version": 
null - } - ], - "total_dependencies": 2 - }, - "module": "custom_components.opensearch" - }, - "minimized": false, - "output_types": [], - "outputs": [ - { - "allows_loop": false, - "cache": true, - "display_name": "Search Results", - "group_outputs": false, - "hidden": null, - "method": "search_documents", - "name": "search_results", - "options": null, - "required_inputs": null, - "tool_mode": true, - "types": [ - "Data" - ], - "value": "__UNDEFINED__" - }, - { - "allows_loop": false, - "cache": true, - "display_name": "DataFrame", - "group_outputs": false, - "hidden": null, - "method": "as_dataframe", - "name": "dataframe", - "options": null, - "required_inputs": null, - "selected": "DataFrame", - "tool_mode": true, - "types": [ - "DataFrame" - ], - "value": "__UNDEFINED__" - }, - { - "allows_loop": false, - "cache": true, - "display_name": "Vector Store Connection", - "group_outputs": false, - "hidden": false, - "method": "as_vector_store", - "name": "vectorstoreconnection", - "options": null, - "required_inputs": null, - "tool_mode": true, - "types": [ - "VectorStore" - ], - "value": "__UNDEFINED__" - } - ], - "pinned": false, - "template": { - "_type": "Component", - "auth_mode": { - "_input_type": "DropdownInput", - "advanced": false, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Authentication Mode", - "dynamic": false, - "external_options": {}, - "info": "Authentication method: 'basic' for username/password authentication, or 'jwt' for JSON Web Token (Bearer) authentication.", - "load_from_db": false, - "name": "auth_mode", - "options": [ - "basic", - "jwt" - ], - "options_metadata": [], - "placeholder": "", - "real_time_refresh": true, - "required": false, - "show": true, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "jwt" - }, - "bearer_prefix": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Prefix 'Bearer '", - "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "name": "bearer_prefix", - "placeholder": "", - "required": false, - "show": false, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "bool", - "value": true - }, - "code": { - "advanced": true, - "dynamic": true, - "fileTypes": [], - "file_path": "", - "info": "", - "list": false, - "load_from_db": false, - "multiline": true, - "name": "code", - "password": false, - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "type": "code", - "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom typing import Any, List, Optional\n\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., 
\"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponent(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. 
\"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"]\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. \"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"]),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model being used (e.g., 'text-embedding-3-small'). \"\n \"Used to create dynamic vector field names and track which model embedded each document. \"\n \"Auto-detected from embedding component if not specified.\"\n ),\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=False,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=False,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=True,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. \"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from embedding component\n if hasattr(self, \"embedding\") and self.embedding:\n if hasattr(self.embedding, \"model\"):\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_name\"):\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'model' or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\n \"type\": \"keyword\"\n },\n \"embedding_dimensions\": {\n \"type\": \"integer\"\n }\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n raise ValueError(\n f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n )\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication 
configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n self.log(self.ingest_data)\n client = self.build_client()\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n docs = self.ingest_data or []\n if not docs:\n self.log(\"No documents to ingest.\")\n return\n\n # Get embedding model name\n embedding_model = self._get_embedding_model_name()\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n self.log(f\"Using embedding model: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] 
Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Generate embeddings (threaded for concurrency) with retries\n def embed_chunk(chunk_text: str) -> list[float]:\n return self.embedding.embed_documents([chunk_text])[0]\n\n vectors: Optional[List[List[float]]] = None\n last_exception: Optional[Exception] = None\n delay = 1.0\n attempts = 0\n\n while attempts < 3:\n attempts += 1\n try:\n max_workers = min(max(len(texts), 1), 8)\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n vectors = [None] * len(texts)\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n break\n except Exception as exc:\n last_exception = exc\n if attempts >= 3:\n logger.error(\n \"Embedding generation failed after retries\",\n error=str(exc),\n )\n raise\n logger.warning(\n \"Threaded embedding generation failed (attempt %s/%s), retrying in %.1fs\",\n attempts,\n 3,\n delay,\n )\n time.sleep(delay)\n delay = min(delay * 2, 8.0)\n\n if vectors is None:\n raise RuntimeError(\n f\"Embedding generation failed: {last_exception}\" if last_exception else \"Embedding generation failed\"\n )\n\n if not vectors:\n self.log(\"No vectors generated from documents.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(\n f\"Failed to create index '{self.index_name}': {creation_error}\"\n )\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use 
dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\n \"size\": 0,\n \"aggs\": {\n \"embedding_models\": {\n \"terms\": {\n \"field\": \"embedding_model\",\n \"size\": 10\n }\n }\n }\n }\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\n \"bool\": {\n 
\"filter\": filter_clauses\n }\n }\n\n result = client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n return models\n except Exception as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n return [self._get_embedding_model_name()]\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except Exception as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(\n f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\"\n )\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return True\n\n return False\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. 
Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models in parallel\n query_embeddings = {}\n\n # Note: Langflow is synchronous, so we can't use true async here\n # But we log the intent for parallel processing\n logger.info(f\"Generating embeddings for {len(available_models)} models\")\n\n original_model_attr = getattr(self.embedding, \"model\", None)\n original_deployment_attr = getattr(self.embedding, \"deployment\", None)\n original_dimensions_attr = getattr(self.embedding, \"dimensions\", None)\n\n for model_name in available_models:\n try:\n # In a real async environment, these would run in parallel\n # For now, they run sequentially\n if hasattr(self.embedding, \"model\"):\n setattr(self.embedding, \"model\", model_name)\n if hasattr(self.embedding, \"deployment\"):\n setattr(self.embedding, \"deployment\", model_name)\n if hasattr(self.embedding, \"dimensions\"):\n setattr(self.embedding, \"dimensions\", None)\n vec = self.embedding.embed_query(q)\n query_embeddings[model_name] = vec\n logger.info(f\"Generated embedding for model: {model_name}\")\n except Exception as e:\n logger.error(f\"Failed to generate embedding for {model_name}: {e}\")\n\n if hasattr(self.embedding, \"model\"):\n setattr(self.embedding, \"model\", original_model_attr)\n if hasattr(self.embedding, \"deployment\"):\n setattr(self.embedding, \"deployment\", original_deployment_attr)\n if hasattr(self.embedding, \"dimensions\"):\n setattr(self.embedding, \"dimensions\", original_dimensions_attr)\n\n if not query_embeddings:\n msg = \"Failed to generate embeddings for any model\"\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, 
embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n continue\n\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\n \"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)],\n \"minimum_should_match\": 1\n }\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(\n f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models\"\n )\n\n try:\n resp = client.search(\n index=self.index_name, body=body, params={\"terminate_after\": 0}\n )\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates 
parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n raw = self.search(self.search_query or \"\")\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n self.log(self.ingest_data)\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n 
build_config[\"bearer_prefix\"][\"required\"] = False\n\n if is_basic:\n build_config[\"jwt_token\"][\"value\"] = \"\"\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n" - }, - "docs_metadata": { - "_input_type": "TableInput", - "advanced": false, - "display_name": "Document Metadata", - "dynamic": false, - "info": "Additional metadata key-value pairs to be added to all ingested documents. Useful for tagging documents with source information, categories, or other custom attributes.", - "input_types": [ - "Data" - ], - "is_list": true, - "list_add_label": "Add More", - "name": "docs_metadata", - "placeholder": "", - "required": false, - "show": true, - "table_icon": "Table", - "table_schema": [ - { - "description": "Key name", - "display_name": "Key", - "formatter": "text", - "name": "key", - "type": "str" - }, - { - "description": "Value of the metadata", - "display_name": "Value", - "formatter": "text", - "load_from_db": true, - "name": "value", - "type": "str" - } - ], - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "trigger_icon": "Table", - "trigger_text": "Open table", - "type": "table", - "value": [ - { - "key": "owner_name", - "value": "OWNER_NAME" - }, - { - "key": "owner", - "value": "OWNER" - }, - { - "key": "owner_email", - "value": "OWNER_EMAIL" - }, - { - "key": "connector_type", - "value": "CONNECTOR_TYPE_URL" - } - ] - }, - "ef_construction": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "EF Construction", - "dynamic": false, - "info": "Size of the dynamic candidate list during index construction. Higher values improve recall but increase indexing time and memory usage.", - "list": false, - "list_add_label": "Add More", - "name": "ef_construction", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "int", - "value": 512 - }, - "embedding": { - "_input_type": "HandleInput", - "advanced": false, - "display_name": "Embedding", - "dynamic": false, - "info": "", - "input_types": [ - "Embeddings" - ], - "list": false, - "list_add_label": "Add More", - "name": "embedding", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "trace_as_metadata": true, - "type": "other", - "value": "" - }, - "engine": { - "_input_type": "DropdownInput", - "advanced": true, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Vector Engine", - "dynamic": false, - "external_options": {}, - "info": "Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.", - "load_from_db": false, - "name": "engine", - "options": [ - "jvector", - "nmslib", - "faiss", - "lucene" - ], - "options_metadata": [], - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "jvector" - }, - "filter_expression": { - "_input_type": "MultilineInput", - "advanced": false, - "copy_field": false, - "display_name": "Search Filters (JSON)", - "dynamic": false, - "info": "Optional JSON configuration for search filtering, result limits, and score thresholds.\n\nFormat 1 - Explicit filters:\n{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, {\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\n\nFormat 2 - Context-style mapping:\n{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\n\nUse __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "multiline": true, - "name": "filter_expression", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "str", - "value": "" - }, - "index_name": { - "_input_type": "StrInput", - "advanced": false, - "display_name": "Index Name", - "dynamic": false, - "info": "The OpenSearch index name where documents will be stored and searched. Will be created automatically if it doesn't exist.", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "index_name", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "documents" - }, - "ingest_data": { - "_input_type": "HandleInput", - "advanced": false, - "display_name": "Ingest Data", - "dynamic": false, - "info": "", - "input_types": [ - "Data", - "DataFrame" - ], - "list": true, - "list_add_label": "Add More", - "name": "ingest_data", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "trace_as_metadata": true, - "type": "other", - "value": "" - }, - "jwt_header": { - "_input_type": "StrInput", - "advanced": true, - "display_name": "JWT Header Name", - "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "jwt_header", - "placeholder": "", - "required": false, - "show": false, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "Authorization" - }, - "jwt_token": { - "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "JWT Token", - "dynamic": false, - "info": "Valid JSON Web Token for authentication. Will be sent in the Authorization header (with optional 'Bearer ' prefix).", - "input_types": [], - "load_from_db": true, - "name": "jwt_token", - "password": true, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "type": "str", - "value": "JWT" - }, - "m": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "M Parameter", - "dynamic": false, - "info": "Number of bidirectional connections for each vector in the HNSW graph. 
Higher values improve search quality but increase memory usage and indexing time.", - "list": false, - "list_add_label": "Add More", - "name": "m", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "int", - "value": 16 - }, - "number_of_results": { - "_input_type": "IntInput", - "advanced": true, - "display_name": "Default Result Limit", - "dynamic": false, - "info": "Default maximum number of search results to return when no limit is specified in the filter expression.", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "number_of_results", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "int", - "value": 15 - }, - "opensearch_url": { - "_input_type": "StrInput", - "advanced": false, - "display_name": "OpenSearch URL", - "dynamic": false, - "info": "The connection URL for your OpenSearch cluster (e.g., http://localhost:9200 for local development or your cloud endpoint).", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "opensearch_url", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "https://opensearch:9200" - }, - "password": { - "_input_type": "SecretStrInput", - "advanced": false, - "display_name": "OpenSearch Password", - "dynamic": false, - "info": "", - "input_types": [], - "load_from_db": false, - "name": "password", - "password": true, - "placeholder": "", - "required": false, - "show": false, - "title_case": false, - "type": "str", - "value": "" - }, - "search_query": { - "_input_type": "QueryInput", - "advanced": false, - "display_name": "Search Query", - "dynamic": false, - "info": "Enter a query to run a similarity search.", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "search_query", - "placeholder": "Enter a query...", - "required": false, - "show": true, - "title_case": false, - "tool_mode": true, - "trace_as_input": true, - "trace_as_metadata": true, - "type": "query", - "value": "" - }, - "should_cache_vector_store": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Cache Vector Store", - "dynamic": false, - "info": "If True, the vector store will be cached for the current build of the component. This is useful for components that have multiple output methods and want to share the same vector store.", - "list": false, - "list_add_label": "Add More", - "name": "should_cache_vector_store", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "bool", - "value": true - }, - "space_type": { - "_input_type": "DropdownInput", - "advanced": true, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Distance Metric", - "dynamic": false, - "external_options": {}, - "info": "Distance metric for calculating vector similarity. 
'l2' (Euclidean) is most common, 'cosinesimil' for cosine similarity, 'innerproduct' for dot product.", - "name": "space_type", - "options": [ - "l2", - "l1", - "cosinesimil", - "linf", - "innerproduct" - ], - "options_metadata": [], - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "l2" - }, - "use_ssl": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Use SSL/TLS", - "dynamic": false, - "info": "Enable SSL/TLS encryption for secure connections to OpenSearch.", - "list": false, - "list_add_label": "Add More", - "name": "use_ssl", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "bool", - "value": true - }, - "username": { - "_input_type": "StrInput", - "advanced": false, - "display_name": "Username", - "dynamic": false, - "info": "", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "username", - "placeholder": "", - "required": false, - "show": false, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "admin" - }, - "vector_field": { - "_input_type": "StrInput", - "advanced": true, - "display_name": "Vector Field Name", - "dynamic": false, - "info": "Name of the field in OpenSearch documents that stores the vector embeddings for similarity search.", - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "vector_field", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "str", - "value": "chunk_embedding" - }, - "verify_certs": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Verify SSL Certificates", - "dynamic": false, - "info": "Verify SSL certificates when connecting. 
Disable for self-signed certificates in development environments.", - "list": false, - "list_add_label": "Add More", - "name": "verify_certs", - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "type": "bool", - "value": false - } - }, - "tool_mode": false - }, - "selected_output": "dataframe", - "showNode": true, - "type": "OpenSearchVectorStoreComponent" - }, - "dragging": false, - "id": "OpenSearchHybrid-Ve6bS", - "measured": { - "height": 822, - "width": 320 - }, - "position": { - "x": 2694.183983837566, - "y": 1425.777807367294 - }, - "selected": false, - "type": "genericNode" - }, { "data": { "id": "URLComponent-lnA0q", @@ -1520,14 +1042,16 @@ "width": 320 }, "position": { - "x": 1249.8241608743583, - "y": 1270.7229143090308 + "x": 1253.2399253814751, + "y": 1554.2313683997174 }, "selected": false, "type": "genericNode" }, { "data": { + "description": "Perform various operations on a DataFrame.", + "display_name": "DataFrame Operations", "id": "DataFrameOperations-hqIoy", "node": { "base_classes": [ @@ -1538,7 +1062,7 @@ "custom_fields": {}, "description": "Perform various operations on a DataFrame.", "display_name": "DataFrame Operations", - "documentation": "https://docs.langflow.org/components-processing#dataframe-operations", + "documentation": "https://docs.langflow.org/dataframe-operations", "edited": false, "field_order": [ "df", @@ -1556,11 +1080,10 @@ ], "frozen": false, "icon": "table", - "last_updated": "2025-11-24T17:58:32.464Z", + "last_updated": "2025-11-25T23:37:45.067Z", "legacy": false, - "lf_version": "1.6.0", "metadata": { - "code_hash": "b4d6b19b6eef", + "code_hash": "904f4eaebccd", "dependencies": { "dependencies": [ { @@ -1574,7 +1097,7 @@ ], "total_dependencies": 2 }, - "module": "lfx.components.processing.dataframe_operations.DataFrameOperationsComponent" + "module": "custom_components.dataframe_operations" }, "minimized": false, "output_types": [], @@ -1584,6 +1107,7 @@ "cache": true, "display_name": "DataFrame", "group_outputs": false, + "loop_types": null, "method": "perform_operation", "name": "output", "options": null, @@ -1598,6 +1122,12 @@ ], "pinned": false, "template": { + "_frontend_node_flow_id": { + "value": "72c3d17c-2dac-4a73-b48a-6518473d7830" + }, + "_frontend_node_folder_id": { + "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + }, "_type": "Component", "ascending": { "_input_type": "BoolInput", @@ -1608,12 +1138,14 @@ "list": false, "list_add_label": "Add More", "name": "ascending", + "override_skip": false, "placeholder": "", "required": false, "show": false, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "bool", "value": true }, @@ -1633,7 +1165,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import pandas as pd\n\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.inputs import SortableListInput\nfrom lfx.io import BoolInput, DataFrameInput, DropdownInput, IntInput, MessageTextInput, Output, StrInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.dataframe import DataFrame\n\n\nclass DataFrameOperationsComponent(Component):\n display_name = \"DataFrame Operations\"\n description = \"Perform various operations on a DataFrame.\"\n documentation: str = \"https://docs.langflow.org/components-processing#dataframe-operations\"\n icon = \"table\"\n name = \"DataFrameOperations\"\n\n OPERATION_CHOICES = [\n \"Add Column\",\n \"Drop Column\",\n \"Filter\",\n 
\"Head\",\n \"Rename Column\",\n \"Replace Value\",\n \"Select Columns\",\n \"Sort\",\n \"Tail\",\n \"Drop Duplicates\",\n ]\n\n inputs = [\n DataFrameInput(\n name=\"df\",\n display_name=\"DataFrame\",\n info=\"The input DataFrame to operate on.\",\n required=True,\n ),\n SortableListInput(\n name=\"operation\",\n display_name=\"Operation\",\n placeholder=\"Select Operation\",\n info=\"Select the DataFrame operation to perform.\",\n options=[\n {\"name\": \"Add Column\", \"icon\": \"plus\"},\n {\"name\": \"Drop Column\", \"icon\": \"minus\"},\n {\"name\": \"Filter\", \"icon\": \"filter\"},\n {\"name\": \"Head\", \"icon\": \"arrow-up\"},\n {\"name\": \"Rename Column\", \"icon\": \"pencil\"},\n {\"name\": \"Replace Value\", \"icon\": \"replace\"},\n {\"name\": \"Select Columns\", \"icon\": \"columns\"},\n {\"name\": \"Sort\", \"icon\": \"arrow-up-down\"},\n {\"name\": \"Tail\", \"icon\": \"arrow-down\"},\n {\"name\": \"Drop Duplicates\", \"icon\": \"copy-x\"},\n ],\n real_time_refresh=True,\n limit=1,\n ),\n StrInput(\n name=\"column_name\",\n display_name=\"Column Name\",\n info=\"The column name to use for the operation.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"filter_value\",\n display_name=\"Filter Value\",\n info=\"The value to filter rows by.\",\n dynamic=True,\n show=False,\n ),\n DropdownInput(\n name=\"filter_operator\",\n display_name=\"Filter Operator\",\n options=[\n \"equals\",\n \"not equals\",\n \"contains\",\n \"not contains\",\n \"starts with\",\n \"ends with\",\n \"greater than\",\n \"less than\",\n ],\n value=\"equals\",\n info=\"The operator to apply for filtering rows.\",\n advanced=False,\n dynamic=True,\n show=False,\n ),\n BoolInput(\n name=\"ascending\",\n display_name=\"Sort Ascending\",\n info=\"Whether to sort in ascending order.\",\n dynamic=True,\n show=False,\n value=True,\n ),\n StrInput(\n name=\"new_column_name\",\n display_name=\"New Column Name\",\n info=\"The new column name when renaming or adding a column.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"new_column_value\",\n display_name=\"New Column Value\",\n info=\"The value to populate the new column with.\",\n dynamic=True,\n show=False,\n ),\n StrInput(\n name=\"columns_to_select\",\n display_name=\"Columns to Select\",\n dynamic=True,\n is_list=True,\n show=False,\n ),\n IntInput(\n name=\"num_rows\",\n display_name=\"Number of Rows\",\n info=\"Number of rows to return (for head/tail).\",\n dynamic=True,\n show=False,\n value=5,\n ),\n MessageTextInput(\n name=\"replace_value\",\n display_name=\"Value to Replace\",\n info=\"The value to replace in the column.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"replacement_value\",\n display_name=\"Replacement Value\",\n info=\"The value to replace with.\",\n dynamic=True,\n show=False,\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"DataFrame\",\n name=\"output\",\n method=\"perform_operation\",\n info=\"The resulting DataFrame after the operation.\",\n )\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n dynamic_fields = [\n \"column_name\",\n \"filter_value\",\n \"filter_operator\",\n \"ascending\",\n \"new_column_name\",\n \"new_column_value\",\n \"columns_to_select\",\n \"num_rows\",\n \"replace_value\",\n \"replacement_value\",\n ]\n for field in dynamic_fields:\n build_config[field][\"show\"] = False\n\n if field_name == \"operation\":\n # Handle SortableListInput format\n if isinstance(field_value, list):\n operation_name = 
field_value[0].get(\"name\", \"\") if field_value else \"\"\n else:\n operation_name = field_value or \"\"\n\n # If no operation selected, all dynamic fields stay hidden (already set to False above)\n if not operation_name:\n return build_config\n\n if operation_name == \"Filter\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"filter_value\"][\"show\"] = True\n build_config[\"filter_operator\"][\"show\"] = True\n elif operation_name == \"Sort\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"ascending\"][\"show\"] = True\n elif operation_name == \"Drop Column\":\n build_config[\"column_name\"][\"show\"] = True\n elif operation_name == \"Rename Column\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"new_column_name\"][\"show\"] = True\n elif operation_name == \"Add Column\":\n build_config[\"new_column_name\"][\"show\"] = True\n build_config[\"new_column_value\"][\"show\"] = True\n elif operation_name == \"Select Columns\":\n build_config[\"columns_to_select\"][\"show\"] = True\n elif operation_name in {\"Head\", \"Tail\"}:\n build_config[\"num_rows\"][\"show\"] = True\n elif operation_name == \"Replace Value\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"replace_value\"][\"show\"] = True\n build_config[\"replacement_value\"][\"show\"] = True\n elif operation_name == \"Drop Duplicates\":\n build_config[\"column_name\"][\"show\"] = True\n\n return build_config\n\n def perform_operation(self) -> DataFrame:\n df_copy = self.df.copy()\n\n # Handle SortableListInput format for operation\n operation_input = getattr(self, \"operation\", [])\n if isinstance(operation_input, list) and len(operation_input) > 0:\n op = operation_input[0].get(\"name\", \"\")\n else:\n op = \"\"\n\n # If no operation selected, return original DataFrame\n if not op:\n return df_copy\n\n if op == \"Filter\":\n return self.filter_rows_by_value(df_copy)\n if op == \"Sort\":\n return self.sort_by_column(df_copy)\n if op == \"Drop Column\":\n return self.drop_column(df_copy)\n if op == \"Rename Column\":\n return self.rename_column(df_copy)\n if op == \"Add Column\":\n return self.add_column(df_copy)\n if op == \"Select Columns\":\n return self.select_columns(df_copy)\n if op == \"Head\":\n return self.head(df_copy)\n if op == \"Tail\":\n return self.tail(df_copy)\n if op == \"Replace Value\":\n return self.replace_values(df_copy)\n if op == \"Drop Duplicates\":\n return self.drop_duplicates(df_copy)\n msg = f\"Unsupported operation: {op}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def filter_rows_by_value(self, df: DataFrame) -> DataFrame:\n column = df[self.column_name]\n filter_value = self.filter_value\n\n # Handle regular DropdownInput format (just a string value)\n operator = getattr(self, \"filter_operator\", \"equals\") # Default to equals for backward compatibility\n\n if operator == \"equals\":\n mask = column == filter_value\n elif operator == \"not equals\":\n mask = column != filter_value\n elif operator == \"contains\":\n mask = column.astype(str).str.contains(str(filter_value), na=False)\n elif operator == \"not contains\":\n mask = ~column.astype(str).str.contains(str(filter_value), na=False)\n elif operator == \"starts with\":\n mask = column.astype(str).str.startswith(str(filter_value), na=False)\n elif operator == \"ends with\":\n mask = column.astype(str).str.endswith(str(filter_value), na=False)\n elif operator == \"greater than\":\n try:\n # Try to convert filter_value to numeric for comparison\n numeric_value = 
pd.to_numeric(filter_value)\n mask = column > numeric_value\n except (ValueError, TypeError):\n # If conversion fails, compare as strings\n mask = column.astype(str) > str(filter_value)\n elif operator == \"less than\":\n try:\n # Try to convert filter_value to numeric for comparison\n numeric_value = pd.to_numeric(filter_value)\n mask = column < numeric_value\n except (ValueError, TypeError):\n # If conversion fails, compare as strings\n mask = column.astype(str) < str(filter_value)\n else:\n mask = column == filter_value # Fallback to equals\n\n return DataFrame(df[mask])\n\n def sort_by_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.sort_values(by=self.column_name, ascending=self.ascending))\n\n def drop_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.drop(columns=[self.column_name]))\n\n def rename_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.rename(columns={self.column_name: self.new_column_name}))\n\n def add_column(self, df: DataFrame) -> DataFrame:\n df[self.new_column_name] = [self.new_column_value] * len(df)\n return DataFrame(df)\n\n def select_columns(self, df: DataFrame) -> DataFrame:\n columns = [col.strip() for col in self.columns_to_select]\n return DataFrame(df[columns])\n\n def head(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.head(self.num_rows))\n\n def tail(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.tail(self.num_rows))\n\n def replace_values(self, df: DataFrame) -> DataFrame:\n df[self.column_name] = df[self.column_name].replace(self.replace_value, self.replacement_value)\n return DataFrame(df)\n\n def drop_duplicates(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.drop_duplicates(subset=self.column_name))\n" + "value": "import pandas as pd\n\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.inputs import SortableListInput\nfrom lfx.io import BoolInput, DataFrameInput, DropdownInput, IntInput, MessageTextInput, Output, StrInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.dataframe import DataFrame\n\n\nclass DataFrameOperationsComponent(Component):\n display_name = \"DataFrame Operations\"\n description = \"Perform various operations on a DataFrame.\"\n documentation: str = \"https://docs.langflow.org/dataframe-operations\"\n icon = \"table\"\n name = \"DataFrameOperations\"\n\n OPERATION_CHOICES = [\n \"Add Column\",\n \"Drop Column\",\n \"Filter\",\n \"Head\",\n \"Rename Column\",\n \"Replace Value\",\n \"Select Columns\",\n \"Sort\",\n \"Tail\",\n \"Drop Duplicates\",\n ]\n\n inputs = [\n DataFrameInput(\n name=\"df\",\n display_name=\"DataFrame\",\n info=\"The input DataFrame to operate on.\",\n required=True,\n ),\n SortableListInput(\n name=\"operation\",\n display_name=\"Operation\",\n placeholder=\"Select Operation\",\n info=\"Select the DataFrame operation to perform.\",\n options=[\n {\"name\": \"Add Column\", \"icon\": \"plus\"},\n {\"name\": \"Drop Column\", \"icon\": \"minus\"},\n {\"name\": \"Filter\", \"icon\": \"filter\"},\n {\"name\": \"Head\", \"icon\": \"arrow-up\"},\n {\"name\": \"Rename Column\", \"icon\": \"pencil\"},\n {\"name\": \"Replace Value\", \"icon\": \"replace\"},\n {\"name\": \"Select Columns\", \"icon\": \"columns\"},\n {\"name\": \"Sort\", \"icon\": \"arrow-up-down\"},\n {\"name\": \"Tail\", \"icon\": \"arrow-down\"},\n {\"name\": \"Drop Duplicates\", \"icon\": \"copy-x\"},\n ],\n real_time_refresh=True,\n limit=1,\n ),\n StrInput(\n name=\"column_name\",\n display_name=\"Column Name\",\n info=\"The column name to use 
for the operation.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"filter_value\",\n display_name=\"Filter Value\",\n info=\"The value to filter rows by.\",\n dynamic=True,\n show=False,\n ),\n DropdownInput(\n name=\"filter_operator\",\n display_name=\"Filter Operator\",\n options=[\n \"equals\",\n \"not equals\",\n \"contains\",\n \"not contains\",\n \"starts with\",\n \"ends with\",\n \"greater than\",\n \"less than\",\n ],\n value=\"equals\",\n info=\"The operator to apply for filtering rows.\",\n advanced=False,\n dynamic=True,\n show=False,\n ),\n BoolInput(\n name=\"ascending\",\n display_name=\"Sort Ascending\",\n info=\"Whether to sort in ascending order.\",\n dynamic=True,\n show=False,\n value=True,\n ),\n StrInput(\n name=\"new_column_name\",\n display_name=\"New Column Name\",\n info=\"The new column name when renaming or adding a column.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"new_column_value\",\n display_name=\"New Column Value\",\n info=\"The value to populate the new column with.\",\n dynamic=True,\n show=False,\n ),\n StrInput(\n name=\"columns_to_select\",\n display_name=\"Columns to Select\",\n dynamic=True,\n is_list=True,\n show=False,\n ),\n IntInput(\n name=\"num_rows\",\n display_name=\"Number of Rows\",\n info=\"Number of rows to return (for head/tail).\",\n dynamic=True,\n show=False,\n value=5,\n ),\n MessageTextInput(\n name=\"replace_value\",\n display_name=\"Value to Replace\",\n info=\"The value to replace in the column.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"replacement_value\",\n display_name=\"Replacement Value\",\n info=\"The value to replace with.\",\n dynamic=True,\n show=False,\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"DataFrame\",\n name=\"output\",\n method=\"perform_operation\",\n info=\"The resulting DataFrame after the operation.\",\n )\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n dynamic_fields = [\n \"column_name\",\n \"filter_value\",\n \"filter_operator\",\n \"ascending\",\n \"new_column_name\",\n \"new_column_value\",\n \"columns_to_select\",\n \"num_rows\",\n \"replace_value\",\n \"replacement_value\",\n ]\n for field in dynamic_fields:\n build_config[field][\"show\"] = False\n\n if field_name == \"operation\":\n # Handle SortableListInput format\n if isinstance(field_value, list):\n operation_name = field_value[0].get(\"name\", \"\") if field_value else \"\"\n else:\n operation_name = field_value or \"\"\n\n # If no operation selected, all dynamic fields stay hidden (already set to False above)\n if not operation_name:\n return build_config\n\n if operation_name == \"Filter\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"filter_value\"][\"show\"] = True\n build_config[\"filter_operator\"][\"show\"] = True\n elif operation_name == \"Sort\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"ascending\"][\"show\"] = True\n elif operation_name == \"Drop Column\":\n build_config[\"column_name\"][\"show\"] = True\n elif operation_name == \"Rename Column\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"new_column_name\"][\"show\"] = True\n elif operation_name == \"Add Column\":\n build_config[\"new_column_name\"][\"show\"] = True\n build_config[\"new_column_value\"][\"show\"] = True\n elif operation_name == \"Select Columns\":\n build_config[\"columns_to_select\"][\"show\"] = True\n elif operation_name in {\"Head\", \"Tail\"}:\n build_config[\"num_rows\"][\"show\"] = 
True\n elif operation_name == \"Replace Value\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"replace_value\"][\"show\"] = True\n build_config[\"replacement_value\"][\"show\"] = True\n elif operation_name == \"Drop Duplicates\":\n build_config[\"column_name\"][\"show\"] = True\n\n return build_config\n\n def perform_operation(self) -> DataFrame:\n df_copy = self.df.copy()\n\n # Handle SortableListInput format for operation\n operation_input = getattr(self, \"operation\", [])\n if isinstance(operation_input, list) and len(operation_input) > 0:\n op = operation_input[0].get(\"name\", \"\")\n else:\n op = \"\"\n\n # If no operation selected, return original DataFrame\n if not op:\n return df_copy\n\n if op == \"Filter\":\n return self.filter_rows_by_value(df_copy)\n if op == \"Sort\":\n return self.sort_by_column(df_copy)\n if op == \"Drop Column\":\n return self.drop_column(df_copy)\n if op == \"Rename Column\":\n return self.rename_column(df_copy)\n if op == \"Add Column\":\n return self.add_column(df_copy)\n if op == \"Select Columns\":\n return self.select_columns(df_copy)\n if op == \"Head\":\n return self.head(df_copy)\n if op == \"Tail\":\n return self.tail(df_copy)\n if op == \"Replace Value\":\n return self.replace_values(df_copy)\n if op == \"Drop Duplicates\":\n return self.drop_duplicates(df_copy)\n msg = f\"Unsupported operation: {op}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def filter_rows_by_value(self, df: DataFrame) -> DataFrame:\n column = df[self.column_name]\n filter_value = self.filter_value\n\n # Handle regular DropdownInput format (just a string value)\n operator = getattr(self, \"filter_operator\", \"equals\") # Default to equals for backward compatibility\n\n if operator == \"equals\":\n mask = column == filter_value\n elif operator == \"not equals\":\n mask = column != filter_value\n elif operator == \"contains\":\n mask = column.astype(str).str.contains(str(filter_value), na=False)\n elif operator == \"not contains\":\n mask = ~column.astype(str).str.contains(str(filter_value), na=False)\n elif operator == \"starts with\":\n mask = column.astype(str).str.startswith(str(filter_value), na=False)\n elif operator == \"ends with\":\n mask = column.astype(str).str.endswith(str(filter_value), na=False)\n elif operator == \"greater than\":\n try:\n # Try to convert filter_value to numeric for comparison\n numeric_value = pd.to_numeric(filter_value)\n mask = column > numeric_value\n except (ValueError, TypeError):\n # If conversion fails, compare as strings\n mask = column.astype(str) > str(filter_value)\n elif operator == \"less than\":\n try:\n # Try to convert filter_value to numeric for comparison\n numeric_value = pd.to_numeric(filter_value)\n mask = column < numeric_value\n except (ValueError, TypeError):\n # If conversion fails, compare as strings\n mask = column.astype(str) < str(filter_value)\n else:\n mask = column == filter_value # Fallback to equals\n\n return DataFrame(df[mask])\n\n def sort_by_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.sort_values(by=self.column_name, ascending=self.ascending))\n\n def drop_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.drop(columns=[self.column_name]))\n\n def rename_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.rename(columns={self.column_name: self.new_column_name}))\n\n def add_column(self, df: DataFrame) -> DataFrame:\n df[self.new_column_name] = [self.new_column_value] * len(df)\n return DataFrame(df)\n\n def select_columns(self, df: 
DataFrame) -> DataFrame:\n columns = [col.strip() for col in self.columns_to_select]\n return DataFrame(df[columns])\n\n def head(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.head(self.num_rows))\n\n def tail(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.tail(self.num_rows))\n\n def replace_values(self, df: DataFrame) -> DataFrame:\n df[self.column_name] = df[self.column_name].replace(self.replace_value, self.replacement_value)\n return DataFrame(df)\n\n def drop_duplicates(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.drop_duplicates(subset=self.column_name))\n" }, "column_name": { "_input_type": "StrInput", @@ -1645,12 +1177,14 @@ "list_add_label": "Add More", "load_from_db": false, "name": "column_name", + "override_skip": false, "placeholder": "", "required": false, "show": false, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -1664,12 +1198,14 @@ "list_add_label": "Add More", "load_from_db": false, "name": "columns_to_select", + "override_skip": false, "placeholder": "", "required": false, "show": false, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -1685,6 +1221,7 @@ "list": false, "list_add_label": "Add More", "name": "df", + "override_skip": false, "placeholder": "", "required": true, "show": true, @@ -1692,6 +1229,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "other", "value": "" }, @@ -1716,6 +1254,7 @@ "less than" ], "options_metadata": [], + "override_skip": false, "placeholder": "", "required": false, "show": false, @@ -1723,6 +1262,7 @@ "toggle": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "str", "value": "equals" }, @@ -1739,6 +1279,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "filter_value", + "override_skip": false, "placeholder": "", "required": false, "show": false, @@ -1746,9 +1287,11 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, + "is_refresh": false, "new_column_name": { "_input_type": "StrInput", "advanced": false, @@ -1759,12 +1302,14 @@ "list_add_label": "Add More", "load_from_db": false, "name": "new_column_name", + "override_skip": false, "placeholder": "", "required": false, "show": true, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "filename" }, @@ -1779,8 +1324,9 @@ ], "list": false, "list_add_label": "Add More", - "load_from_db": false, + "load_from_db": true, "name": "new_column_value", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -1788,8 +1334,9 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", - "value": "" + "value": "FILENAME" }, "num_rows": { "_input_type": "IntInput", @@ -1800,12 +1347,14 @@ "list": false, "list_add_label": "Add More", "name": "num_rows", + "override_skip": false, "placeholder": "", "required": false, "show": false, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "int", "value": 5 }, @@ -1816,6 +1365,7 @@ "dynamic": false, "info": "Select the DataFrame operation to perform.", "limit": 1, + "load_from_db": false, "name": "operation", "options": [ { @@ -1859,6 +1409,7 @@ 
"name": "Drop Duplicates" } ], + "override_skip": false, "placeholder": "Select Operation", "real_time_refresh": true, "required": false, @@ -1867,6 +1418,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "sortableList", "value": [ { @@ -1890,6 +1442,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "replace_value", + "override_skip": false, "placeholder": "", "required": false, "show": false, @@ -1897,6 +1450,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -1913,6 +1467,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "replacement_value", + "override_skip": false, "placeholder": "", "required": false, "show": false, @@ -1920,6 +1475,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" } @@ -1944,6 +1500,8 @@ }, { "data": { + "description": "Perform various operations on a DataFrame.", + "display_name": "DataFrame Operations", "id": "DataFrameOperations-A98BL", "node": { "base_classes": [ @@ -1954,7 +1512,7 @@ "custom_fields": {}, "description": "Perform various operations on a DataFrame.", "display_name": "DataFrame Operations", - "documentation": "https://docs.langflow.org/components-processing#dataframe-operations", + "documentation": "https://docs.langflow.org/dataframe-operations", "edited": false, "field_order": [ "df", @@ -1972,11 +1530,10 @@ ], "frozen": false, "icon": "table", - "last_updated": "2025-11-24T17:58:32.465Z", + "last_updated": "2025-11-25T23:37:45.068Z", "legacy": false, - "lf_version": "1.6.0", "metadata": { - "code_hash": "b4d6b19b6eef", + "code_hash": "904f4eaebccd", "dependencies": { "dependencies": [ { @@ -1990,7 +1547,7 @@ ], "total_dependencies": 2 }, - "module": "lfx.components.processing.dataframe_operations.DataFrameOperationsComponent" + "module": "custom_components.dataframe_operations" }, "minimized": false, "output_types": [], @@ -2000,6 +1557,7 @@ "cache": true, "display_name": "DataFrame", "group_outputs": false, + "loop_types": null, "method": "perform_operation", "name": "output", "options": null, @@ -2014,6 +1572,12 @@ ], "pinned": false, "template": { + "_frontend_node_flow_id": { + "value": "72c3d17c-2dac-4a73-b48a-6518473d7830" + }, + "_frontend_node_folder_id": { + "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + }, "_type": "Component", "ascending": { "_input_type": "BoolInput", @@ -2024,12 +1588,14 @@ "list": false, "list_add_label": "Add More", "name": "ascending", + "override_skip": false, "placeholder": "", "required": false, "show": false, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "bool", "value": true }, @@ -2049,7 +1615,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import pandas as pd\n\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.inputs import SortableListInput\nfrom lfx.io import BoolInput, DataFrameInput, DropdownInput, IntInput, MessageTextInput, Output, StrInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.dataframe import DataFrame\n\n\nclass DataFrameOperationsComponent(Component):\n display_name = \"DataFrame Operations\"\n description = \"Perform various operations on a DataFrame.\"\n documentation: str = \"https://docs.langflow.org/components-processing#dataframe-operations\"\n icon = \"table\"\n name = \"DataFrameOperations\"\n\n 
OPERATION_CHOICES = [\n \"Add Column\",\n \"Drop Column\",\n \"Filter\",\n \"Head\",\n \"Rename Column\",\n \"Replace Value\",\n \"Select Columns\",\n \"Sort\",\n \"Tail\",\n \"Drop Duplicates\",\n ]\n\n inputs = [\n DataFrameInput(\n name=\"df\",\n display_name=\"DataFrame\",\n info=\"The input DataFrame to operate on.\",\n required=True,\n ),\n SortableListInput(\n name=\"operation\",\n display_name=\"Operation\",\n placeholder=\"Select Operation\",\n info=\"Select the DataFrame operation to perform.\",\n options=[\n {\"name\": \"Add Column\", \"icon\": \"plus\"},\n {\"name\": \"Drop Column\", \"icon\": \"minus\"},\n {\"name\": \"Filter\", \"icon\": \"filter\"},\n {\"name\": \"Head\", \"icon\": \"arrow-up\"},\n {\"name\": \"Rename Column\", \"icon\": \"pencil\"},\n {\"name\": \"Replace Value\", \"icon\": \"replace\"},\n {\"name\": \"Select Columns\", \"icon\": \"columns\"},\n {\"name\": \"Sort\", \"icon\": \"arrow-up-down\"},\n {\"name\": \"Tail\", \"icon\": \"arrow-down\"},\n {\"name\": \"Drop Duplicates\", \"icon\": \"copy-x\"},\n ],\n real_time_refresh=True,\n limit=1,\n ),\n StrInput(\n name=\"column_name\",\n display_name=\"Column Name\",\n info=\"The column name to use for the operation.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"filter_value\",\n display_name=\"Filter Value\",\n info=\"The value to filter rows by.\",\n dynamic=True,\n show=False,\n ),\n DropdownInput(\n name=\"filter_operator\",\n display_name=\"Filter Operator\",\n options=[\n \"equals\",\n \"not equals\",\n \"contains\",\n \"not contains\",\n \"starts with\",\n \"ends with\",\n \"greater than\",\n \"less than\",\n ],\n value=\"equals\",\n info=\"The operator to apply for filtering rows.\",\n advanced=False,\n dynamic=True,\n show=False,\n ),\n BoolInput(\n name=\"ascending\",\n display_name=\"Sort Ascending\",\n info=\"Whether to sort in ascending order.\",\n dynamic=True,\n show=False,\n value=True,\n ),\n StrInput(\n name=\"new_column_name\",\n display_name=\"New Column Name\",\n info=\"The new column name when renaming or adding a column.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"new_column_value\",\n display_name=\"New Column Value\",\n info=\"The value to populate the new column with.\",\n dynamic=True,\n show=False,\n ),\n StrInput(\n name=\"columns_to_select\",\n display_name=\"Columns to Select\",\n dynamic=True,\n is_list=True,\n show=False,\n ),\n IntInput(\n name=\"num_rows\",\n display_name=\"Number of Rows\",\n info=\"Number of rows to return (for head/tail).\",\n dynamic=True,\n show=False,\n value=5,\n ),\n MessageTextInput(\n name=\"replace_value\",\n display_name=\"Value to Replace\",\n info=\"The value to replace in the column.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"replacement_value\",\n display_name=\"Replacement Value\",\n info=\"The value to replace with.\",\n dynamic=True,\n show=False,\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"DataFrame\",\n name=\"output\",\n method=\"perform_operation\",\n info=\"The resulting DataFrame after the operation.\",\n )\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n dynamic_fields = [\n \"column_name\",\n \"filter_value\",\n \"filter_operator\",\n \"ascending\",\n \"new_column_name\",\n \"new_column_value\",\n \"columns_to_select\",\n \"num_rows\",\n \"replace_value\",\n \"replacement_value\",\n ]\n for field in dynamic_fields:\n build_config[field][\"show\"] = False\n\n if field_name == \"operation\":\n # Handle SortableListInput 
format\n if isinstance(field_value, list):\n operation_name = field_value[0].get(\"name\", \"\") if field_value else \"\"\n else:\n operation_name = field_value or \"\"\n\n # If no operation selected, all dynamic fields stay hidden (already set to False above)\n if not operation_name:\n return build_config\n\n if operation_name == \"Filter\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"filter_value\"][\"show\"] = True\n build_config[\"filter_operator\"][\"show\"] = True\n elif operation_name == \"Sort\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"ascending\"][\"show\"] = True\n elif operation_name == \"Drop Column\":\n build_config[\"column_name\"][\"show\"] = True\n elif operation_name == \"Rename Column\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"new_column_name\"][\"show\"] = True\n elif operation_name == \"Add Column\":\n build_config[\"new_column_name\"][\"show\"] = True\n build_config[\"new_column_value\"][\"show\"] = True\n elif operation_name == \"Select Columns\":\n build_config[\"columns_to_select\"][\"show\"] = True\n elif operation_name in {\"Head\", \"Tail\"}:\n build_config[\"num_rows\"][\"show\"] = True\n elif operation_name == \"Replace Value\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"replace_value\"][\"show\"] = True\n build_config[\"replacement_value\"][\"show\"] = True\n elif operation_name == \"Drop Duplicates\":\n build_config[\"column_name\"][\"show\"] = True\n\n return build_config\n\n def perform_operation(self) -> DataFrame:\n df_copy = self.df.copy()\n\n # Handle SortableListInput format for operation\n operation_input = getattr(self, \"operation\", [])\n if isinstance(operation_input, list) and len(operation_input) > 0:\n op = operation_input[0].get(\"name\", \"\")\n else:\n op = \"\"\n\n # If no operation selected, return original DataFrame\n if not op:\n return df_copy\n\n if op == \"Filter\":\n return self.filter_rows_by_value(df_copy)\n if op == \"Sort\":\n return self.sort_by_column(df_copy)\n if op == \"Drop Column\":\n return self.drop_column(df_copy)\n if op == \"Rename Column\":\n return self.rename_column(df_copy)\n if op == \"Add Column\":\n return self.add_column(df_copy)\n if op == \"Select Columns\":\n return self.select_columns(df_copy)\n if op == \"Head\":\n return self.head(df_copy)\n if op == \"Tail\":\n return self.tail(df_copy)\n if op == \"Replace Value\":\n return self.replace_values(df_copy)\n if op == \"Drop Duplicates\":\n return self.drop_duplicates(df_copy)\n msg = f\"Unsupported operation: {op}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def filter_rows_by_value(self, df: DataFrame) -> DataFrame:\n column = df[self.column_name]\n filter_value = self.filter_value\n\n # Handle regular DropdownInput format (just a string value)\n operator = getattr(self, \"filter_operator\", \"equals\") # Default to equals for backward compatibility\n\n if operator == \"equals\":\n mask = column == filter_value\n elif operator == \"not equals\":\n mask = column != filter_value\n elif operator == \"contains\":\n mask = column.astype(str).str.contains(str(filter_value), na=False)\n elif operator == \"not contains\":\n mask = ~column.astype(str).str.contains(str(filter_value), na=False)\n elif operator == \"starts with\":\n mask = column.astype(str).str.startswith(str(filter_value), na=False)\n elif operator == \"ends with\":\n mask = column.astype(str).str.endswith(str(filter_value), na=False)\n elif operator == \"greater than\":\n try:\n # Try to convert 
filter_value to numeric for comparison\n numeric_value = pd.to_numeric(filter_value)\n mask = column > numeric_value\n except (ValueError, TypeError):\n # If conversion fails, compare as strings\n mask = column.astype(str) > str(filter_value)\n elif operator == \"less than\":\n try:\n # Try to convert filter_value to numeric for comparison\n numeric_value = pd.to_numeric(filter_value)\n mask = column < numeric_value\n except (ValueError, TypeError):\n # If conversion fails, compare as strings\n mask = column.astype(str) < str(filter_value)\n else:\n mask = column == filter_value # Fallback to equals\n\n return DataFrame(df[mask])\n\n def sort_by_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.sort_values(by=self.column_name, ascending=self.ascending))\n\n def drop_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.drop(columns=[self.column_name]))\n\n def rename_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.rename(columns={self.column_name: self.new_column_name}))\n\n def add_column(self, df: DataFrame) -> DataFrame:\n df[self.new_column_name] = [self.new_column_value] * len(df)\n return DataFrame(df)\n\n def select_columns(self, df: DataFrame) -> DataFrame:\n columns = [col.strip() for col in self.columns_to_select]\n return DataFrame(df[columns])\n\n def head(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.head(self.num_rows))\n\n def tail(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.tail(self.num_rows))\n\n def replace_values(self, df: DataFrame) -> DataFrame:\n df[self.column_name] = df[self.column_name].replace(self.replace_value, self.replacement_value)\n return DataFrame(df)\n\n def drop_duplicates(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.drop_duplicates(subset=self.column_name))\n" + "value": "import pandas as pd\n\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.inputs import SortableListInput\nfrom lfx.io import BoolInput, DataFrameInput, DropdownInput, IntInput, MessageTextInput, Output, StrInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.dataframe import DataFrame\n\n\nclass DataFrameOperationsComponent(Component):\n display_name = \"DataFrame Operations\"\n description = \"Perform various operations on a DataFrame.\"\n documentation: str = \"https://docs.langflow.org/dataframe-operations\"\n icon = \"table\"\n name = \"DataFrameOperations\"\n\n OPERATION_CHOICES = [\n \"Add Column\",\n \"Drop Column\",\n \"Filter\",\n \"Head\",\n \"Rename Column\",\n \"Replace Value\",\n \"Select Columns\",\n \"Sort\",\n \"Tail\",\n \"Drop Duplicates\",\n ]\n\n inputs = [\n DataFrameInput(\n name=\"df\",\n display_name=\"DataFrame\",\n info=\"The input DataFrame to operate on.\",\n required=True,\n ),\n SortableListInput(\n name=\"operation\",\n display_name=\"Operation\",\n placeholder=\"Select Operation\",\n info=\"Select the DataFrame operation to perform.\",\n options=[\n {\"name\": \"Add Column\", \"icon\": \"plus\"},\n {\"name\": \"Drop Column\", \"icon\": \"minus\"},\n {\"name\": \"Filter\", \"icon\": \"filter\"},\n {\"name\": \"Head\", \"icon\": \"arrow-up\"},\n {\"name\": \"Rename Column\", \"icon\": \"pencil\"},\n {\"name\": \"Replace Value\", \"icon\": \"replace\"},\n {\"name\": \"Select Columns\", \"icon\": \"columns\"},\n {\"name\": \"Sort\", \"icon\": \"arrow-up-down\"},\n {\"name\": \"Tail\", \"icon\": \"arrow-down\"},\n {\"name\": \"Drop Duplicates\", \"icon\": \"copy-x\"},\n ],\n real_time_refresh=True,\n limit=1,\n ),\n StrInput(\n name=\"column_name\",\n 
display_name=\"Column Name\",\n info=\"The column name to use for the operation.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"filter_value\",\n display_name=\"Filter Value\",\n info=\"The value to filter rows by.\",\n dynamic=True,\n show=False,\n ),\n DropdownInput(\n name=\"filter_operator\",\n display_name=\"Filter Operator\",\n options=[\n \"equals\",\n \"not equals\",\n \"contains\",\n \"not contains\",\n \"starts with\",\n \"ends with\",\n \"greater than\",\n \"less than\",\n ],\n value=\"equals\",\n info=\"The operator to apply for filtering rows.\",\n advanced=False,\n dynamic=True,\n show=False,\n ),\n BoolInput(\n name=\"ascending\",\n display_name=\"Sort Ascending\",\n info=\"Whether to sort in ascending order.\",\n dynamic=True,\n show=False,\n value=True,\n ),\n StrInput(\n name=\"new_column_name\",\n display_name=\"New Column Name\",\n info=\"The new column name when renaming or adding a column.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"new_column_value\",\n display_name=\"New Column Value\",\n info=\"The value to populate the new column with.\",\n dynamic=True,\n show=False,\n ),\n StrInput(\n name=\"columns_to_select\",\n display_name=\"Columns to Select\",\n dynamic=True,\n is_list=True,\n show=False,\n ),\n IntInput(\n name=\"num_rows\",\n display_name=\"Number of Rows\",\n info=\"Number of rows to return (for head/tail).\",\n dynamic=True,\n show=False,\n value=5,\n ),\n MessageTextInput(\n name=\"replace_value\",\n display_name=\"Value to Replace\",\n info=\"The value to replace in the column.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"replacement_value\",\n display_name=\"Replacement Value\",\n info=\"The value to replace with.\",\n dynamic=True,\n show=False,\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"DataFrame\",\n name=\"output\",\n method=\"perform_operation\",\n info=\"The resulting DataFrame after the operation.\",\n )\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n dynamic_fields = [\n \"column_name\",\n \"filter_value\",\n \"filter_operator\",\n \"ascending\",\n \"new_column_name\",\n \"new_column_value\",\n \"columns_to_select\",\n \"num_rows\",\n \"replace_value\",\n \"replacement_value\",\n ]\n for field in dynamic_fields:\n build_config[field][\"show\"] = False\n\n if field_name == \"operation\":\n # Handle SortableListInput format\n if isinstance(field_value, list):\n operation_name = field_value[0].get(\"name\", \"\") if field_value else \"\"\n else:\n operation_name = field_value or \"\"\n\n # If no operation selected, all dynamic fields stay hidden (already set to False above)\n if not operation_name:\n return build_config\n\n if operation_name == \"Filter\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"filter_value\"][\"show\"] = True\n build_config[\"filter_operator\"][\"show\"] = True\n elif operation_name == \"Sort\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"ascending\"][\"show\"] = True\n elif operation_name == \"Drop Column\":\n build_config[\"column_name\"][\"show\"] = True\n elif operation_name == \"Rename Column\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"new_column_name\"][\"show\"] = True\n elif operation_name == \"Add Column\":\n build_config[\"new_column_name\"][\"show\"] = True\n build_config[\"new_column_value\"][\"show\"] = True\n elif operation_name == \"Select Columns\":\n build_config[\"columns_to_select\"][\"show\"] = True\n elif operation_name in 
{\"Head\", \"Tail\"}:\n build_config[\"num_rows\"][\"show\"] = True\n elif operation_name == \"Replace Value\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"replace_value\"][\"show\"] = True\n build_config[\"replacement_value\"][\"show\"] = True\n elif operation_name == \"Drop Duplicates\":\n build_config[\"column_name\"][\"show\"] = True\n\n return build_config\n\n def perform_operation(self) -> DataFrame:\n df_copy = self.df.copy()\n\n # Handle SortableListInput format for operation\n operation_input = getattr(self, \"operation\", [])\n if isinstance(operation_input, list) and len(operation_input) > 0:\n op = operation_input[0].get(\"name\", \"\")\n else:\n op = \"\"\n\n # If no operation selected, return original DataFrame\n if not op:\n return df_copy\n\n if op == \"Filter\":\n return self.filter_rows_by_value(df_copy)\n if op == \"Sort\":\n return self.sort_by_column(df_copy)\n if op == \"Drop Column\":\n return self.drop_column(df_copy)\n if op == \"Rename Column\":\n return self.rename_column(df_copy)\n if op == \"Add Column\":\n return self.add_column(df_copy)\n if op == \"Select Columns\":\n return self.select_columns(df_copy)\n if op == \"Head\":\n return self.head(df_copy)\n if op == \"Tail\":\n return self.tail(df_copy)\n if op == \"Replace Value\":\n return self.replace_values(df_copy)\n if op == \"Drop Duplicates\":\n return self.drop_duplicates(df_copy)\n msg = f\"Unsupported operation: {op}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def filter_rows_by_value(self, df: DataFrame) -> DataFrame:\n column = df[self.column_name]\n filter_value = self.filter_value\n\n # Handle regular DropdownInput format (just a string value)\n operator = getattr(self, \"filter_operator\", \"equals\") # Default to equals for backward compatibility\n\n if operator == \"equals\":\n mask = column == filter_value\n elif operator == \"not equals\":\n mask = column != filter_value\n elif operator == \"contains\":\n mask = column.astype(str).str.contains(str(filter_value), na=False)\n elif operator == \"not contains\":\n mask = ~column.astype(str).str.contains(str(filter_value), na=False)\n elif operator == \"starts with\":\n mask = column.astype(str).str.startswith(str(filter_value), na=False)\n elif operator == \"ends with\":\n mask = column.astype(str).str.endswith(str(filter_value), na=False)\n elif operator == \"greater than\":\n try:\n # Try to convert filter_value to numeric for comparison\n numeric_value = pd.to_numeric(filter_value)\n mask = column > numeric_value\n except (ValueError, TypeError):\n # If conversion fails, compare as strings\n mask = column.astype(str) > str(filter_value)\n elif operator == \"less than\":\n try:\n # Try to convert filter_value to numeric for comparison\n numeric_value = pd.to_numeric(filter_value)\n mask = column < numeric_value\n except (ValueError, TypeError):\n # If conversion fails, compare as strings\n mask = column.astype(str) < str(filter_value)\n else:\n mask = column == filter_value # Fallback to equals\n\n return DataFrame(df[mask])\n\n def sort_by_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.sort_values(by=self.column_name, ascending=self.ascending))\n\n def drop_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.drop(columns=[self.column_name]))\n\n def rename_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.rename(columns={self.column_name: self.new_column_name}))\n\n def add_column(self, df: DataFrame) -> DataFrame:\n df[self.new_column_name] = [self.new_column_value] * 
len(df)\n return DataFrame(df)\n\n def select_columns(self, df: DataFrame) -> DataFrame:\n columns = [col.strip() for col in self.columns_to_select]\n return DataFrame(df[columns])\n\n def head(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.head(self.num_rows))\n\n def tail(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.tail(self.num_rows))\n\n def replace_values(self, df: DataFrame) -> DataFrame:\n df[self.column_name] = df[self.column_name].replace(self.replace_value, self.replacement_value)\n return DataFrame(df)\n\n def drop_duplicates(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.drop_duplicates(subset=self.column_name))\n" }, "column_name": { "_input_type": "StrInput", @@ -2061,12 +1627,14 @@ "list_add_label": "Add More", "load_from_db": false, "name": "column_name", + "override_skip": false, "placeholder": "", "required": false, "show": false, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -2080,12 +1648,14 @@ "list_add_label": "Add More", "load_from_db": false, "name": "columns_to_select", + "override_skip": false, "placeholder": "", "required": false, "show": false, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -2101,6 +1671,7 @@ "list": false, "list_add_label": "Add More", "name": "df", + "override_skip": false, "placeholder": "", "required": true, "show": true, @@ -2108,6 +1679,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "other", "value": "" }, @@ -2132,6 +1704,7 @@ "less than" ], "options_metadata": [], + "override_skip": false, "placeholder": "", "required": false, "show": false, @@ -2139,6 +1712,7 @@ "toggle": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "str", "value": "equals" }, @@ -2155,6 +1729,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "filter_value", + "override_skip": false, "placeholder": "", "required": false, "show": false, @@ -2162,9 +1737,11 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, + "is_refresh": false, "new_column_name": { "_input_type": "StrInput", "advanced": false, @@ -2175,12 +1752,14 @@ "list_add_label": "Add More", "load_from_db": false, "name": "new_column_name", + "override_skip": false, "placeholder": "", "required": false, "show": true, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "mimetype" }, @@ -2197,6 +1776,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "new_column_value", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -2204,6 +1784,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "text/html" }, @@ -2216,12 +1797,14 @@ "list": false, "list_add_label": "Add More", "name": "num_rows", + "override_skip": false, "placeholder": "", "required": false, "show": false, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "int", "value": 5 }, @@ -2232,6 +1815,7 @@ "dynamic": false, "info": "Select the DataFrame operation to perform.", "limit": 1, + "load_from_db": false, "name": "operation", "options": [ { @@ -2275,6 +1859,7 @@ "name": "Drop Duplicates" } ], + 
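Reviewer annotation (not part of the patch): the embedded DataFrameOperationsComponent above resolves "greater than" / "less than" filters by attempting a numeric comparison first and falling back to lexicographic string comparison. A minimal standalone sketch of that fallback using plain pandas, with a hypothetical `size` column for illustration only:

import pandas as pd

def filter_greater_than(df: pd.DataFrame, column_name: str, filter_value) -> pd.DataFrame:
    # Mirrors the component's "greater than" branch: compare numerically when
    # the filter value parses as a number, otherwise compare as strings.
    column = df[column_name]
    try:
        numeric_value = pd.to_numeric(filter_value)
        mask = column > numeric_value
    except (ValueError, TypeError):
        mask = column.astype(str) > str(filter_value)
    return df[mask]

# Hypothetical data: "10" parses as a number, so the rows 20 and 300 are kept.
df = pd.DataFrame({"size": [5, 20, 300]})
print(filter_greater_than(df, "size", "10"))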
"override_skip": false, "placeholder": "Select Operation", "real_time_refresh": true, "required": false, @@ -2283,6 +1868,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "sortableList", "value": [ { @@ -2306,6 +1892,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "replace_value", + "override_skip": false, "placeholder": "", "required": false, "show": false, @@ -2313,6 +1900,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -2329,6 +1917,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "replacement_value", + "override_skip": false, "placeholder": "", "required": false, "show": false, @@ -2336,6 +1925,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" } @@ -2360,6 +1950,8 @@ }, { "data": { + "description": "Perform various operations on a DataFrame.", + "display_name": "DataFrame Operations", "id": "DataFrameOperations-RhKoe", "node": { "base_classes": [ @@ -2370,7 +1962,7 @@ "custom_fields": {}, "description": "Perform various operations on a DataFrame.", "display_name": "DataFrame Operations", - "documentation": "https://docs.langflow.org/components-processing#dataframe-operations", + "documentation": "https://docs.langflow.org/dataframe-operations", "edited": false, "field_order": [ "df", @@ -2388,10 +1980,10 @@ ], "frozen": false, "icon": "table", - "last_updated": "2025-11-24T17:58:32.465Z", + "last_updated": "2025-11-25T23:37:45.069Z", "legacy": false, "metadata": { - "code_hash": "b4d6b19b6eef", + "code_hash": "904f4eaebccd", "dependencies": { "dependencies": [ { @@ -2405,7 +1997,7 @@ ], "total_dependencies": 2 }, - "module": "lfx.components.processing.dataframe_operations.DataFrameOperationsComponent" + "module": "custom_components.dataframe_operations" }, "minimized": false, "output_types": [], @@ -2415,6 +2007,7 @@ "cache": true, "display_name": "DataFrame", "group_outputs": false, + "loop_types": null, "method": "perform_operation", "name": "output", "options": null, @@ -2429,6 +2022,12 @@ ], "pinned": false, "template": { + "_frontend_node_flow_id": { + "value": "72c3d17c-2dac-4a73-b48a-6518473d7830" + }, + "_frontend_node_folder_id": { + "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + }, "_type": "Component", "ascending": { "_input_type": "BoolInput", @@ -2439,12 +2038,14 @@ "list": false, "list_add_label": "Add More", "name": "ascending", + "override_skip": false, "placeholder": "", "required": false, "show": false, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "bool", "value": true }, @@ -2464,7 +2065,7 @@ "show": true, "title_case": false, "type": "code", - "value": "import pandas as pd\n\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.inputs import SortableListInput\nfrom lfx.io import BoolInput, DataFrameInput, DropdownInput, IntInput, MessageTextInput, Output, StrInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.dataframe import DataFrame\n\n\nclass DataFrameOperationsComponent(Component):\n display_name = \"DataFrame Operations\"\n description = \"Perform various operations on a DataFrame.\"\n documentation: str = \"https://docs.langflow.org/components-processing#dataframe-operations\"\n icon = \"table\"\n name = \"DataFrameOperations\"\n\n OPERATION_CHOICES = [\n \"Add Column\",\n \"Drop Column\",\n \"Filter\",\n 
\"Head\",\n \"Rename Column\",\n \"Replace Value\",\n \"Select Columns\",\n \"Sort\",\n \"Tail\",\n \"Drop Duplicates\",\n ]\n\n inputs = [\n DataFrameInput(\n name=\"df\",\n display_name=\"DataFrame\",\n info=\"The input DataFrame to operate on.\",\n required=True,\n ),\n SortableListInput(\n name=\"operation\",\n display_name=\"Operation\",\n placeholder=\"Select Operation\",\n info=\"Select the DataFrame operation to perform.\",\n options=[\n {\"name\": \"Add Column\", \"icon\": \"plus\"},\n {\"name\": \"Drop Column\", \"icon\": \"minus\"},\n {\"name\": \"Filter\", \"icon\": \"filter\"},\n {\"name\": \"Head\", \"icon\": \"arrow-up\"},\n {\"name\": \"Rename Column\", \"icon\": \"pencil\"},\n {\"name\": \"Replace Value\", \"icon\": \"replace\"},\n {\"name\": \"Select Columns\", \"icon\": \"columns\"},\n {\"name\": \"Sort\", \"icon\": \"arrow-up-down\"},\n {\"name\": \"Tail\", \"icon\": \"arrow-down\"},\n {\"name\": \"Drop Duplicates\", \"icon\": \"copy-x\"},\n ],\n real_time_refresh=True,\n limit=1,\n ),\n StrInput(\n name=\"column_name\",\n display_name=\"Column Name\",\n info=\"The column name to use for the operation.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"filter_value\",\n display_name=\"Filter Value\",\n info=\"The value to filter rows by.\",\n dynamic=True,\n show=False,\n ),\n DropdownInput(\n name=\"filter_operator\",\n display_name=\"Filter Operator\",\n options=[\n \"equals\",\n \"not equals\",\n \"contains\",\n \"not contains\",\n \"starts with\",\n \"ends with\",\n \"greater than\",\n \"less than\",\n ],\n value=\"equals\",\n info=\"The operator to apply for filtering rows.\",\n advanced=False,\n dynamic=True,\n show=False,\n ),\n BoolInput(\n name=\"ascending\",\n display_name=\"Sort Ascending\",\n info=\"Whether to sort in ascending order.\",\n dynamic=True,\n show=False,\n value=True,\n ),\n StrInput(\n name=\"new_column_name\",\n display_name=\"New Column Name\",\n info=\"The new column name when renaming or adding a column.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"new_column_value\",\n display_name=\"New Column Value\",\n info=\"The value to populate the new column with.\",\n dynamic=True,\n show=False,\n ),\n StrInput(\n name=\"columns_to_select\",\n display_name=\"Columns to Select\",\n dynamic=True,\n is_list=True,\n show=False,\n ),\n IntInput(\n name=\"num_rows\",\n display_name=\"Number of Rows\",\n info=\"Number of rows to return (for head/tail).\",\n dynamic=True,\n show=False,\n value=5,\n ),\n MessageTextInput(\n name=\"replace_value\",\n display_name=\"Value to Replace\",\n info=\"The value to replace in the column.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"replacement_value\",\n display_name=\"Replacement Value\",\n info=\"The value to replace with.\",\n dynamic=True,\n show=False,\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"DataFrame\",\n name=\"output\",\n method=\"perform_operation\",\n info=\"The resulting DataFrame after the operation.\",\n )\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n dynamic_fields = [\n \"column_name\",\n \"filter_value\",\n \"filter_operator\",\n \"ascending\",\n \"new_column_name\",\n \"new_column_value\",\n \"columns_to_select\",\n \"num_rows\",\n \"replace_value\",\n \"replacement_value\",\n ]\n for field in dynamic_fields:\n build_config[field][\"show\"] = False\n\n if field_name == \"operation\":\n # Handle SortableListInput format\n if isinstance(field_value, list):\n operation_name = 
field_value[0].get(\"name\", \"\") if field_value else \"\"\n else:\n operation_name = field_value or \"\"\n\n # If no operation selected, all dynamic fields stay hidden (already set to False above)\n if not operation_name:\n return build_config\n\n if operation_name == \"Filter\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"filter_value\"][\"show\"] = True\n build_config[\"filter_operator\"][\"show\"] = True\n elif operation_name == \"Sort\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"ascending\"][\"show\"] = True\n elif operation_name == \"Drop Column\":\n build_config[\"column_name\"][\"show\"] = True\n elif operation_name == \"Rename Column\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"new_column_name\"][\"show\"] = True\n elif operation_name == \"Add Column\":\n build_config[\"new_column_name\"][\"show\"] = True\n build_config[\"new_column_value\"][\"show\"] = True\n elif operation_name == \"Select Columns\":\n build_config[\"columns_to_select\"][\"show\"] = True\n elif operation_name in {\"Head\", \"Tail\"}:\n build_config[\"num_rows\"][\"show\"] = True\n elif operation_name == \"Replace Value\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"replace_value\"][\"show\"] = True\n build_config[\"replacement_value\"][\"show\"] = True\n elif operation_name == \"Drop Duplicates\":\n build_config[\"column_name\"][\"show\"] = True\n\n return build_config\n\n def perform_operation(self) -> DataFrame:\n df_copy = self.df.copy()\n\n # Handle SortableListInput format for operation\n operation_input = getattr(self, \"operation\", [])\n if isinstance(operation_input, list) and len(operation_input) > 0:\n op = operation_input[0].get(\"name\", \"\")\n else:\n op = \"\"\n\n # If no operation selected, return original DataFrame\n if not op:\n return df_copy\n\n if op == \"Filter\":\n return self.filter_rows_by_value(df_copy)\n if op == \"Sort\":\n return self.sort_by_column(df_copy)\n if op == \"Drop Column\":\n return self.drop_column(df_copy)\n if op == \"Rename Column\":\n return self.rename_column(df_copy)\n if op == \"Add Column\":\n return self.add_column(df_copy)\n if op == \"Select Columns\":\n return self.select_columns(df_copy)\n if op == \"Head\":\n return self.head(df_copy)\n if op == \"Tail\":\n return self.tail(df_copy)\n if op == \"Replace Value\":\n return self.replace_values(df_copy)\n if op == \"Drop Duplicates\":\n return self.drop_duplicates(df_copy)\n msg = f\"Unsupported operation: {op}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def filter_rows_by_value(self, df: DataFrame) -> DataFrame:\n column = df[self.column_name]\n filter_value = self.filter_value\n\n # Handle regular DropdownInput format (just a string value)\n operator = getattr(self, \"filter_operator\", \"equals\") # Default to equals for backward compatibility\n\n if operator == \"equals\":\n mask = column == filter_value\n elif operator == \"not equals\":\n mask = column != filter_value\n elif operator == \"contains\":\n mask = column.astype(str).str.contains(str(filter_value), na=False)\n elif operator == \"not contains\":\n mask = ~column.astype(str).str.contains(str(filter_value), na=False)\n elif operator == \"starts with\":\n mask = column.astype(str).str.startswith(str(filter_value), na=False)\n elif operator == \"ends with\":\n mask = column.astype(str).str.endswith(str(filter_value), na=False)\n elif operator == \"greater than\":\n try:\n # Try to convert filter_value to numeric for comparison\n numeric_value = 
pd.to_numeric(filter_value)\n mask = column > numeric_value\n except (ValueError, TypeError):\n # If conversion fails, compare as strings\n mask = column.astype(str) > str(filter_value)\n elif operator == \"less than\":\n try:\n # Try to convert filter_value to numeric for comparison\n numeric_value = pd.to_numeric(filter_value)\n mask = column < numeric_value\n except (ValueError, TypeError):\n # If conversion fails, compare as strings\n mask = column.astype(str) < str(filter_value)\n else:\n mask = column == filter_value # Fallback to equals\n\n return DataFrame(df[mask])\n\n def sort_by_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.sort_values(by=self.column_name, ascending=self.ascending))\n\n def drop_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.drop(columns=[self.column_name]))\n\n def rename_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.rename(columns={self.column_name: self.new_column_name}))\n\n def add_column(self, df: DataFrame) -> DataFrame:\n df[self.new_column_name] = [self.new_column_value] * len(df)\n return DataFrame(df)\n\n def select_columns(self, df: DataFrame) -> DataFrame:\n columns = [col.strip() for col in self.columns_to_select]\n return DataFrame(df[columns])\n\n def head(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.head(self.num_rows))\n\n def tail(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.tail(self.num_rows))\n\n def replace_values(self, df: DataFrame) -> DataFrame:\n df[self.column_name] = df[self.column_name].replace(self.replace_value, self.replacement_value)\n return DataFrame(df)\n\n def drop_duplicates(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.drop_duplicates(subset=self.column_name))\n" + "value": "import pandas as pd\n\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.inputs import SortableListInput\nfrom lfx.io import BoolInput, DataFrameInput, DropdownInput, IntInput, MessageTextInput, Output, StrInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.dataframe import DataFrame\n\n\nclass DataFrameOperationsComponent(Component):\n display_name = \"DataFrame Operations\"\n description = \"Perform various operations on a DataFrame.\"\n documentation: str = \"https://docs.langflow.org/dataframe-operations\"\n icon = \"table\"\n name = \"DataFrameOperations\"\n\n OPERATION_CHOICES = [\n \"Add Column\",\n \"Drop Column\",\n \"Filter\",\n \"Head\",\n \"Rename Column\",\n \"Replace Value\",\n \"Select Columns\",\n \"Sort\",\n \"Tail\",\n \"Drop Duplicates\",\n ]\n\n inputs = [\n DataFrameInput(\n name=\"df\",\n display_name=\"DataFrame\",\n info=\"The input DataFrame to operate on.\",\n required=True,\n ),\n SortableListInput(\n name=\"operation\",\n display_name=\"Operation\",\n placeholder=\"Select Operation\",\n info=\"Select the DataFrame operation to perform.\",\n options=[\n {\"name\": \"Add Column\", \"icon\": \"plus\"},\n {\"name\": \"Drop Column\", \"icon\": \"minus\"},\n {\"name\": \"Filter\", \"icon\": \"filter\"},\n {\"name\": \"Head\", \"icon\": \"arrow-up\"},\n {\"name\": \"Rename Column\", \"icon\": \"pencil\"},\n {\"name\": \"Replace Value\", \"icon\": \"replace\"},\n {\"name\": \"Select Columns\", \"icon\": \"columns\"},\n {\"name\": \"Sort\", \"icon\": \"arrow-up-down\"},\n {\"name\": \"Tail\", \"icon\": \"arrow-down\"},\n {\"name\": \"Drop Duplicates\", \"icon\": \"copy-x\"},\n ],\n real_time_refresh=True,\n limit=1,\n ),\n StrInput(\n name=\"column_name\",\n display_name=\"Column Name\",\n info=\"The column name to use 
for the operation.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"filter_value\",\n display_name=\"Filter Value\",\n info=\"The value to filter rows by.\",\n dynamic=True,\n show=False,\n ),\n DropdownInput(\n name=\"filter_operator\",\n display_name=\"Filter Operator\",\n options=[\n \"equals\",\n \"not equals\",\n \"contains\",\n \"not contains\",\n \"starts with\",\n \"ends with\",\n \"greater than\",\n \"less than\",\n ],\n value=\"equals\",\n info=\"The operator to apply for filtering rows.\",\n advanced=False,\n dynamic=True,\n show=False,\n ),\n BoolInput(\n name=\"ascending\",\n display_name=\"Sort Ascending\",\n info=\"Whether to sort in ascending order.\",\n dynamic=True,\n show=False,\n value=True,\n ),\n StrInput(\n name=\"new_column_name\",\n display_name=\"New Column Name\",\n info=\"The new column name when renaming or adding a column.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"new_column_value\",\n display_name=\"New Column Value\",\n info=\"The value to populate the new column with.\",\n dynamic=True,\n show=False,\n ),\n StrInput(\n name=\"columns_to_select\",\n display_name=\"Columns to Select\",\n dynamic=True,\n is_list=True,\n show=False,\n ),\n IntInput(\n name=\"num_rows\",\n display_name=\"Number of Rows\",\n info=\"Number of rows to return (for head/tail).\",\n dynamic=True,\n show=False,\n value=5,\n ),\n MessageTextInput(\n name=\"replace_value\",\n display_name=\"Value to Replace\",\n info=\"The value to replace in the column.\",\n dynamic=True,\n show=False,\n ),\n MessageTextInput(\n name=\"replacement_value\",\n display_name=\"Replacement Value\",\n info=\"The value to replace with.\",\n dynamic=True,\n show=False,\n ),\n ]\n\n outputs = [\n Output(\n display_name=\"DataFrame\",\n name=\"output\",\n method=\"perform_operation\",\n info=\"The resulting DataFrame after the operation.\",\n )\n ]\n\n def update_build_config(self, build_config, field_value, field_name=None):\n dynamic_fields = [\n \"column_name\",\n \"filter_value\",\n \"filter_operator\",\n \"ascending\",\n \"new_column_name\",\n \"new_column_value\",\n \"columns_to_select\",\n \"num_rows\",\n \"replace_value\",\n \"replacement_value\",\n ]\n for field in dynamic_fields:\n build_config[field][\"show\"] = False\n\n if field_name == \"operation\":\n # Handle SortableListInput format\n if isinstance(field_value, list):\n operation_name = field_value[0].get(\"name\", \"\") if field_value else \"\"\n else:\n operation_name = field_value or \"\"\n\n # If no operation selected, all dynamic fields stay hidden (already set to False above)\n if not operation_name:\n return build_config\n\n if operation_name == \"Filter\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"filter_value\"][\"show\"] = True\n build_config[\"filter_operator\"][\"show\"] = True\n elif operation_name == \"Sort\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"ascending\"][\"show\"] = True\n elif operation_name == \"Drop Column\":\n build_config[\"column_name\"][\"show\"] = True\n elif operation_name == \"Rename Column\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"new_column_name\"][\"show\"] = True\n elif operation_name == \"Add Column\":\n build_config[\"new_column_name\"][\"show\"] = True\n build_config[\"new_column_value\"][\"show\"] = True\n elif operation_name == \"Select Columns\":\n build_config[\"columns_to_select\"][\"show\"] = True\n elif operation_name in {\"Head\", \"Tail\"}:\n build_config[\"num_rows\"][\"show\"] = 
True\n elif operation_name == \"Replace Value\":\n build_config[\"column_name\"][\"show\"] = True\n build_config[\"replace_value\"][\"show\"] = True\n build_config[\"replacement_value\"][\"show\"] = True\n elif operation_name == \"Drop Duplicates\":\n build_config[\"column_name\"][\"show\"] = True\n\n return build_config\n\n def perform_operation(self) -> DataFrame:\n df_copy = self.df.copy()\n\n # Handle SortableListInput format for operation\n operation_input = getattr(self, \"operation\", [])\n if isinstance(operation_input, list) and len(operation_input) > 0:\n op = operation_input[0].get(\"name\", \"\")\n else:\n op = \"\"\n\n # If no operation selected, return original DataFrame\n if not op:\n return df_copy\n\n if op == \"Filter\":\n return self.filter_rows_by_value(df_copy)\n if op == \"Sort\":\n return self.sort_by_column(df_copy)\n if op == \"Drop Column\":\n return self.drop_column(df_copy)\n if op == \"Rename Column\":\n return self.rename_column(df_copy)\n if op == \"Add Column\":\n return self.add_column(df_copy)\n if op == \"Select Columns\":\n return self.select_columns(df_copy)\n if op == \"Head\":\n return self.head(df_copy)\n if op == \"Tail\":\n return self.tail(df_copy)\n if op == \"Replace Value\":\n return self.replace_values(df_copy)\n if op == \"Drop Duplicates\":\n return self.drop_duplicates(df_copy)\n msg = f\"Unsupported operation: {op}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def filter_rows_by_value(self, df: DataFrame) -> DataFrame:\n column = df[self.column_name]\n filter_value = self.filter_value\n\n # Handle regular DropdownInput format (just a string value)\n operator = getattr(self, \"filter_operator\", \"equals\") # Default to equals for backward compatibility\n\n if operator == \"equals\":\n mask = column == filter_value\n elif operator == \"not equals\":\n mask = column != filter_value\n elif operator == \"contains\":\n mask = column.astype(str).str.contains(str(filter_value), na=False)\n elif operator == \"not contains\":\n mask = ~column.astype(str).str.contains(str(filter_value), na=False)\n elif operator == \"starts with\":\n mask = column.astype(str).str.startswith(str(filter_value), na=False)\n elif operator == \"ends with\":\n mask = column.astype(str).str.endswith(str(filter_value), na=False)\n elif operator == \"greater than\":\n try:\n # Try to convert filter_value to numeric for comparison\n numeric_value = pd.to_numeric(filter_value)\n mask = column > numeric_value\n except (ValueError, TypeError):\n # If conversion fails, compare as strings\n mask = column.astype(str) > str(filter_value)\n elif operator == \"less than\":\n try:\n # Try to convert filter_value to numeric for comparison\n numeric_value = pd.to_numeric(filter_value)\n mask = column < numeric_value\n except (ValueError, TypeError):\n # If conversion fails, compare as strings\n mask = column.astype(str) < str(filter_value)\n else:\n mask = column == filter_value # Fallback to equals\n\n return DataFrame(df[mask])\n\n def sort_by_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.sort_values(by=self.column_name, ascending=self.ascending))\n\n def drop_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.drop(columns=[self.column_name]))\n\n def rename_column(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.rename(columns={self.column_name: self.new_column_name}))\n\n def add_column(self, df: DataFrame) -> DataFrame:\n df[self.new_column_name] = [self.new_column_value] * len(df)\n return DataFrame(df)\n\n def select_columns(self, df: 
DataFrame) -> DataFrame:\n columns = [col.strip() for col in self.columns_to_select]\n return DataFrame(df[columns])\n\n def head(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.head(self.num_rows))\n\n def tail(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.tail(self.num_rows))\n\n def replace_values(self, df: DataFrame) -> DataFrame:\n df[self.column_name] = df[self.column_name].replace(self.replace_value, self.replacement_value)\n return DataFrame(df)\n\n def drop_duplicates(self, df: DataFrame) -> DataFrame:\n return DataFrame(df.drop_duplicates(subset=self.column_name))\n" }, "column_name": { "_input_type": "StrInput", @@ -2476,12 +2077,14 @@ "list_add_label": "Add More", "load_from_db": false, "name": "column_name", + "override_skip": false, "placeholder": "", "required": false, "show": false, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -2495,12 +2098,14 @@ "list_add_label": "Add More", "load_from_db": false, "name": "columns_to_select", + "override_skip": false, "placeholder": "", "required": false, "show": false, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -2516,6 +2121,7 @@ "list": false, "list_add_label": "Add More", "name": "df", + "override_skip": false, "placeholder": "", "required": true, "show": true, @@ -2523,6 +2129,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "other", "value": "" }, @@ -2547,6 +2154,7 @@ "less than" ], "options_metadata": [], + "override_skip": false, "placeholder": "", "required": false, "show": false, @@ -2554,6 +2162,7 @@ "toggle": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "str", "value": "equals" }, @@ -2570,6 +2179,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "filter_value", + "override_skip": false, "placeholder": "", "required": false, "show": false, @@ -2577,9 +2187,11 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, + "is_refresh": false, "new_column_name": { "_input_type": "StrInput", "advanced": false, @@ -2590,12 +2202,14 @@ "list_add_label": "Add More", "load_from_db": false, "name": "new_column_name", + "override_skip": false, "placeholder": "", "required": false, "show": false, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -2612,6 +2226,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "new_column_value", + "override_skip": false, "placeholder": "", "required": false, "show": false, @@ -2619,6 +2234,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -2631,12 +2247,14 @@ "list": false, "list_add_label": "Add More", "name": "num_rows", + "override_skip": false, "placeholder": "", "required": false, "show": true, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "int", "value": 5 }, @@ -2647,6 +2265,7 @@ "dynamic": false, "info": "Select the DataFrame operation to perform.", "limit": 1, + "load_from_db": false, "name": "operation", "options": [ { @@ -2690,6 +2309,7 @@ "name": "Drop Duplicates" } ], + "override_skip": false, "placeholder": "Select Operation", "real_time_refresh": true, 
"required": false, @@ -2698,6 +2318,7 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "sortableList", "value": [ { @@ -2721,6 +2342,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "replace_value", + "override_skip": false, "placeholder": "", "required": false, "show": false, @@ -2728,6 +2350,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -2744,6 +2367,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "replacement_value", + "override_skip": false, "placeholder": "", "required": false, "show": false, @@ -2751,6 +2375,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" } @@ -2767,14 +2392,16 @@ "width": 320 }, "position": { - "x": 2773.2060092972047, - "y": 2337.54590413581 + "x": 3227.5026887437266, + "y": 1418.705251721416 }, "selected": false, "type": "genericNode" }, { "data": { + "description": "Get chat inputs from the Playground.", + "display_name": "Chat Input", "id": "ChatInput-sskrk", "node": { "base_classes": [ @@ -2785,7 +2412,7 @@ "custom_fields": {}, "description": "Get chat inputs from the Playground.", "display_name": "Chat Input", - "documentation": "https://docs.langflow.org/components-io#chat-input", + "documentation": "https://docs.langflow.org/chat-input-and-output", "edited": false, "field_order": [ "input_value", @@ -2800,17 +2427,17 @@ "icon": "MessagesSquare", "legacy": false, "metadata": { - "code_hash": "0014a5b41817", + "code_hash": "7a26c54d89ed", "dependencies": { "dependencies": [ { "name": "lfx", - "version": "0.1.13.dev9" + "version": null } ], "total_dependencies": 1 }, - "module": "lfx.components.input_output.chat.ChatInput" + "module": "custom_components.chat_input" }, "minimized": true, "output_types": [], @@ -2820,8 +2447,11 @@ "cache": true, "display_name": "Chat Message", "group_outputs": false, + "loop_types": null, "method": "message_response", "name": "message", + "options": null, + "required_inputs": null, "selected": "Message", "tool_mode": true, "types": [ @@ -2849,7 +2479,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from lfx.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom lfx.schema.message import Message\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-input\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n 
display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"context_id\",\n display_name=\"Context ID\",\n info=\"The context ID of the chat. Adds an extra layer to the local memory.\",\n value=\"\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n # Ensure files is a list and filter out empty/None values\n files = self.files if self.files else []\n if files and not isinstance(files, list):\n files = [files]\n # Filter out None/empty values\n files = [f for f in files if f is not None and f != \"\"]\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n context_id=self.context_id,\n files=files,\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n" + "value": "from lfx.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom lfx.schema.message import Message\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n documentation: str = \"https://docs.langflow.org/chat-input-and-output\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Input Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"context_id\",\n display_name=\"Context ID\",\n info=\"The context ID of the chat. 
Adds an extra layer to the local memory.\",\n value=\"\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n temp_file=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n # Ensure files is a list and filter out empty/None values\n files = self.files if self.files else []\n if files and not isinstance(files, list):\n files = [files]\n # Filter out None/empty values\n files = [f for f in files if f is not None and f != \"\"]\n\n session_id = self.session_id or self.graph.session_id or \"\"\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=session_id,\n context_id=self.context_id,\n files=files,\n )\n if session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n" }, "context_id": { "_input_type": "MessageTextInput", @@ -2864,6 +2494,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "context_id", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -2871,6 +2502,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -2909,6 +2541,7 @@ "list": true, "list_add_label": "Add More", "name": "files", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -2916,12 +2549,14 @@ "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "file", "value": "" }, "input_value": { "_input_type": "MultilineInput", "advanced": false, + "ai_enabled": false, "copy_field": false, "display_name": "Input Text", "dynamic": false, @@ -2932,6 +2567,7 @@ "load_from_db": false, "multiline": true, "name": "input_value", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -2939,6 +2575,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "www.langflow.org" }, @@ -2957,6 +2594,7 @@ "User" ], "options_metadata": [], + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -2964,6 +2602,7 @@ "toggle": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "str", "value": "User" }, @@ -2980,6 +2619,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "sender_name", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -2987,6 +2627,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "User" }, @@ -3003,6 +2644,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "session_id", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -3010,6 +2652,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -3022,12 +2665,14 @@ "list": false, "list_add_label": "Add More", "name": "should_store_message", + "override_skip": false, "placeholder": "", "required": false, "show": true, "title_case": false, 
"tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "bool", "value": true } @@ -3052,6 +2697,8 @@ }, { "data": { + "description": "Display a chat message in the Playground.", + "display_name": "Chat Output", "id": "ChatOutput-0XHyo", "node": { "base_classes": [ @@ -3062,7 +2709,7 @@ "custom_fields": {}, "description": "Display a chat message in the Playground.", "display_name": "Chat Output", - "documentation": "https://docs.langflow.org/components-io#chat-output", + "documentation": "https://docs.langflow.org/chat-input-and-output", "edited": false, "field_order": [ "input_value", @@ -3078,7 +2725,7 @@ "icon": "MessagesSquare", "legacy": false, "metadata": { - "code_hash": "4848ad3e35d5", + "code_hash": "cae45e2d53f6", "dependencies": { "dependencies": [ { @@ -3087,16 +2734,16 @@ }, { "name": "fastapi", - "version": "0.119.1" + "version": "0.120.0" }, { "name": "lfx", - "version": "0.1.13.dev9" + "version": null } ], "total_dependencies": 3 }, - "module": "lfx.components.input_output.chat_output.ChatOutput" + "module": "custom_components.chat_output" }, "minimized": true, "output_types": [], @@ -3106,8 +2753,11 @@ "cache": true, "display_name": "Output Message", "group_outputs": false, + "loop_types": null, "method": "message_response", "name": "message", + "options": null, + "required_inputs": null, "selected": "Message", "tool_mode": true, "types": [ @@ -3128,12 +2778,14 @@ "list": false, "list_add_label": "Add More", "name": "clean_data", + "override_skip": false, "placeholder": "", "required": false, "show": true, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "bool", "value": true }, @@ -3153,7 +2805,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/components-io#chat-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. 
If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"context_id\",\n display_name=\"Context ID\",\n info=\"The context ID of the chat. Adds an extra layer to the local memory.\",\n value=\"\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n advanced=True,\n info=\"Whether to clean data before converting to string.\",\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, _, display_name, source_id = self.get_properties_from_source_component()\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message):\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.context_id = self.context_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n\n # Store message if needed\n if self.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = 
type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n clean_data: bool = getattr(self, \"clean_data\", False)\n return \"\\n\".join([safe_convert(item, clean_data=clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" + "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/chat-input-and-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"context_id\",\n display_name=\"Context ID\",\n info=\"The context ID of the chat. Adds an extra layer to the local memory.\",\n value=\"\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n advanced=True,\n info=\"Whether to clean data before converting to string.\",\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, _, display_name, source_id = self.get_properties_from_source_component()\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message) and not self.is_connected_to_chat_input():\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id or self.graph.session_id or \"\"\n message.context_id = self.context_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n\n # Store message if needed\n if message.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n 
clean_data: bool = getattr(self, \"clean_data\", False)\n return \"\\n\".join([safe_convert(item, clean_data=clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" }, "context_id": { "_input_type": "MessageTextInput", @@ -3168,6 +2820,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "context_id", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -3175,6 +2828,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -3191,6 +2845,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "data_template", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -3198,6 +2853,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "{text}" }, @@ -3215,11 +2871,13 @@ "list": false, "list_add_label": "Add More", "name": "input_value", + "override_skip": false, "placeholder": "", "required": true, "show": true, "title_case": false, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "other", "value": "" }, @@ -3238,6 +2896,7 @@ "User" ], "options_metadata": [], + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -3245,6 +2904,7 @@ "toggle": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "str", "value": "Machine" }, @@ -3261,6 +2921,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "sender_name", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -3268,6 +2929,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "AI" }, @@ -3284,6 +2946,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "session_id", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -3291,6 +2954,7 @@ "tool_mode": false, "trace_as_input": true, "trace_as_metadata": true, + "track_in_telemetry": false, "type": "str", "value": "" }, @@ -3303,12 +2967,14 @@ "list": false, "list_add_label": "Add More", "name": "should_store_message", + "override_skip": false, "placeholder": "", "required": false, "show": true, "title_case": false, "tool_mode": false, "trace_as_metadata": true, + "track_in_telemetry": true, "type": "bool", "value": true } @@ -3325,14 +2991,16 @@ "width": 192 }, "position": { - "x": 3171.102087280453, - "y": 2558.197734507858 + "x": 3823.513108139744, + "y": 1571.0417919511297 }, "selected": false, "type": "genericNode" }, { "data": { + "description": "Generate embeddings using a specified provider.", + "display_name": "Embedding Model", "id": "EmbeddingModel-XjV5v", "node": { "base_classes": [ @@ -3364,9 +3032,10 @@ ], "frozen": false, "icon": "binary", + "last_updated": "2025-11-25T23:37:44.980Z", "legacy": false, "metadata": { - "code_hash": "c5e0a4535a27", + "code_hash": "9e44c83a5058", "dependencies": { "dependencies": [ { @@ -3383,7 +3052,7 @@ }, { "name": "lfx", - "version": "0.2.0.dev19" + "version": null }, { "name": "langchain_ollama", @@ -3410,6 +3079,7 @@ "cache": true, "display_name": "Embedding Model", "group_outputs": false, + "loop_types": null, "method": "build_embeddings", "name": "embeddings", "options": null, @@ -3424,6 +3094,12 @@ ], "pinned": false, "template": { + "_frontend_node_flow_id": { + "value": 
"72c3d17c-2dac-4a73-b48a-6518473d7830" + }, + "_frontend_node_folder_id": { + "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + }, "_type": "Component", "api_base": { "_input_type": "MessageTextInput", @@ -3438,6 +3114,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "api_base", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -3458,6 +3135,7 @@ "input_types": [], "load_from_db": true, "name": "api_key", + "override_skip": false, "password": true, "placeholder": "", "real_time_refresh": true, @@ -3487,6 +3165,7 @@ "https://ca-tor.ml.cloud.ibm.com" ], "options_metadata": [], + "override_skip": false, "placeholder": "", "real_time_refresh": true, "required": false, @@ -3508,6 +3187,7 @@ "list": false, "list_add_label": "Add More", "name": "chunk_size", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -3534,7 +3214,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). 
Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n return OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n if provider == 
\"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n return OllamaEmbeddings(\n model=model,\n base_url=transformed_base_url or \"http://localhost:11434\",\n **model_kwargs,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n from ibm_watsonx_ai import APIClient, Credentials\n\n credentials = Credentials(\n api_key=self.api_key,\n url=base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\",\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n return WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n 
build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import 
OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and 
transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n 
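# Surface the OpenAI inputs and hide the fields that belong to the other providers.\n                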
build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await 
is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" }, "dimensions": { "_input_type": "IntInput", @@ -3545,6 +3225,7 @@ "list": false, "list_add_label": "Add More", "name": "dimensions", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -3564,6 +3245,7 @@ "list": false, "list_add_label": "Add More", "name": "input_text", + "override_skip": false, "placeholder": "", "required": false, "show": false, @@ -3574,6 +3256,7 @@ "type": "bool", "value": true }, + "is_refresh": false, "max_retries": { "_input_type": "IntInput", "advanced": true, @@ -3583,6 +3266,7 @@ "list": false, "list_add_label": "Add More", "name": "max_retries", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -3609,6 +3293,7 @@ "text-embedding-ada-002" ], "options_metadata": [], + "override_skip": false, "placeholder": "", "real_time_refresh": true, "refresh_button": true, @@ -3631,6 +3316,7 @@ "list": false, "list_add_label": "Add More", "name": "model_kwargs", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -3654,6 +3340,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "ollama_base_url", + "override_skip": false, "placeholder": "", "real_time_refresh": true, "required": false, @@ -3679,6 +3366,7 @@ "list_add_label": "Add More", "load_from_db": false, "name": "project_id", + "override_skip": false, "placeholder": "", "required": false, "show": false, @@ -3716,6 +3404,7 @@ "icon": "WatsonxAI" } ], + "override_skip": false, "placeholder": "", "real_time_refresh": true, "required": false, @@ -3737,6 +3426,7 @@ "list": false, "list_add_label": "Add More", "name": "request_timeout", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -3756,6 +3446,7 @@ "list": false, "list_add_label": "Add More", "name": "show_progress_bar", + "override_skip": false, "placeholder": "", "required": false, "show": true, @@ -3775,6 +3466,7 @@ "list": false, "list_add_label": "Add More", "name": "truncate_input_tokens", + "override_skip": false, "placeholder": "", "required": false, "show": false, @@ -3801,22 +3493,2482 @@ "x": 2066.3681917820168, "y": 2053.0594731518368 }, - "selected": true, + "selected": false, + "type": "genericNode" + }, + { + "data": { + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-PMGGV", + "node": { + "base_classes": [ + "Data", + "DataFrame", + "VectorStore" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.", + "display_name": "OpenSearch (Multi-Model Multi-Embedding)", + "documentation": "", + "edited": false, + "field_order": [ + "docs_metadata", + "opensearch_url", + "index_name", + "engine", + "space_type", + "ef_construction", + "m", + "num_candidates", + "ingest_data", + "search_query", + "should_cache_vector_store", + "embedding", + "embedding_model_name", + "vector_field", + "number_of_results", + "filter_expression", + "auth_mode", + "username", + "password", + "jwt_token", + "jwt_header", + "bearer_prefix", 
+ "use_ssl", + "verify_certs" + ], + "frozen": false, + "icon": "OpenSearch", + "last_updated": "2025-11-25T23:38:50.335Z", + "legacy": false, + "metadata": { + "code_hash": "8c78d799fef4", + "dependencies": { + "dependencies": [ + { + "name": "opensearchpy", + "version": "2.8.0" + }, + { + "name": "lfx", + "version": null + } + ], + "total_dependencies": 2 + }, + "module": "lfx.components.elastic.opensearch_multimodal.OpenSearchVectorStoreComponentMultimodalMultiEmbedding" + }, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Search Results", + "group_outputs": false, + "loop_types": null, + "method": "search_documents", + "name": "search_results", + "options": null, + "required_inputs": null, + "selected": "Data", + "tool_mode": true, + "types": [ + "Data" + ], + "value": "__UNDEFINED__" + }, + { + "allows_loop": false, + "cache": true, + "display_name": "DataFrame", + "group_outputs": false, + "loop_types": null, + "method": "as_dataframe", + "name": "dataframe", + "options": null, + "required_inputs": null, + "selected": "DataFrame", + "tool_mode": true, + "types": [ + "DataFrame" + ], + "value": "__UNDEFINED__" + }, + { + "allows_loop": false, + "cache": true, + "display_name": "Vector Store Connection", + "group_outputs": false, + "hidden": false, + "loop_types": null, + "method": "as_vector_store", + "name": "vectorstoreconnection", + "options": null, + "required_inputs": null, + "selected": "VectorStore", + "tool_mode": true, + "types": [ + "VectorStore" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_frontend_node_flow_id": { + "value": "72c3d17c-2dac-4a73-b48a-6518473d7830" + }, + "_frontend_node_folder_id": { + "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + }, + "_type": "Component", + "auth_mode": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Authentication Mode", + "dynamic": false, + "external_options": {}, + "info": "Authentication method: 'basic' for username/password authentication, or 'jwt' for JSON Web Token (Bearer) authentication.", + "name": "auth_mode", + "options": [ + "basic", + "jwt" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "jwt" + }, + "bearer_prefix": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Prefix 'Bearer '", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "bearer_prefix", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions 
import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n    \"\"\"Normalize embedding model name for use as field suffix.\n\n    Converts model names to valid OpenSearch field names by replacing\n    special characters and ensuring alphanumeric format.\n\n    Args:\n        model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n    Returns:\n        Normalized field suffix (e.g., \"text_embedding_3_small\")\n    \"\"\"\n    normalized = model_name.lower()\n    # Replace common separators with underscores\n    normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n    # Remove any non-alphanumeric characters except underscores\n    normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n    # Remove duplicate underscores\n    while \"__\" in normalized:\n        normalized = normalized.replace(\"__\", \"_\")\n    return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n    \"\"\"Get the dynamic embedding field name for a model.\n\n    Args:\n        model_name: Embedding model name\n\n    Returns:\n        Field name in format: chunk_embedding_{normalized_model_name}\n    \"\"\"\n    field_name = f\"chunk_embedding_{normalize_model_name(model_name)}\"\n    logger.info(f\"Resolved embedding field name: {field_name}\")\n    return field_name\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n    \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n    This component provides vector storage and retrieval using OpenSearch, combining semantic\n    similarity search (KNN) with keyword-based search for optimal results. 
It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
\"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n            logger.error(msg)\n            raise ValueError(msg)\n\n    def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n        \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n        Amazon OpenSearch Serverless has restrictions on which vector engines\n        can be used. This method ensures the selected engine is compatible.\n\n        Args:\n            is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n            engine: The selected vector search engine\n\n        Raises:\n            ValueError: If AOSS is used with an incompatible engine\n        \"\"\"\n        if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n            msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n            raise ValueError(msg)\n\n    def _is_aoss_enabled(self, http_auth: Any) -> bool:\n        \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n        Args:\n            http_auth: The HTTP authentication object\n\n        Returns:\n            True if AOSS is enabled, False otherwise\n        \"\"\"\n        return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n    def _bulk_ingest_embeddings(\n        self,\n        client: OpenSearch,\n        index_name: str,\n        embeddings: list[list[float]],\n        texts: list[str],\n        metadatas: list[dict] | None = None,\n        ids: list[str] | None = None,\n        vector_field: str = \"vector_field\",\n        text_field: str = \"text\",\n        embedding_model: str = \"unknown\",\n        mapping: dict | None = None,\n        max_chunk_bytes: int | None = 1 * 1024 * 1024,\n        *,\n        is_aoss: bool = False,\n    ) -> list[str]:\n        \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n        This method uses bulk operations to insert documents with their vector\n        embeddings and metadata into the specified OpenSearch index. 
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n self.log(self.ingest_data)\n client = self.build_client()\n logger.warning(f\"Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def 
_add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n docs = self.ingest_data or []\n if not docs:\n self.log(\"No documents to ingest.\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n if not embeddings_list:\n msg = \"At least one embedding is required to embed documents.\"\n raise ValueError(msg)\n\n self.log(f\"Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = 
[]\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in 
self.docs_metadata:\n                    if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n                        additional_metadata[item[\"key\"]] = item[\"value\"]\n                # Replace string \"None\" values with actual None\n                for key, value in additional_metadata.items():\n                    if value == \"None\":\n                        additional_metadata[key] = None\n        logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n        for doc_obj in docs:\n            data_copy = json.loads(doc_obj.model_dump_json())\n            text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n            texts.append(text)\n\n            # Merge additional metadata from table input\n            data_copy.update(additional_metadata)\n\n            metadatas.append(data_copy)\n        self.log(metadatas)\n\n        # Generate embeddings (threaded for concurrency) with retries\n        def embed_chunk(chunk_text: str) -> list[float]:\n            return selected_embedding.embed_documents([chunk_text])[0]\n\n        vectors: list[list[float]] | None = None\n        last_exception: Exception | None = None\n        delay = 1.0\n        attempts = 0\n        max_attempts = 3\n\n        while attempts < max_attempts:\n            attempts += 1\n            try:\n                max_workers = min(max(len(texts), 1), 8)\n                with ThreadPoolExecutor(max_workers=max_workers) as executor:\n                    futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n                    vectors = [None] * len(texts)\n                    for future in as_completed(futures):\n                        idx = futures[future]\n                        vectors[idx] = future.result()\n                break\n            except Exception as exc:\n                last_exception = exc\n                if attempts >= max_attempts:\n                    logger.error(\n                        f\"Embedding generation failed for model {embedding_model} after retries\",\n                        error=str(exc),\n                    )\n                    raise\n                logger.warning(\n                    f\"Threaded embedding generation failed for model {embedding_model} \"\n                    f\"(attempt {attempts}/{max_attempts}), retrying in {delay:.1f}s\"\n                )\n                time.sleep(delay)\n                delay = min(delay * 2, 8.0)\n\n        if vectors is None:\n            raise RuntimeError(\n                f\"Embedding generation failed for {embedding_model}: {last_exception}\"\n                if last_exception\n                else f\"Embedding generation failed for {embedding_model}\"\n            )\n\n        if not vectors:\n            self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n            return\n\n        # Get vector dimension for mapping\n        dim = len(vectors[0]) if vectors else 768  # default fallback\n\n        # Check for AOSS\n        auth_kwargs = self._build_auth_kwargs()\n        is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n        # Validate engine with AOSS\n        engine = getattr(self, \"engine\", \"jvector\")\n        self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n        # Create mapping with proper KNN settings\n        space_type = getattr(self, \"space_type\", \"l2\")\n        ef_construction = getattr(self, \"ef_construction\", 512)\n        m = getattr(self, \"m\", 16)\n\n        mapping = self._default_text_mapping(\n            dim=dim,\n            engine=engine,\n            space_type=space_type,\n            ef_construction=ef_construction,\n            m=m,\n            vector_field=dynamic_field_name,  # Use dynamic field name\n        )\n\n        # Ensure index exists with baseline mapping\n        try:\n            if not client.indices.exists(index=self.index_name):\n                self.log(f\"Creating index '{self.index_name}' with base mapping\")\n                client.indices.create(index=self.index_name, body=mapping)\n        except RequestError as creation_error:\n            if creation_error.error != \"resource_already_exists_exception\":\n                logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n        # Ensure the dynamic field exists in the index\n        self._ensure_embedding_field_mapping(\n            client=client,\n            index_name=self.index_name,\n            field_name=dynamic_field_name,\n            dim=dim,\n            engine=engine,\n            space_type=space_type,\n            
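# Same HNSW construction parameters as the base mapping built above\n            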
ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique 
embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": {\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n result = client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n return [self._get_embedding_model_name()]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. 
Auto-detects all embedding models present in the index\n        2. Generates query embeddings for ALL detected models in parallel\n        3. Combines multiple KNN queries using dis_max (picks best match)\n        4. Adds keyword search with fuzzy matching (30% weight)\n        5. Applies optional filtering and score thresholds\n        6. Returns aggregations for faceted search\n\n        Search weights:\n        - Semantic search (dis_max across all models): 70%\n        - Keyword search: 30%\n\n        Args:\n            query: Search query string (used for both vector embedding and keyword search)\n\n        Returns:\n            List of search results with page_content, metadata, and relevance scores\n\n        Raises:\n            ValueError: If embedding component is not provided or filter JSON is invalid\n        \"\"\"\n        logger.info(self.ingest_data)\n        client = self.build_client()\n        q = (query or \"\").strip()\n\n        # Parse optional filter expression\n        filter_obj = None\n        if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n            try:\n                filter_obj = json.loads(self.filter_expression)\n            except json.JSONDecodeError as e:\n                msg = f\"Invalid filter_expression JSON: {e}\"\n                raise ValueError(msg) from e\n\n        if not self.embedding:\n            msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n            raise ValueError(msg)\n\n        # Build filter clauses first so we can use them in model detection\n        filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n        # Detect available embedding models in the index (scoped by filters)\n        available_models = self._detect_available_models(client, filter_clauses)\n\n        if not available_models:\n            logger.warning(\"No embedding models found in index, using current model\")\n            available_models = [self._get_embedding_model_name()]\n\n        # Generate embeddings for ALL detected models\n        query_embeddings = {}\n\n        # Normalize embedding to list\n        embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n        # Create a comprehensive map of model names to embedding objects\n        # Check all possible identifiers (deployment, model, model_id, model_name)\n        # Also leverage available_models list from EmbeddingsWithModels\n        # Handle duplicate identifiers by creating combined keys\n        embedding_by_model = {}\n        identifier_conflicts = {}  # Track which identifiers have conflicts\n\n        for idx, emb_obj in enumerate(embeddings_list):\n            # Get all possible identifiers for this embedding\n            identifiers = []\n            deployment = getattr(emb_obj, \"deployment\", None)\n            model = getattr(emb_obj, \"model\", None)\n            model_id = getattr(emb_obj, \"model_id\", None)\n            model_name = getattr(emb_obj, \"model_name\", None)\n            dimensions = getattr(emb_obj, \"dimensions\", None)\n            # Distinct name so we do not clobber the detected index-models list above\n            available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n            logger.info(\n                f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n                f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n                f\"available_models={available_models_attr}\"\n            )\n\n            # If this embedding has available_models dict, map all models to their dedicated instances\n            if available_models_attr and isinstance(available_models_attr, dict):\n                logger.info(f\"Embedding object {idx} provides {len(available_models_attr)} models via available_models dict\")\n                for model_name_key, dedicated_embedding in available_models_attr.items():\n                    if model_name_key and str(model_name_key).strip():\n                        model_str = str(model_name_key).strip()\n                        if model_str not in embedding_by_model:\n                            # Use the dedicated embedding instance from the dict\n                            embedding_by_model[model_str] = dedicated_embedding\n                            logger.info(f\"Mapped available model 
'{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. \"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n else:\n # No matching embedding found for this model\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. 
\"\n f\"This model will be skipped. Available models: {list(embedding_by_model.keys())}\"\n )\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n\n if not query_embeddings:\n msg = \"Failed to generate embeddings for any model\"\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. 
\"\n \"Returning empty search results.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found 
{len(hits)} results\")\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n raw = self.search(self.search_query or \"\")\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n" + }, + "docs_metadata": { + "_input_type": "TableInput", + "advanced": false, + "display_name": "Document Metadata", + "dynamic": false, + "info": "Additional metadata key-value pairs to be added to all ingested documents. Useful for tagging documents with source information, categories, or other custom attributes.", + "input_types": [ + "Data" + ], + "is_list": true, + "list_add_label": "Add More", + "name": "docs_metadata", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "table_icon": "Table", + "table_schema": [ + { + "description": "Key name", + "display_name": "Key", + "formatter": "text", + "name": "key", + "type": "str" + }, + { + "description": "Value of the metadata", + "display_name": "Value", + "formatter": "text", + "name": "value", + "type": "str" + } + ], + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "trigger_icon": "Table", + "trigger_text": "Open table", + "type": "table", + "value": [] + }, + "ef_construction": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "EF Construction", + "dynamic": false, + "info": "Size of the dynamic candidate list during index construction. 
Higher values improve recall but increase indexing time and memory usage.", + "list": false, + "list_add_label": "Add More", + "name": "ef_construction", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 512 + }, + "embedding": { + "_input_type": "HandleInput", + "advanced": false, + "display_name": "Embedding", + "dynamic": false, + "info": "", + "input_types": [ + "Embeddings" + ], + "list": true, + "list_add_label": "Add More", + "name": "embedding", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "other", + "value": "" + }, + "embedding_model_name": { + "_input_type": "StrInput", + "advanced": false, + "display_name": "Embedding Model Name", + "dynamic": false, + "info": "Name of the embedding model to use for ingestion. This selects which embedding from the list will be used to embed documents. Matches on deployment, model, model_id, or model_name. For duplicate deployments, use combined format: 'deployment:model' (e.g., 'text-embedding-ada-002:text-embedding-3-large'). Leave empty to use the first embedding. Error message will show all available identifiers.", + "list": false, + "list_add_label": "Add More", + "load_from_db": true, + "name": "embedding_model_name", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "SELECTED_EMBEDDING_MODEL" + }, + "engine": { + "_input_type": "DropdownInput", + "advanced": true, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Vector Engine", + "dynamic": false, + "external_options": {}, + "info": "Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.", + "name": "engine", + "options": [ + "jvector", + "nmslib", + "faiss", + "lucene" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "jvector" + }, + "filter_expression": { + "_input_type": "MultilineInput", + "advanced": false, + "ai_enabled": false, + "copy_field": false, + "display_name": "Search Filters (JSON)", + "dynamic": false, + "info": "Optional JSON configuration for search filtering, result limits, and score thresholds.\n\nFormat 1 - Explicit filters:\n{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, {\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\n\nFormat 2 - Context-style mapping:\n{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\n\nUse __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "multiline": true, + "name": "filter_expression", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "index_name": { + "_input_type": "StrInput", + "advanced": false, + "display_name": "Index Name", + "dynamic": false, + "info": "The OpenSearch index name where documents will be stored and searched. Will be created automatically if it doesn't exist.", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "index_name", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "langflow" + }, + "ingest_data": { + "_input_type": "HandleInput", + "advanced": false, + "display_name": "Ingest Data", + "dynamic": false, + "info": "", + "input_types": [ + "Data", + "DataFrame" + ], + "list": true, + "list_add_label": "Add More", + "name": "ingest_data", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "other", + "value": "" + }, + "is_refresh": false, + "jwt_header": { + "_input_type": "StrInput", + "advanced": true, + "display_name": "JWT Header Name", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "jwt_header", + "override_skip": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "Authorization" + }, + "jwt_token": { + "_input_type": "SecretStrInput", + "advanced": false, + "display_name": "JWT Token", + "dynamic": false, + "info": "Valid JSON Web Token for authentication. 
Will be sent in the Authorization header (with optional 'Bearer ' prefix).", + "input_types": [], + "load_from_db": true, + "name": "jwt_token", + "override_skip": false, + "password": true, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "track_in_telemetry": false, + "type": "str", + "value": "JWT" + }, + "m": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "M Parameter", + "dynamic": false, + "info": "Number of bidirectional connections for each vector in the HNSW graph. Higher values improve search quality but increase memory usage and indexing time.", + "list": false, + "list_add_label": "Add More", + "name": "m", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 16 + }, + "num_candidates": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Candidate Pool Size", + "dynamic": false, + "info": "Number of approximate neighbors to consider for each KNN query. Some OpenSearch deployments do not support this parameter; set to 0 to disable.", + "list": false, + "list_add_label": "Add More", + "name": "num_candidates", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 1000 + }, + "number_of_results": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Default Result Limit", + "dynamic": false, + "info": "Default maximum number of search results to return when no limit is specified in the filter expression.", + "list": false, + "list_add_label": "Add More", + "name": "number_of_results", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 10 + }, + "opensearch_url": { + "_input_type": "StrInput", + "advanced": false, + "display_name": "OpenSearch URL", + "dynamic": false, + "info": "The connection URL for your OpenSearch cluster (e.g., http://localhost:9200 for local development or your cloud endpoint).", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "opensearch_url", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "http://localhost:9200" + }, + "password": { + "_input_type": "SecretStrInput", + "advanced": false, + "display_name": "OpenSearch Password", + "dynamic": false, + "info": "", + "input_types": [], + "load_from_db": false, + "name": "password", + "override_skip": false, + "password": true, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "search_query": { + "_input_type": "QueryInput", + "advanced": false, + "display_name": "Search Query", + "dynamic": false, + "info": "Enter a query to run a similarity search.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "search_query", + "override_skip": false, + "placeholder": "Enter a query...", + "required": false, + "show": true, + "title_case": false, + "tool_mode": true, + 
"trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "query", + "value": "" + }, + "should_cache_vector_store": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Cache Vector Store", + "dynamic": false, + "info": "If True, the vector store will be cached for the current build of the component. This is useful for components that have multiple output methods and want to share the same vector store.", + "list": false, + "list_add_label": "Add More", + "name": "should_cache_vector_store", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, + "space_type": { + "_input_type": "DropdownInput", + "advanced": true, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Distance Metric", + "dynamic": false, + "external_options": {}, + "info": "Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, 'cosinesimil' for cosine similarity, 'innerproduct' for dot product.", + "name": "space_type", + "options": [ + "l2", + "l1", + "cosinesimil", + "linf", + "innerproduct" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "l2" + }, + "use_ssl": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Use SSL/TLS", + "dynamic": false, + "info": "Enable SSL/TLS encryption for secure connections to OpenSearch.", + "list": false, + "list_add_label": "Add More", + "name": "use_ssl", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, + "username": { + "_input_type": "StrInput", + "advanced": false, + "display_name": "Username", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "username", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "admin" + }, + "vector_field": { + "_input_type": "StrInput", + "advanced": true, + "display_name": "Legacy Vector Field Name", + "dynamic": false, + "info": "Legacy field name for backward compatibility. New documents use dynamic fields (chunk_embedding_{model_name}) based on the embedding_model_name.", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "vector_field", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "chunk_embedding" + }, + "verify_certs": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Verify SSL Certificates", + "dynamic": false, + "info": "Verify SSL certificates when connecting. 
Disable for self-signed certificates in development environments.", + "list": false, + "list_add_label": "Add More", + "name": "verify_certs", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": false + } + }, + "tool_mode": false + }, + "selected_output": "search_results", + "showNode": true, + "type": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding" + }, + "dragging": false, + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-PMGGV", + "measured": { + "height": 904, + "width": 320 + }, + "position": { + "x": 2779.4314297063547, + "y": 1442.1019431938519 + }, + "selected": false, + "type": "genericNode" + }, + { + "data": { + "id": "AdvancedDynamicFormBuilder-ziCu4", + "node": { + "base_classes": [ + "Data", + "Message" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Creates dynamic input fields that can receive data from other components or manual input.", + "display_name": "Create Data", + "documentation": "", + "edited": true, + "field_order": [ + "form_fields", + "include_metadata" + ], + "frozen": false, + "icon": "braces", + "last_updated": "2025-11-25T23:37:47.269Z", + "legacy": false, + "lf_version": "1.6.3.dev0", + "metadata": {}, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Data", + "group_outputs": false, + "hidden": null, + "loop_types": null, + "method": "process_form", + "name": "form_data", + "options": null, + "required_inputs": null, + "selected": "Data", + "tool_mode": true, + "types": [ + "Data" + ], + "value": "__UNDEFINED__" + }, + { + "allows_loop": false, + "cache": true, + "display_name": "Message", + "group_outputs": false, + "hidden": null, + "loop_types": null, + "method": "get_message", + "name": "message", + "options": null, + "required_inputs": null, + "selected": "Message", + "tool_mode": true, + "types": [ + "Message" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_frontend_node_flow_id": { + "value": "72c3d17c-2dac-4a73-b48a-6518473d7830" + }, + "_frontend_node_folder_id": { + "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + }, + "_type": "Component", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Any\r\n\r\nfrom langflow.custom import Component\r\nfrom langflow.io import (\r\n BoolInput,\r\n FloatInput,\r\n HandleInput,\r\n IntInput,\r\n MultilineInput,\r\n Output,\r\n StrInput,\r\n TableInput,\r\n)\r\nfrom langflow.schema.data import Data\r\nfrom langflow.schema.message import Message\r\n\r\n\r\nclass CreateData(Component):\r\n \"\"\"Dynamic Form Component\r\n\r\n This component creates dynamic inputs that can receive data from other components\r\n or be filled manually. 
It demonstrates advanced dynamic input functionality with\r\n component connectivity.\r\n\r\n ## Features\r\n - **Dynamic Input Generation**: Create inputs based on table configuration\r\n - **Component Connectivity**: Inputs can receive data from other components\r\n - **Multiple Input Types**: Support for text, number, boolean, and handle inputs\r\n - **Flexible Data Sources**: Manual input OR component connections\r\n - **Real-time Updates**: Form fields update immediately when table changes\r\n - **Multiple Output Formats**: Data and formatted Message outputs\r\n - **JSON Output**: Collects all dynamic inputs into a structured JSON response\r\n\r\n ## Use Cases\r\n - Dynamic API parameter collection from multiple sources\r\n - Variable data aggregation from different components\r\n - Flexible pipeline configuration\r\n - Multi-source data processing\r\n\r\n ## Field Types Available\r\n - **text**: Single-line text input (can connect to Text/String outputs)\r\n - **multiline**: Multi-line text input (can connect to Text outputs)\r\n - **number**: Integer input (can connect to Number outputs)\r\n - **float**: Decimal number input (can connect to Number outputs)\r\n - **boolean**: True/false checkbox (can connect to Boolean outputs)\r\n - **handle**: Generic data input (can connect to any component output)\r\n - **data**: Structured data input (can connect to Data outputs)\r\n\r\n ## Input Types for Connections\r\n - **Text**: Text/String data from components\r\n - **Data**: Structured data objects\r\n - **Message**: Message objects with text content\r\n - **Number**: Numeric values\r\n - **Boolean**: True/false values\r\n - **Any**: Accepts any type of connection\r\n - **Combinations**: Text,Message | Data,Text | Text,Data,Message | etc.\r\n \"\"\"\r\n\r\n display_name = \"Create Data\"\r\n description = \"Creates dynamic input fields that can receive data from other components or manual input.\"\r\n icon = \"braces\"\r\n name = \"AdvancedDynamicFormBuilder\"\r\n\r\n def __init__(self, **kwargs):\r\n super().__init__(**kwargs)\r\n self._dynamic_inputs = {}\r\n\r\n inputs = [\r\n TableInput(\r\n name=\"form_fields\",\r\n display_name=\"Input Configuration\",\r\n info=\"Define the dynamic form fields. 
Each row creates a new input field that can connect to other components.\",\r\n table_schema=[\r\n {\r\n \"name\": \"field_name\",\r\n \"display_name\": \"Field Name\",\r\n \"type\": \"str\",\r\n \"description\": \"Name for the field (used as both internal name and display label)\",\r\n },\r\n {\r\n \"name\": \"field_type\",\r\n \"display_name\": \"Field Type\",\r\n \"type\": \"str\",\r\n \"description\": \"Type of input field to create\",\r\n \"options\": [\"Text\", \"Data\", \"Number\", \"Handle\", \"Boolean\"],\r\n \"value\": \"Text\",\r\n },\r\n ],\r\n value=[{\"field_name\": \"field_name\", \"field_type\": \"Text\"}],\r\n real_time_refresh=True,\r\n ),\r\n BoolInput(\r\n name=\"include_metadata\",\r\n display_name=\"Include Metadata\",\r\n info=\"Include form configuration metadata in the output.\",\r\n value=False,\r\n advanced=True,\r\n ),\r\n ]\r\n\r\n outputs = [\r\n Output(display_name=\"Data\", name=\"form_data\", method=\"process_form\"),\r\n Output(display_name=\"Message\", name=\"message\", method=\"get_message\"),\r\n ]\r\n\r\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str = None) -> dict:\r\n \"\"\"Update build configuration to add dynamic inputs that can connect to other components.\"\"\"\r\n if field_name == \"form_fields\":\r\n # Store current values before clearing dynamic inputs\r\n current_values = {}\r\n keys_to_remove = [key for key in build_config if key.startswith(\"dynamic_\")]\r\n for key in keys_to_remove:\r\n # Preserve the current value before deletion\r\n if hasattr(self, key):\r\n current_values[key] = getattr(self, key)\r\n del build_config[key]\r\n\r\n # Add dynamic inputs based on table configuration\r\n # Safety check to ensure field_value is not None and is iterable\r\n if field_value is None:\r\n field_value = []\r\n\r\n for i, field_config in enumerate(field_value):\r\n # Safety check to ensure field_config is not None\r\n if field_config is None:\r\n continue\r\n\r\n field_name = field_config.get(\"field_name\", f\"field_{i}\")\r\n display_name = field_name # Use field_name as display_name\r\n field_type_option = field_config.get(\"field_type\", \"Text\")\r\n default_value = \"\" # All fields have empty default value\r\n required = False # All fields are optional by default\r\n help_text = \"\" # All fields have empty help text\r\n\r\n # Map field type options to actual field types and input types\r\n field_type_mapping = {\r\n \"Text\": {\"field_type\": \"multiline\", \"input_types\": [\"Text\", \"Message\"]},\r\n \"Data\": {\"field_type\": \"data\", \"input_types\": [\"Data\"]},\r\n \"Number\": {\"field_type\": \"number\", \"input_types\": [\"Text\", \"Message\"]},\r\n \"Handle\": {\"field_type\": \"handle\", \"input_types\": [\"Text\", \"Data\", \"Message\"]},\r\n \"Boolean\": {\"field_type\": \"boolean\", \"input_types\": None},\r\n }\r\n\r\n field_config_mapped = field_type_mapping.get(\r\n field_type_option, {\"field_type\": \"text\", \"input_types\": []}\r\n )\r\n field_type = field_config_mapped[\"field_type\"]\r\n input_types_list = field_config_mapped[\"input_types\"]\r\n\r\n # Create the appropriate input type based on field_type\r\n dynamic_input_name = f\"dynamic_{field_name}\"\r\n\r\n if field_type == \"text\":\r\n # Use preserved value if available, otherwise use default\r\n current_value = current_values.get(dynamic_input_name, default_value)\r\n if current_value is None:\r\n current_value = default_value\r\n \r\n if input_types_list:\r\n build_config[dynamic_input_name] = StrInput(\r\n 
name=dynamic_input_name,\r\n display_name=display_name,\r\n info=f\"{help_text} (Can connect to: {', '.join(input_types_list)})\",\r\n value=current_value,\r\n required=required,\r\n input_types=input_types_list,\r\n )\r\n else:\r\n build_config[dynamic_input_name] = StrInput(\r\n name=dynamic_input_name,\r\n display_name=display_name,\r\n info=help_text,\r\n value=current_value,\r\n required=required,\r\n )\r\n\r\n elif field_type == \"multiline\":\r\n # Use preserved value if available, otherwise use default\r\n current_value = current_values.get(dynamic_input_name, default_value)\r\n if current_value is None:\r\n current_value = default_value\r\n \r\n if input_types_list:\r\n build_config[dynamic_input_name] = MultilineInput(\r\n name=dynamic_input_name,\r\n display_name=display_name,\r\n info=f\"{help_text} (Can connect to: {', '.join(input_types_list)})\",\r\n value=current_value,\r\n required=required,\r\n input_types=input_types_list,\r\n )\r\n else:\r\n build_config[dynamic_input_name] = MultilineInput(\r\n name=dynamic_input_name,\r\n display_name=display_name,\r\n info=help_text,\r\n value=current_value,\r\n required=required,\r\n )\r\n\r\n elif field_type == \"number\":\r\n # Use preserved value if available, otherwise use default\r\n current_value = current_values.get(dynamic_input_name, default_value)\r\n if current_value is None:\r\n current_value = default_value\r\n \r\n try:\r\n if current_value:\r\n current_int = int(current_value)\r\n else:\r\n current_int = 0\r\n except (ValueError, TypeError):\r\n try:\r\n current_int = int(default_value) if default_value else 0\r\n except ValueError:\r\n current_int = 0\r\n\r\n if input_types_list:\r\n build_config[dynamic_input_name] = IntInput(\r\n name=dynamic_input_name,\r\n display_name=display_name,\r\n info=f\"{help_text} (Can connect to: {', '.join(input_types_list)})\",\r\n value=current_int,\r\n required=required,\r\n input_types=input_types_list,\r\n )\r\n else:\r\n build_config[dynamic_input_name] = IntInput(\r\n name=dynamic_input_name,\r\n display_name=display_name,\r\n info=help_text,\r\n value=current_int,\r\n required=required,\r\n )\r\n\r\n elif field_type == \"float\":\r\n # Use preserved value if available, otherwise use default\r\n current_value = current_values.get(dynamic_input_name, default_value)\r\n if current_value is None:\r\n current_value = default_value\r\n \r\n try:\r\n if current_value:\r\n current_float = float(current_value)\r\n else:\r\n current_float = 0.0\r\n except (ValueError, TypeError):\r\n try:\r\n current_float = float(default_value) if default_value else 0.0\r\n except ValueError:\r\n current_float = 0.0\r\n\r\n if input_types_list:\r\n build_config[dynamic_input_name] = FloatInput(\r\n name=dynamic_input_name,\r\n display_name=display_name,\r\n info=f\"{help_text} (Can connect to: {', '.join(input_types_list)})\",\r\n value=current_float,\r\n required=required,\r\n input_types=input_types_list,\r\n )\r\n else:\r\n build_config[dynamic_input_name] = FloatInput(\r\n name=dynamic_input_name,\r\n display_name=display_name,\r\n info=help_text,\r\n value=current_float,\r\n required=required,\r\n )\r\n\r\n elif field_type == \"boolean\":\r\n # Use preserved value if available, otherwise use default\r\n current_value = current_values.get(dynamic_input_name, default_value)\r\n if current_value is None:\r\n current_value = default_value\r\n \r\n # Convert current value to boolean\r\n if isinstance(current_value, bool):\r\n current_bool = current_value\r\n else:\r\n current_bool = 
str(current_value).lower() in [\"true\", \"1\", \"yes\"] if current_value else False\r\n\r\n # Boolean fields don't use input_types parameter to avoid errors\r\n build_config[dynamic_input_name] = BoolInput(\r\n name=dynamic_input_name,\r\n display_name=display_name,\r\n info=help_text,\r\n value=current_bool,\r\n input_types=[],\r\n required=required,\r\n )\r\n\r\n elif field_type == \"handle\":\r\n # HandleInput for generic data connections\r\n build_config[dynamic_input_name] = HandleInput(\r\n name=dynamic_input_name,\r\n display_name=display_name,\r\n info=f\"{help_text} (Accepts: {', '.join(input_types_list) if input_types_list else 'Any'})\",\r\n input_types=input_types_list if input_types_list else [\"Data\", \"Text\", \"Message\"],\r\n required=required,\r\n )\r\n\r\n elif field_type == \"data\":\r\n # Specialized for Data type connections\r\n build_config[dynamic_input_name] = HandleInput(\r\n name=dynamic_input_name,\r\n display_name=display_name,\r\n info=f\"{help_text} (Data input)\",\r\n input_types=[\"Data\"] if not input_types_list else input_types_list,\r\n required=required,\r\n )\r\n\r\n else:\r\n # Default to text input for unknown types\r\n # Use preserved value if available, otherwise use default\r\n current_value = current_values.get(dynamic_input_name, default_value)\r\n if current_value is None:\r\n current_value = default_value\r\n \r\n build_config[dynamic_input_name] = StrInput(\r\n name=dynamic_input_name,\r\n display_name=display_name,\r\n info=f\"{help_text} (Unknown type '{field_type}', defaulting to text)\",\r\n value=current_value,\r\n required=required,\r\n )\r\n\r\n return build_config\r\n\r\n def get_dynamic_values(self) -> dict[str, Any]:\r\n \"\"\"Extract simple values from all dynamic inputs, handling both manual and connected inputs.\"\"\"\r\n dynamic_values = {}\r\n connection_info = {}\r\n form_fields = getattr(self, \"form_fields\", [])\r\n\r\n for field_config in form_fields:\r\n # Safety check to ensure field_config is not None\r\n if field_config is None:\r\n continue\r\n\r\n field_name = field_config.get(\"field_name\", \"\")\r\n if field_name:\r\n dynamic_input_name = f\"dynamic_{field_name}\"\r\n value = getattr(self, dynamic_input_name, None)\r\n\r\n # Extract simple values from connections or manual input\r\n if value is not None:\r\n try:\r\n extracted_value = self._extract_simple_value(value)\r\n dynamic_values[field_name] = extracted_value\r\n\r\n # Determine connection type for status\r\n if hasattr(value, \"text\") and hasattr(value, \"timestamp\"):\r\n connection_info[field_name] = \"Connected (Message)\"\r\n elif hasattr(value, \"data\"):\r\n connection_info[field_name] = \"Connected (Data)\"\r\n elif isinstance(value, (str, int, float, bool, list, dict)):\r\n connection_info[field_name] = \"Manual input\"\r\n else:\r\n connection_info[field_name] = \"Connected (Object)\"\r\n\r\n except Exception:\r\n # Fallback to string representation if all else fails\r\n dynamic_values[field_name] = str(value)\r\n connection_info[field_name] = \"Error\"\r\n else:\r\n # Use empty default value if nothing connected\r\n dynamic_values[field_name] = \"\"\r\n connection_info[field_name] = \"Empty default\"\r\n\r\n # Store connection info for status output\r\n self._connection_info = connection_info\r\n return dynamic_values\r\n\r\n def _extract_simple_value(self, value: Any) -> Any:\r\n \"\"\"Extract the simplest, most useful value from any input type.\"\"\"\r\n # Handle None\r\n if value is None:\r\n return None\r\n\r\n # Handle simple types 
directly\r\n if isinstance(value, (str, int, float, bool)):\r\n return value\r\n\r\n # Handle lists and tuples - keep simple\r\n if isinstance(value, (list, tuple)):\r\n return [self._extract_simple_value(item) for item in value]\r\n\r\n # Handle dictionaries - keep simple\r\n if isinstance(value, dict):\r\n return {str(k): self._extract_simple_value(v) for k, v in value.items()}\r\n\r\n # Handle Message objects - extract only the text\r\n if hasattr(value, \"text\"):\r\n return str(value.text) if value.text is not None else \"\"\r\n\r\n # Handle Data objects - extract the data content\r\n if hasattr(value, \"data\") and value.data is not None:\r\n return self._extract_simple_value(value.data)\r\n\r\n # For any other object, convert to string\r\n return str(value)\r\n\r\n def process_form(self) -> Data:\r\n \"\"\"Process all dynamic form inputs and return clean data with just field values.\"\"\"\r\n # Get all dynamic values (just the key:value pairs)\r\n dynamic_values = self.get_dynamic_values()\r\n\r\n # Update status with connection info\r\n connected_fields = len([v for v in getattr(self, \"_connection_info\", {}).values() if \"Connected\" in v])\r\n total_fields = len(dynamic_values)\r\n\r\n self.status = f\"Form processed successfully. {connected_fields}/{total_fields} fields connected to components.\"\r\n\r\n # Return clean Data object with just the field values\r\n return Data(data=dynamic_values)\r\n\r\n def get_message(self) -> Message:\r\n \"\"\"Return form data as a formatted text message.\"\"\"\r\n # Get all dynamic values\r\n dynamic_values = self.get_dynamic_values()\r\n\r\n if not dynamic_values:\r\n return Message(text=\"No form data available\")\r\n\r\n # Format as text message\r\n message_lines = [\"📋 Form Data:\"]\r\n message_lines.append(\"=\" * 40)\r\n\r\n for field_name, value in dynamic_values.items():\r\n # Use field_name as display_name\r\n display_name = field_name\r\n\r\n message_lines.append(f\"• {display_name}: {value}\")\r\n\r\n message_lines.append(\"=\" * 40)\r\n message_lines.append(f\"Total fields: {len(dynamic_values)}\")\r\n\r\n message_text = \"\\n\".join(message_lines)\r\n self.status = f\"Message formatted with {len(dynamic_values)} fields\"\r\n\r\n return Message(text=message_text)" + }, + "dynamic_connector_type": { + "_input_type": "MultilineInput", + "advanced": false, + "ai_enabled": false, + "copy_field": false, + "display_name": "connector_type", + "dynamic": false, + "helper_text": null, + "info": " (Can connect to: Text, Message)", + "input_types": [ + "Text", + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "multiline": true, + "name": "dynamic_connector_type", + "override_skip": false, + "placeholder": "", + "real_time_refresh": null, + "refresh_button": null, + "refresh_button_text": null, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "dynamic_owner": { + "_input_type": "MultilineInput", + "advanced": false, + "ai_enabled": false, + "copy_field": false, + "display_name": "owner", + "dynamic": false, + "helper_text": null, + "info": " (Can connect to: Text, Message)", + "input_types": [ + "Text", + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "multiline": true, + "name": "dynamic_owner", + "override_skip": false, + "placeholder": "", + "real_time_refresh": null, + "refresh_button": null, + 
"refresh_button_text": null, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "dynamic_owner_email": { + "_input_type": "MultilineInput", + "advanced": false, + "ai_enabled": false, + "copy_field": false, + "display_name": "owner_email", + "dynamic": false, + "helper_text": null, + "info": " (Can connect to: Text, Message)", + "input_types": [ + "Text", + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "multiline": true, + "name": "dynamic_owner_email", + "override_skip": false, + "placeholder": "", + "real_time_refresh": null, + "refresh_button": null, + "refresh_button_text": null, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "dynamic_owner_name": { + "_input_type": "MultilineInput", + "advanced": false, + "ai_enabled": false, + "copy_field": false, + "display_name": "owner_name", + "dynamic": false, + "helper_text": null, + "info": " (Can connect to: Text, Message)", + "input_types": [ + "Text", + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "multiline": true, + "name": "dynamic_owner_name", + "override_skip": false, + "placeholder": "", + "real_time_refresh": null, + "refresh_button": null, + "refresh_button_text": null, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "form_fields": { + "_input_type": "TableInput", + "advanced": false, + "display_name": "Input Configuration", + "dynamic": false, + "info": "Define the dynamic form fields. 
Each row creates a new input field that can connect to other components.", + "is_list": true, + "list_add_label": "Add More", + "load_from_db": false, + "name": "form_fields", + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "table_icon": "Table", + "table_schema": { + "columns": [ + { + "default": "None", + "description": "Name for the field (used as both internal name and display label)", + "disable_edit": false, + "display_name": "Field Name", + "edit_mode": "popover", + "filterable": true, + "formatter": "text", + "hidden": false, + "name": "field_name", + "sortable": true, + "type": "str" + }, + { + "default": "None", + "description": "Type of input field to create", + "disable_edit": false, + "display_name": "Field Type", + "edit_mode": "popover", + "filterable": true, + "formatter": "text", + "hidden": false, + "name": "field_type", + "options": [ + "Text", + "Data", + "Number", + "Handle", + "Boolean" + ], + "sortable": true, + "type": "str" + } + ] + }, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "trigger_icon": "Table", + "trigger_text": "Open table", + "type": "table", + "value": [ + { + "field_name": "owner", + "field_type": "Text" + }, + { + "field_name": "owner_name", + "field_type": "Text" + }, + { + "field_name": "owner_email", + "field_type": "Text" + }, + { + "field_name": "connector_type", + "field_type": "Text" + } + ] + }, + "include_metadata": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Include Metadata", + "dynamic": false, + "info": "Include form configuration metadata in the output.", + "list": false, + "list_add_label": "Add More", + "name": "include_metadata", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "bool", + "value": false + }, + "is_refresh": false + }, + "tool_mode": false + }, + "selected_output": "form_data", + "showNode": true, + "type": "AdvancedDynamicFormBuilder" + }, + "dragging": false, + "id": "AdvancedDynamicFormBuilder-ziCu4", + "measured": { + "height": 552, + "width": 320 + }, + "position": { + "x": 2393.139136901506, + "y": 595.9579209002084 + }, + "selected": false, + "type": "genericNode" + }, + { + "data": { + "id": "SecretInput-lr9k6", + "node": { + "base_classes": [ + "Message" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Allows the selection of a secret to be generated as output.", + "display_name": "Secret Input", + "documentation": "https://docs.langflow.org/components-io#text-input", + "edited": true, + "field_order": [ + "input_value" + ], + "frozen": false, + "icon": "type", + "legacy": false, + "lf_version": "1.6.3.dev0", + "metadata": {}, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Output Text", + "group_outputs": false, + "hidden": null, + "method": "text_response", + "name": "text", + "options": null, + "required_inputs": null, + "selected": "Message", + "tool_mode": true, + "types": [ + "Message" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_type": "Component", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from 
langflow.base.io.text import TextComponent\r\nfrom langflow.io import Output, SecretStrInput\r\nfrom langflow.schema.message import Message\r\n\r\n\r\nclass SecretInputComponent(TextComponent):\r\n display_name = \"Secret Input\"\r\n description = \"Allows the selection of a secret to be generated as output.\"\r\n documentation: str = \"https://docs.langflow.org/components-io#text-input\"\r\n icon = \"type\"\r\n name = \"SecretInput\"\r\n\r\n inputs = [\r\n SecretStrInput(\r\n name=\"input_value\",\r\n display_name=\"Secret\",\r\n info=\"Secret to be passed as input.\",\r\n ),\r\n ]\r\n outputs = [\r\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\r\n ]\r\n\r\n def text_response(self) -> Message:\r\n return Message(\r\n text=self.input_value,\r\n )\r\n" + }, + "input_value": { + "_input_type": "SecretStrInput", + "advanced": false, + "display_name": "Secret", + "dynamic": false, + "info": "Secret to be passed as input.", + "input_types": [], + "load_from_db": true, + "name": "input_value", + "password": true, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "CONNECTOR_TYPE" + } + }, + "tool_mode": false + }, + "showNode": true, + "type": "SecretInput" + }, + "dragging": false, + "id": "SecretInput-lr9k6", + "measured": { + "height": 220, + "width": 320 + }, + "position": { + "x": 1957.8516847253545, + "y": 426.1185670449811 + }, + "selected": false, + "type": "genericNode" + }, + { + "data": { + "id": "SecretInput-KYwsB", + "node": { + "base_classes": [ + "Message" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Allows the selection of a secret to be generated as output.", + "display_name": "Secret Input", + "documentation": "https://docs.langflow.org/components-io#text-input", + "edited": true, + "field_order": [ + "input_value" + ], + "frozen": false, + "icon": "type", + "legacy": false, + "lf_version": "1.6.3.dev0", + "metadata": {}, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Output Text", + "group_outputs": false, + "hidden": null, + "method": "text_response", + "name": "text", + "options": null, + "required_inputs": null, + "selected": "Message", + "tool_mode": true, + "types": [ + "Message" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_type": "Component", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from langflow.base.io.text import TextComponent\r\nfrom langflow.io import Output, SecretStrInput\r\nfrom langflow.schema.message import Message\r\n\r\n\r\nclass SecretInputComponent(TextComponent):\r\n display_name = \"Secret Input\"\r\n description = \"Allows the selection of a secret to be generated as output.\"\r\n documentation: str = \"https://docs.langflow.org/components-io#text-input\"\r\n icon = \"type\"\r\n name = \"SecretInput\"\r\n\r\n inputs = [\r\n SecretStrInput(\r\n name=\"input_value\",\r\n display_name=\"Secret\",\r\n info=\"Secret to be passed as input.\",\r\n ),\r\n ]\r\n outputs = [\r\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\r\n ]\r\n\r\n def text_response(self) -> Message:\r\n return 
Message(\r\n text=self.input_value,\r\n )\r\n" + }, + "input_value": { + "_input_type": "SecretStrInput", + "advanced": false, + "display_name": "Secret", + "dynamic": false, + "info": "Secret to be passed as input.", + "input_types": [], + "load_from_db": true, + "name": "input_value", + "password": true, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "OWNER" + } + }, + "tool_mode": false + }, + "showNode": true, + "type": "SecretInput" + }, + "dragging": false, + "id": "SecretInput-KYwsB", + "measured": { + "height": 220, + "width": 320 + }, + "position": { + "x": 1956.094514779677, + "y": 676.2918713576739 + }, + "selected": false, + "type": "genericNode" + }, + { + "data": { + "id": "SecretInput-pYHMH", + "node": { + "base_classes": [ + "Message" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Allows the selection of a secret to be generated as output.", + "display_name": "Secret Input", + "documentation": "https://docs.langflow.org/components-io#text-input", + "edited": true, + "field_order": [ + "input_value" + ], + "frozen": false, + "icon": "type", + "legacy": false, + "lf_version": "1.6.3.dev0", + "metadata": {}, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Output Text", + "group_outputs": false, + "hidden": null, + "method": "text_response", + "name": "text", + "options": null, + "required_inputs": null, + "selected": "Message", + "tool_mode": true, + "types": [ + "Message" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_type": "Component", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from langflow.base.io.text import TextComponent\r\nfrom langflow.io import Output, SecretStrInput\r\nfrom langflow.schema.message import Message\r\n\r\n\r\nclass SecretInputComponent(TextComponent):\r\n display_name = \"Secret Input\"\r\n description = \"Allows the selection of a secret to be generated as output.\"\r\n documentation: str = \"https://docs.langflow.org/components-io#text-input\"\r\n icon = \"type\"\r\n name = \"SecretInput\"\r\n\r\n inputs = [\r\n SecretStrInput(\r\n name=\"input_value\",\r\n display_name=\"Secret\",\r\n info=\"Secret to be passed as input.\",\r\n ),\r\n ]\r\n outputs = [\r\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\r\n ]\r\n\r\n def text_response(self) -> Message:\r\n return Message(\r\n text=self.input_value,\r\n )\r\n" + }, + "input_value": { + "_input_type": "SecretStrInput", + "advanced": false, + "display_name": "Secret", + "dynamic": false, + "info": "Secret to be passed as input.", + "input_types": [], + "load_from_db": true, + "name": "input_value", + "password": true, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "OWNER_EMAIL" + } + }, + "tool_mode": false + }, + "showNode": true, + "type": "SecretInput" + }, + "dragging": false, + "id": "SecretInput-pYHMH", + "measured": { + "height": 220, + "width": 320 + }, + "position": { + "x": 1950.891944424919, + "y": 923.3252721744263 + }, + "selected": false, + "type": "genericNode" + }, + { + "data": { + "id": 
"SecretInput-aoBVB", + "node": { + "base_classes": [ + "Message" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Allows the selection of a secret to be generated as output..", + "display_name": "Secret Input", + "documentation": "https://docs.langflow.org/components-io#text-input", + "edited": true, + "field_order": [ + "input_value" + ], + "frozen": false, + "icon": "type", + "legacy": false, + "lf_version": "1.6.3.dev0", + "metadata": {}, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Output Text", + "group_outputs": false, + "hidden": null, + "method": "text_response", + "name": "text", + "options": null, + "required_inputs": null, + "selected": "Message", + "tool_mode": true, + "types": [ + "Message" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_type": "Component", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from langflow.base.io.text import TextComponent\r\nfrom langflow.io import MultilineInput, Output, SecretStrInput\r\nfrom langflow.schema.message import Message\r\n\r\n\r\nclass SecretInputComponent(TextComponent):\r\n display_name = \"Secret Input\"\r\n description = \"Allows the selection of a secret to be generated as output..\"\r\n documentation: str = \"https://docs.langflow.org/components-io#text-input\"\r\n icon = \"type\"\r\n name = \"SecretInput\"\r\n\r\n inputs = [\r\n SecretStrInput(\r\n name=\"input_value\",\r\n display_name=\"Secret\",\r\n info=\"Secret to be passed as input.\",\r\n ),\r\n ]\r\n outputs = [\r\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\r\n ]\r\n\r\n def text_response(self) -> Message:\r\n return Message(\r\n text=self.input_value,\r\n )\r\n" + }, + "input_value": { + "_input_type": "SecretStrInput", + "advanced": false, + "display_name": "Secret", + "dynamic": false, + "info": "Secret to be passed as input.", + "input_types": [], + "load_from_db": true, + "name": "input_value", + "password": true, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "OWNER_NAME" + } + }, + "tool_mode": false + }, + "showNode": true, + "type": "SecretInput" + }, + "dragging": false, + "id": "SecretInput-aoBVB", + "measured": { + "height": 220, + "width": 320 + }, + "position": { + "x": 1956.6284934755163, + "y": 1161.8450254281008 + }, + "selected": false, + "type": "genericNode" + }, + { + "data": { + "description": "Generate embeddings using a specified provider.", + "display_name": "Embedding Model", + "id": "EmbeddingModel-muH88", + "node": { + "base_classes": [ + "Embeddings" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Generate embeddings using a specified provider.", + "display_name": "Embedding Model", + "documentation": "https://docs.langflow.org/components-embedding-models", + "edited": false, + "field_order": [ + "provider", + "api_base", + "ollama_base_url", + "base_url_ibm_watsonx", + "model", + "api_key", + "project_id", + "dimensions", + "chunk_size", + "request_timeout", + "max_retries", + "show_progress_bar", + "model_kwargs", + "truncate_input_tokens", + "input_text" + ], + "frozen": false, + "icon": 
"binary", + "last_updated": "2025-11-25T23:39:18.966Z", + "legacy": false, + "metadata": { + "code_hash": "9e44c83a5058", + "dependencies": { + "dependencies": [ + { + "name": "requests", + "version": "2.32.5" + }, + { + "name": "ibm_watsonx_ai", + "version": "1.4.2" + }, + { + "name": "langchain_openai", + "version": "0.3.23" + }, + { + "name": "lfx", + "version": null + }, + { + "name": "langchain_ollama", + "version": "0.3.10" + }, + { + "name": "langchain_community", + "version": "0.3.21" + }, + { + "name": "langchain_ibm", + "version": "0.3.19" + } + ], + "total_dependencies": 7 + }, + "module": "custom_components.embedding_model" + }, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Embedding Model", + "group_outputs": false, + "loop_types": null, + "method": "build_embeddings", + "name": "embeddings", + "options": null, + "required_inputs": null, + "selected": "Embeddings", + "tool_mode": true, + "types": [ + "Embeddings" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_frontend_node_flow_id": { + "value": "72c3d17c-2dac-4a73-b48a-6518473d7830" + }, + "_frontend_node_folder_id": { + "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + }, + "_type": "Component", + "api_base": { + "_input_type": "MessageTextInput", + "advanced": true, + "display_name": "API Base URL", + "dynamic": false, + "info": "Base URL for the API. Leave empty for default.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "api_base", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "api_key": { + "_input_type": "SecretStrInput", + "advanced": false, + "display_name": "API Key (Optional)", + "dynamic": false, + "info": "Model Provider API key", + "input_types": [], + "load_from_db": true, + "name": "api_key", + "override_skip": false, + "password": true, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": false, + "title_case": false, + "track_in_telemetry": false, + "type": "str", + "value": "OPENAI_API_KEY" + }, + "base_url_ibm_watsonx": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "watsonx API Endpoint", + "dynamic": false, + "external_options": {}, + "info": "The base URL of the API (IBM watsonx.ai only)", + "name": "base_url_ibm_watsonx", + "options": [ + "https://us-south.ml.cloud.ibm.com", + "https://eu-de.ml.cloud.ibm.com", + "https://eu-gb.ml.cloud.ibm.com", + "https://au-syd.ml.cloud.ibm.com", + "https://jp-tok.ml.cloud.ibm.com", + "https://ca-tor.ml.cloud.ibm.com" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": false, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "https://us-south.ml.cloud.ibm.com" + }, + "chunk_size": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Chunk Size", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "chunk_size", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + 
"trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 1000 + }, + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and 
transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n 
build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await 
is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + }, + "dimensions": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Dimensions", + "dynamic": false, + "info": "The number of dimensions the resulting output embeddings should have. Only supported by certain models.", + "list": false, + "list_add_label": "Add More", + "name": "dimensions", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": "" + }, + "input_text": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Include the original text in the output", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "input_text", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, + "is_refresh": false, + "max_retries": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Max Retries", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "max_retries", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 3 + }, + "model": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Model Name", + "dynamic": false, + "external_options": {}, + "info": "Select the embedding model to use", + "name": "model", + "options": [], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "refresh_button": true, + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "" + }, + "model_kwargs": { + "_input_type": "DictInput", + "advanced": true, + "display_name": "Model Kwargs", + "dynamic": false, + "info": "Additional keyword arguments to pass to the model.", + "list": false, + "list_add_label": "Add More", + "name": "model_kwargs", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "track_in_telemetry": false, + "type": "dict", + "value": {} + }, + "ollama_base_url": { + "_input_type": "MessageTextInput", + "advanced": false, + "display_name": "Ollama API URL", + "dynamic": false, + "info": "Endpoint of the Ollama API (Ollama only). 
Defaults to http://localhost:11434", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": true, + "name": "ollama_base_url", + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "OLLAMA_BASE_URL" + }, + "project_id": { + "_input_type": "MessageTextInput", + "advanced": false, + "display_name": "Project ID", + "dynamic": false, + "info": "IBM watsonx.ai Project ID (required for IBM watsonx.ai)", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "project_id", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "provider": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Model Provider", + "dynamic": false, + "external_options": {}, + "info": "Select the embedding model provider", + "name": "provider", + "options": [ + "OpenAI", + "Ollama", + "IBM watsonx.ai" + ], + "options_metadata": [ + { + "icon": "OpenAI" + }, + { + "icon": "Ollama" + }, + { + "icon": "WatsonxAI" + } + ], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "selected_metadata": { + "icon": "Ollama" + }, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "Ollama" + }, + "request_timeout": { + "_input_type": "FloatInput", + "advanced": true, + "display_name": "Request Timeout", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "request_timeout", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "float", + "value": "" + }, + "show_progress_bar": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Show Progress Bar", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "show_progress_bar", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": false + }, + "truncate_input_tokens": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Truncate Input Tokens", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "truncate_input_tokens", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 200 + } + }, + "tool_mode": false + }, + "showNode": true, + "type": "EmbeddingModel" + }, + "dragging": false, + "id": "EmbeddingModel-muH88", + "measured": { + "height": 369, + "width": 320 + }, + "position": { + "x": 1699.406784507022, + "y": 2056.210282680243 + }, + "selected": false, + "type": "genericNode" + }, + { + "data": { + "description": "Generate 
embeddings using a specified provider.", + "display_name": "Embedding Model", + "id": "EmbeddingModel-Rp0iI", + "node": { + "base_classes": [ + "Embeddings" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Generate embeddings using a specified provider.", + "display_name": "Embedding Model", + "documentation": "https://docs.langflow.org/components-embedding-models", + "edited": false, + "field_order": [ + "provider", + "api_base", + "ollama_base_url", + "base_url_ibm_watsonx", + "model", + "api_key", + "project_id", + "dimensions", + "chunk_size", + "request_timeout", + "max_retries", + "show_progress_bar", + "model_kwargs", + "truncate_input_tokens", + "input_text" + ], + "frozen": false, + "icon": "binary", + "last_updated": "2025-11-25T23:39:43.564Z", + "legacy": false, + "metadata": { + "code_hash": "9e44c83a5058", + "dependencies": { + "dependencies": [ + { + "name": "requests", + "version": "2.32.5" + }, + { + "name": "ibm_watsonx_ai", + "version": "1.4.2" + }, + { + "name": "langchain_openai", + "version": "0.3.23" + }, + { + "name": "lfx", + "version": null + }, + { + "name": "langchain_ollama", + "version": "0.3.10" + }, + { + "name": "langchain_community", + "version": "0.3.21" + }, + { + "name": "langchain_ibm", + "version": "0.3.19" + } + ], + "total_dependencies": 7 + }, + "module": "custom_components.embedding_model" + }, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Embedding Model", + "group_outputs": false, + "loop_types": null, + "method": "build_embeddings", + "name": "embeddings", + "options": null, + "required_inputs": null, + "selected": "Embeddings", + "tool_mode": true, + "types": [ + "Embeddings" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_frontend_node_flow_id": { + "value": "72c3d17c-2dac-4a73-b48a-6518473d7830" + }, + "_frontend_node_folder_id": { + "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + }, + "_type": "Component", + "api_base": { + "_input_type": "MessageTextInput", + "advanced": true, + "display_name": "API Base URL", + "dynamic": false, + "info": "Base URL for the API. 
Leave empty for default.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "api_base", + "override_skip": false, + "placeholder": "", + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "api_key": { + "_input_type": "SecretStrInput", + "advanced": false, + "display_name": "IBM watsonx.ai API Key", + "dynamic": false, + "info": "Model Provider API key", + "input_types": [], + "load_from_db": true, + "name": "api_key", + "override_skip": false, + "password": true, + "placeholder": "", + "real_time_refresh": true, + "required": true, + "show": true, + "title_case": false, + "track_in_telemetry": false, + "type": "str", + "value": "WATSONX_API_KEY" + }, + "base_url_ibm_watsonx": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "watsonx API Endpoint", + "dynamic": false, + "external_options": {}, + "info": "The base URL of the API (IBM watsonx.ai only)", + "name": "base_url_ibm_watsonx", + "options": [ + "https://us-south.ml.cloud.ibm.com", + "https://eu-de.ml.cloud.ibm.com", + "https://eu-gb.ml.cloud.ibm.com", + "https://au-syd.ml.cloud.ibm.com", + "https://jp-tok.ml.cloud.ibm.com", + "https://ca-tor.ml.cloud.ibm.com" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "https://us-south.ml.cloud.ibm.com" + }, + "chunk_size": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Chunk Size", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "chunk_size", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 1000 + }, + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = 
\"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and 
transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n 
build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await 
is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + }, + "dimensions": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Dimensions", + "dynamic": false, + "info": "The number of dimensions the resulting output embeddings should have. Only supported by certain models.", + "list": false, + "list_add_label": "Add More", + "name": "dimensions", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": "" + }, + "input_text": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Include the original text in the output", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "input_text", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, + "is_refresh": false, + "max_retries": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Max Retries", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "max_retries", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 3 + }, + "model": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Model Name", + "dynamic": false, + "external_options": {}, + "info": "Select the embedding model to use", + "name": "model", + "options": [ + "ibm/granite-embedding-278m-multilingual", + "ibm/slate-125m-english-rtrvr-v2", + "ibm/slate-30m-english-rtrvr-v2", + "intfloat/multilingual-e5-large", + "sentence-transformers/all-minilm-l6-v2" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "refresh_button": true, + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "ibm/granite-embedding-278m-multilingual" + }, + "model_kwargs": { + "_input_type": "DictInput", + "advanced": true, + "display_name": "Model Kwargs", + "dynamic": false, + "info": "Additional keyword arguments to pass to the model.", + "list": false, + "list_add_label": "Add More", + "name": "model_kwargs", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "track_in_telemetry": false, + "type": "dict", + "value": {} + }, + "ollama_base_url": { + "_input_type": "MessageTextInput", + "advanced": false, + "display_name": "Ollama API URL", + "dynamic": false, + "info": "Endpoint of the Ollama API (Ollama only). 
Defaults to http://localhost:11434", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": true, + "name": "ollama_base_url", + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": false, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "OLLAMA_BASE_URL" + }, + "project_id": { + "_input_type": "MessageTextInput", + "advanced": false, + "display_name": "Project ID", + "dynamic": false, + "info": "IBM watsonx.ai Project ID (required for IBM watsonx.ai)", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": true, + "name": "project_id", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "WATSONX_PROJECT_ID" + }, + "provider": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Model Provider", + "dynamic": false, + "external_options": {}, + "info": "Select the embedding model provider", + "name": "provider", + "options": [ + "OpenAI", + "Ollama", + "IBM watsonx.ai" + ], + "options_metadata": [ + { + "icon": "OpenAI" + }, + { + "icon": "Ollama" + }, + { + "icon": "WatsonxAI" + } + ], + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "selected_metadata": { + "icon": "WatsonxAI" + }, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "IBM watsonx.ai" + }, + "request_timeout": { + "_input_type": "FloatInput", + "advanced": true, + "display_name": "Request Timeout", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "request_timeout", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "float", + "value": "" + }, + "show_progress_bar": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Show Progress Bar", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "show_progress_bar", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": false + }, + "truncate_input_tokens": { + "_input_type": "IntInput", + "advanced": true, + "display_name": "Truncate Input Tokens", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "truncate_input_tokens", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "int", + "value": 200 + } + }, + "tool_mode": false + }, + "showNode": true, + "type": "EmbeddingModel" + }, + "dragging": false, + "id": "EmbeddingModel-Rp0iI", + "measured": { + "height": 534, + "width": 320 + }, + "position": { + "x": 1333.0572399196485, + "y": 2065.5678700260246 + }, + "selected": false, "type": "genericNode" } ], "viewport": { - "x": 
-425.92312760235086,
-      "y": -699.5243463797829,
-      "zoom": 0.5890166507565666
+      "x": -223.95247595079695,
+      "y": -67.29219204632898,
+      "zoom": 0.36800352363368594
     }
   },
   "description": "This flow is to ingest the URL to open search.",
   "endpoint_name": null,
   "id": "72c3d17c-2dac-4a73-b48a-6518473d7830",
   "is_component": false,
+  "last_tested_version": "1.7.0",
   "mcp_enabled": true,
-  "last_tested_version": "1.7.0.dev19",
   "name": "OpenSearch URL Ingestion Flow",
   "tags": [
     "openai",

From 07b84e373a99a025d1a8510446713cb928fd23bb Mon Sep 17 00:00:00 2001
From: Edwin Jose
Date: Tue, 25 Nov 2025 19:40:24 -0500
Subject: [PATCH 15/43] Update config import to use settings module

Replaced imports from config_manager with settings in chat_service.py and
langflow_file_service.py to use get_openrag_config from config.settings.
This change ensures consistency with the updated configuration structure.
---
 src/services/chat_service.py          | 6 +++---
 src/services/langflow_file_service.py | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/services/chat_service.py b/src/services/chat_service.py
index d8cb320d..ecbc9e3e 100644
--- a/src/services/chat_service.py
+++ b/src/services/chat_service.py
@@ -66,7 +66,7 @@ class ChatService:
             extra_headers["X-LANGFLOW-GLOBAL-VAR-JWT"] = jwt_token
 
         # Pass the selected embedding model as a global variable
-        from config.config_manager import get_openrag_config
+        from config.settings import get_openrag_config
         config = get_openrag_config()
         embedding_model = config.knowledge.embedding_model
         extra_headers["X-LANGFLOW-GLOBAL-VAR-SELECTED_EMBEDDING_MODEL"] = embedding_model
@@ -181,7 +181,7 @@ class ChatService:
             extra_headers["X-LANGFLOW-GLOBAL-VAR-JWT"] = jwt_token
 
         # Pass the selected embedding model as a global variable
-        from config.config_manager import get_openrag_config
+        from config.settings import get_openrag_config
         config = get_openrag_config()
         embedding_model = config.knowledge.embedding_model
         extra_headers["X-LANGFLOW-GLOBAL-VAR-SELECTED_EMBEDDING_MODEL"] = embedding_model
@@ -305,7 +305,7 @@ class ChatService:
             extra_headers["X-LANGFLOW-GLOBAL-VAR-JWT"] = jwt_token
 
         # Pass the selected embedding model as a global variable
-        from config.config_manager import get_openrag_config
+        from config.settings import get_openrag_config
         config = get_openrag_config()
         embedding_model = config.knowledge.embedding_model
         extra_headers["X-LANGFLOW-GLOBAL-VAR-SELECTED_EMBEDDING_MODEL"] = embedding_model
diff --git a/src/services/langflow_file_service.py b/src/services/langflow_file_service.py
index 103716e1..0c25adc1 100644
--- a/src/services/langflow_file_service.py
+++ b/src/services/langflow_file_service.py
@@ -141,7 +141,7 @@ class LangflowFileService:
         mimetype = str(file_tuples[0][2]) if file_tuples and len(file_tuples) > 0 and len(file_tuples[0]) > 2 else ""
 
         # Get the current embedding model from config
-        from config.config_manager import get_openrag_config
+        from config.settings import get_openrag_config
         config = get_openrag_config()
         embedding_model = config.knowledge.embedding_model
 

From 63d6979eb1a24900f257a7f158d371a0471f2759 Mon Sep 17 00:00:00 2001
From: Eric Hare
Date: Tue, 25 Nov 2025 17:20:30 -0800
Subject: [PATCH 16/43] fix: More graceful handling of port conflicts

---
 src/tui/managers/container_manager.py | 126 ++++++++++++++++++++++++++
 src/tui/screens/monitor.py            |  31 ++++++-
 src/tui/widgets/command_modal.py      |  26 ++++++
 3 files changed, 182 insertions(+), 1 deletion(-)
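Before the diff itself, a minimal standalone sketch of the probe this patch introduces: collect the host ports the stack needs, attempt a TCP connect to each, and treat a successful connect as a conflict. The REQUIRED_PORTS mapping and service names here are illustrative stand-ins (the real implementation derives them from the compose files, as the diff below shows); note that a connect probe only detects ports with an active listener, not ports merely reserved by another process.

import socket

# Illustrative host ports; the real values are parsed from the compose files.
REQUIRED_PORTS = {"langflow": [7860], "openrag-frontend": [3000], "opensearch": [9200]}

def port_in_use(port: int, host: str = "127.0.0.1") -> bool:
    """Return True if something is already listening on host:port."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.settimeout(0.5)
        # connect_ex returns 0 when the connection succeeds, i.e. the port is taken.
        return sock.connect_ex((host, port)) == 0

if __name__ == "__main__":
    conflicts = [
        (service, port)
        for service, ports in REQUIRED_PORTS.items()
        for port in ports
        if port_in_use(port)
    ]
    for service, port in conflicts:
        print(f"{service}: port {port} is already in use")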
---
 src/tui/managers/container_manager.py | 126 ++++++++++++++++++++++++++
 src/tui/screens/monitor.py            |  31 ++++++++-
 src/tui/widgets/command_modal.py      |  26 ++++
 3 files changed, 182 insertions(+), 1 deletion(-)

diff --git a/src/tui/managers/container_manager.py b/src/tui/managers/container_manager.py
index 2953d550..7e75f4a0 100644
--- a/src/tui/managers/container_manager.py
+++ b/src/tui/managers/container_manager.py
@@ -43,6 +43,7 @@ class ServiceInfo:
     image: Optional[str] = None
     image_digest: Optional[str] = None
    created: Optional[str] = None
+    error_message: Optional[str] = None
 
     def __post_init__(self):
         if self.ports is None:
@@ -135,6 +136,96 @@ class ContainerManager:
             return self.platform_detector.get_compose_installation_instructions()
         return self.platform_detector.get_installation_instructions()
 
+    def _extract_ports_from_compose(self) -> Dict[str, List[int]]:
+        """Extract port mappings from compose files.
+
+        Returns:
+            Dict mapping service name to list of host ports
+        """
+        service_ports: Dict[str, List[int]] = {}
+
+        compose_files = [self.compose_file]
+        if hasattr(self, 'cpu_compose_file') and self.cpu_compose_file and self.cpu_compose_file.exists():
+            compose_files.append(self.cpu_compose_file)
+
+        for compose_file in compose_files:
+            if not compose_file.exists():
+                continue
+
+            try:
+                import re
+                content = compose_file.read_text()
+                current_service = None
+                in_ports_section = False
+
+                for line in content.splitlines():
+                    # Detect service names (two-space indent under services:)
+                    service_match = re.match(r'^  (\w[\w-]*):$', line)
+                    if service_match:
+                        current_service = service_match.group(1)
+                        in_ports_section = False
+                        if current_service not in service_ports:
+                            service_ports[current_service] = []
+                        continue
+
+                    # Detect ports section
+                    if current_service and re.match(r'^    ports:$', line):
+                        in_ports_section = True
+                        continue
+
+                    # Exit ports section on a new service-level key
+                    if in_ports_section and re.match(r'^    \w+:', line):
+                        in_ports_section = False
+
+                    # Extract port mappings
+                    if in_ports_section and current_service:
+                        # Match patterns like: - "3000:3000", - "9200:9200", - 7860:7860
+                        port_match = re.search(r'["\']?(\d+):\d+["\']?', line)
+                        if port_match:
+                            host_port = int(port_match.group(1))
+                            if host_port not in service_ports[current_service]:
+                                service_ports[current_service].append(host_port)
+
+            except Exception as e:
+                logger.debug(f"Error parsing {compose_file} for ports: {e}")
+                continue
+
+        return service_ports
+
+    async def check_ports_available(self) -> tuple[bool, List[tuple[str, int, str]]]:
+        """Check if required ports are available.
+
+        Returns:
+            Tuple of (all_available, conflicts) where conflicts is a list of
+            (service_name, port, error_message) tuples
+        """
+        import socket
+
+        service_ports = self._extract_ports_from_compose()
+        conflicts: List[tuple[str, int, str]] = []
+
+        for service_name, ports in service_ports.items():
+            for port in ports:
+                try:
+                    # Probe the port; a successful connect means something
+                    # is already listening on it
+                    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+                    sock.settimeout(0.5)
+                    result = sock.connect_ex(('127.0.0.1', port))
+                    sock.close()
+
+                    if result == 0:
+                        # Port is in use
+                        conflicts.append((
+                            service_name,
+                            port,
+                            f"Port {port} is already in use"
+                        ))
+                except Exception as e:
+                    logger.debug(f"Error checking port {port}: {e}")
+                    continue
+
+        return (len(conflicts) == 0, conflicts)
+
     async def _run_compose_command(
         self, args: List[str], cpu_mode: Optional[bool] = None
     ) -> tuple[bool, str, str]:
@@ -655,6 +746,17 @@ class ContainerManager:
             yield False, f"ERROR: Compose file not found at {compose_file.absolute()}", False
             return
 
+        # Check for port conflicts before starting
+        yield False, "Checking port availability...", False
+        ports_available, conflicts = await self.check_ports_available()
+        if not ports_available:
+            yield False, "ERROR: Port conflicts detected:", False
+            for service_name, port, error_msg in conflicts:
+                yield False, f"  - {service_name}: {error_msg}", False
+            yield False, "Please stop the conflicting services and try again.", False
+            yield False, "Services not started due to port conflicts.", False
+            return
+
         yield False, "Starting OpenRAG services...", False
 
         missing_images: List[str] = []
@@ -677,13 +779,37 @@ class ContainerManager:
         yield False, "Creating and starting containers...", False
 
         up_success = {"value": True}
+        error_messages = []
+
         async for message, replace_last in self._stream_compose_command(["up", "-d"], up_success, cpu_mode):
+            # Detect error patterns in the output
+            import re
+            lower_msg = message.lower()
+
+            # Check for common error patterns (regex search, since several
+            # of these patterns contain wildcards)
+            if any(re.search(pattern, lower_msg) for pattern in [
+                "port.*already.*allocated",
+                "address already in use",
+                "bind.*address already in use",
+                "port is already allocated"
+            ]):
+                error_messages.append("Port conflict detected")
+                up_success["value"] = False
+            elif "error" in lower_msg or "failed" in lower_msg:
+                # Generic error detection
+                if message not in error_messages:
+                    error_messages.append(message)
+
             yield False, message, replace_last
 
         if up_success["value"]:
             yield True, "Services started successfully", False
         else:
             yield False, "Failed to start services. See output above for details.", False
+            if error_messages:
+                yield False, "\nDetected errors:", False
+                for err in error_messages[:5]:  # Limit to first 5 errors
+                    yield False, f"  - {err}", False
 
     async def stop_services(self) -> AsyncIterator[tuple[bool, str]]:
         """Stop all services and yield progress updates."""
diff --git a/src/tui/screens/monitor.py b/src/tui/screens/monitor.py
index 01c243c6..601566b7 100644
--- a/src/tui/screens/monitor.py
+++ b/src/tui/screens/monitor.py
@@ -311,17 +311,46 @@ class MonitorScreen(Screen):
         """Start services with progress updates."""
         self.operation_in_progress = True
         try:
+            # Check for port conflicts before attempting to start
+            ports_available, conflicts = await self.container_manager.check_ports_available()
+            if not ports_available:
+                # Show error notification instead of modal
+                conflict_msgs = []
+                for service_name, port, error_msg in conflicts[:3]:  # Show first 3
+                    conflict_msgs.append(f"{service_name} (port {port})")
+
+                conflict_str = ", ".join(conflict_msgs)
+                if len(conflicts) > 3:
+                    conflict_str += f" and {len(conflicts) - 3} more"
+
+                self.notify(
+                    f"Cannot start services: Port conflicts detected for {conflict_str}. "
+                    f"Please stop the conflicting services first.",
+                    severity="error",
+                    timeout=10
+                )
+                # Refresh to show current state
+                await self._refresh_services()
+                return
+
             # Show command output in modal dialog
             command_generator = self.container_manager.start_services(cpu_mode)
             modal = CommandOutputModal(
                 "Starting Services",
                 command_generator,
-                on_complete=None,  # We'll refresh in on_screen_resume instead
+                on_complete=self._on_start_complete,  # Refresh after completion
             )
             self.app.push_screen(modal)
+        except Exception as e:
+            self.notify(f"Error starting services: {str(e)}", severity="error")
+            await self._refresh_services()
         finally:
             self.operation_in_progress = False
 
+    async def _on_start_complete(self) -> None:
+        """Callback after service start completes."""
+        await self._refresh_services()
+
     async def _stop_services(self) -> None:
         """Stop services with progress updates."""
         self.operation_in_progress = True
diff --git a/src/tui/widgets/command_modal.py b/src/tui/widgets/command_modal.py
index a5013031..a75a46ee 100644
--- a/src/tui/widgets/command_modal.py
+++ b/src/tui/widgets/command_modal.py
@@ -23,6 +23,7 @@ class CommandOutputModal(ModalScreen):
         ("p", "pause_waves", "Pause"),
         ("f", "speed_up", "Faster"),
         ("s", "speed_down", "Slower"),
+        ("escape", "close_modal", "Close"),
     ]
 
     DEFAULT_CSS = """
@@ -188,6 +189,8 @@ class CommandOutputModal(ModalScreen):
         self._output_lines: list[str] = []
         self._layer_line_map: dict[str, int] = {}  # Maps layer ID to line index
         self._status_task: Optional[asyncio.Task] = None
+        self._error_detected = False
+        self._command_complete = False
 
     def compose(self) -> ComposeResult:
         """Create the modal dialog layout."""
@@ -254,6 +257,12 @@ class CommandOutputModal(ModalScreen):
         for w in waves.wavelets:
             w.speed = max(0.1, w.speed * 0.8)
 
+    def action_close_modal(self) -> None:
+        """Close the modal (only if error detected or command complete)."""
+        close_btn = self.query_one("#close-btn", Button)
+        if not close_btn.disabled:
+            self.dismiss()
+
     async def _run_command(self) -> None:
         """Run the command and update the output in real-time."""
         output = self.query_one("#command-output", TextArea)
@@ -273,8 +282,25 @@ class CommandOutputModal(ModalScreen):
                 # Move cursor to end to trigger scroll
                 output.move_cursor((len(self._output_lines), 0))
 
+                # Detect error patterns in messages (regex search, since some
+                # patterns contain wildcards)
+                import re
+                lower_msg = message.lower() if message else ""
+                if not self._error_detected and any(re.search(pattern, lower_msg) for pattern in [
+                    "error:",
+                    "failed",
+                    "port.*already.*allocated",
+                    "address already in use",
+                    "not found",
+                    "permission denied"
+                ]):
+                    self._error_detected = True
+                    # Enable close button when error detected
+                    close_btn = self.query_one("#close-btn", Button)
+                    close_btn.disabled = False
+
             # If command is complete, update UI
             if is_complete:
+                self._command_complete = True
                 self._update_output("Command completed successfully", False)
                 output.text = "\n".join(self._output_lines)
                 output.move_cursor((len(self._output_lines), 0))

From 0f2012bbb994cd37d857126f0035e533890d2acc Mon Sep 17 00:00:00 2001
From: Edwin Jose
Date: Tue, 25 Nov 2025 22:50:38 -0500
Subject: [PATCH 17/43] Update Langflow tweaks and add provider credentials to
 headers

Replaces all references to 'OpenSearchHybrid-Ve6bS' with
'OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4' in main.py,
processors, and file service. Adds a utility for injecting provider
credentials into Langflow request headers and integrates it into chat and
file services for improved credential handling.
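
Usage from the services is uniform; a minimal sketch:

    from config.settings import get_openrag_config
    from utils.langflow_headers import add_provider_credentials_to_headers

    headers: dict[str, str] = {}
    add_provider_credentials_to_headers(headers, get_openrag_config())
    # Depending on which providers are configured, headers now carries:
    #   X-LANGFLOW-GLOBAL-VAR-OPENAI_API_KEY
    #   X-LANGFLOW-GLOBAL-VAR-ANTHROPIC_API_KEY
    #   X-LANGFLOW-GLOBAL-VAR-WATSONX_API_KEY
    #   X-LANGFLOW-GLOBAL-VAR-WATSONX_PROJECT_ID
    #   X-LANGFLOW-GLOBAL-VAR-OLLAMA_BASE_URL (localhost rewritten for containers)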
---
 src/main.py                           |  2 +-
 src/models/processors.py              |  6 ++---
 src/services/chat_service.py          | 18 ++++++++++++++-
 src/services/langflow_file_service.py | 15 ++++++++----
 src/utils/langflow_headers.py         | 33 +++++++++++++++++++++++++++
 5 files changed, 64 insertions(+), 10 deletions(-)
 create mode 100644 src/utils/langflow_headers.py

diff --git a/src/main.py b/src/main.py
index 8f714be9..ca2f6b11 100644
--- a/src/main.py
+++ b/src/main.py
@@ -370,7 +370,7 @@ async def _ingest_default_documents_langflow(services, file_paths):
 
         # Prepare tweaks for default documents with anonymous user metadata
         default_tweaks = {
-            "OpenSearchHybrid-Ve6bS": {
+            "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4": {
                 "docs_metadata": [
                     {"key": "owner", "value": None},
                     {"key": "owner_name", "value": anonymous_user.name},
diff --git a/src/models/processors.py b/src/models/processors.py
index a5acc57d..7edbc475 100644
--- a/src/models/processors.py
+++ b/src/models/processors.py
@@ -743,9 +743,9 @@ class LangflowFileProcessor(TaskProcessor):
 
         if metadata_tweaks:
             # Initialize the OpenSearch component tweaks if not already present
-            if "OpenSearchHybrid-Ve6bS" not in final_tweaks:
-                final_tweaks["OpenSearchHybrid-Ve6bS"] = {}
-            final_tweaks["OpenSearchHybrid-Ve6bS"]["docs_metadata"] = metadata_tweaks
+            if "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4" not in final_tweaks:
+                final_tweaks["OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4"] = {}
+            final_tweaks["OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4"]["docs_metadata"] = metadata_tweaks
 
         # Process file using langflow service
         result = await self.langflow_file_service.upload_and_ingest_file(
diff --git a/src/services/chat_service.py b/src/services/chat_service.py
index ecbc9e3e..040f03d8 100644
--- a/src/services/chat_service.py
+++ b/src/services/chat_service.py
@@ -67,10 +67,15 @@ class ChatService:
             extra_headers["X-LANGFLOW-GLOBAL-VAR-JWT"] = jwt_token
 
         # Pass the selected embedding model as a global variable
         from config.settings import get_openrag_config
+        from utils.langflow_headers import add_provider_credentials_to_headers
+
         config = get_openrag_config()
         embedding_model = config.knowledge.embedding_model
         extra_headers["X-LANGFLOW-GLOBAL-VAR-SELECTED_EMBEDDING_MODEL"] = embedding_model
-
+
+        # Add provider credentials to headers
+        add_provider_credentials_to_headers(extra_headers, config)
+
         logger.debug(f"[LF] Extra headers {extra_headers}")
         # Get context variables for filters, limit, and threshold
         from auth_context import (
             get_score_threshold,
@@ -182,9 +187,14 @@ class ChatService:
         # Pass the selected embedding model as a global variable
         from config.settings import get_openrag_config
+        from utils.langflow_headers import add_provider_credentials_to_headers
+
         config = get_openrag_config()
         embedding_model = config.knowledge.embedding_model
         extra_headers["X-LANGFLOW-GLOBAL-VAR-SELECTED_EMBEDDING_MODEL"] = embedding_model
+
+        # Add provider credentials to headers
+        add_provider_credentials_to_headers(extra_headers, config)
 
         # Build the complete filter expression like the chat service does
         filter_expression = {}
@@ -306,9 +316,15 @@ class ChatService:
         # Pass the selected embedding model as a global variable
         from config.settings import get_openrag_config
+        from utils.langflow_headers import add_provider_credentials_to_headers
+
         config = get_openrag_config()
         embedding_model = config.knowledge.embedding_model
         extra_headers["X-LANGFLOW-GLOBAL-VAR-SELECTED_EMBEDDING_MODEL"] = embedding_model
+
+        # Add provider credentials to headers
+        add_provider_credentials_to_headers(extra_headers, config)
+
         # Ensure the Langflow client exists; try lazy init if needed
         langflow_client = await clients.ensure_langflow_client()
         if not langflow_client:
diff --git a/src/services/langflow_file_service.py b/src/services/langflow_file_service.py
index 0c25adc1..5e7204cc 100644
--- a/src/services/langflow_file_service.py
+++ b/src/services/langflow_file_service.py
@@ -94,7 +94,7 @@ class LangflowFileService:
         # Pass JWT token via tweaks using the x-langflow-global-var- pattern
         if jwt_token:
             # Using the global variable pattern that Langflow expects for OpenSearch components
-            tweaks["OpenSearchHybrid-Ve6bS"] = {"jwt_token": jwt_token}
+            tweaks["OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4"] = {"jwt_token": jwt_token}
             logger.debug("[LF] Added JWT token to tweaks for OpenSearch components")
         else:
             logger.warning("[LF] No JWT token provided")
@@ -112,9 +112,9 @@ class LangflowFileService:
         logger.info(f"[LF] Metadata tweaks {metadata_tweaks}")
         # if metadata_tweaks:
         #     # Initialize the OpenSearch component tweaks if not already present
-        #     if "OpenSearchHybrid-Ve6bS" not in tweaks:
-        #         tweaks["OpenSearchHybrid-Ve6bS"] = {}
-        #         tweaks["OpenSearchHybrid-Ve6bS"]["docs_metadata"] = metadata_tweaks
+        #     if "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4" not in tweaks:
+        #         tweaks["OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4"] = {}
+        #         tweaks["OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4"]["docs_metadata"] = metadata_tweaks
         #     logger.debug(
         #         "[LF] Added metadata to tweaks", metadata_count=len(metadata_tweaks)
         #     )
@@ -140,8 +140,10 @@ class LangflowFileService:
         filename = str(file_tuples[0][0]) if file_tuples and len(file_tuples) > 0 else ""
         mimetype = str(file_tuples[0][2]) if file_tuples and len(file_tuples) > 0 and len(file_tuples[0]) > 2 else ""
 
-        # Get the current embedding model from config
+        # Get the current embedding model and provider credentials from config
         from config.settings import get_openrag_config
+        from utils.langflow_headers import add_provider_credentials_to_headers
+
         config = get_openrag_config()
         embedding_model = config.knowledge.embedding_model
@@ -156,6 +158,9 @@ class LangflowFileService:
             "X-Langflow-Global-Var-FILESIZE": str(file_size_bytes),
             "X-Langflow-Global-Var-SELECTED_EMBEDDING_MODEL": str(embedding_model),
         }
+
+        # Add provider credentials as global variables for ingestion
+        add_provider_credentials_to_headers(headers, config)
         logger.info(f"[LF] Headers {headers}")
         logger.info(f"[LF] Payload {payload}")
         resp = await clients.langflow_request(
diff --git a/src/utils/langflow_headers.py b/src/utils/langflow_headers.py
new file mode 100644
index 00000000..e97fd0c4
--- /dev/null
+++ b/src/utils/langflow_headers.py
@@ -0,0 +1,33 @@
+"""Utility functions for building Langflow request headers."""
+
+from typing import Dict
+from utils.container_utils import transform_localhost_url
+
+
+def add_provider_credentials_to_headers(headers: Dict[str, str], config) -> None:
+    """Add provider credentials to headers as Langflow global variables.
+
+    Args:
+        headers: Dictionary of headers to add credentials to
+        config: OpenRAGConfig object containing provider configurations
+    """
+    # Add OpenAI credentials
+    if config.providers.openai.api_key:
+        headers["X-LANGFLOW-GLOBAL-VAR-OPENAI_API_KEY"] = str(config.providers.openai.api_key)
+
+    # Add Anthropic credentials
+    if config.providers.anthropic.api_key:
+        headers["X-LANGFLOW-GLOBAL-VAR-ANTHROPIC_API_KEY"] = str(config.providers.anthropic.api_key)
+
+    # Add WatsonX credentials
+    if config.providers.watsonx.api_key:
+        headers["X-LANGFLOW-GLOBAL-VAR-WATSONX_API_KEY"] = str(config.providers.watsonx.api_key)
+
+    if config.providers.watsonx.project_id:
+        headers["X-LANGFLOW-GLOBAL-VAR-WATSONX_PROJECT_ID"] = str(config.providers.watsonx.project_id)
+
+    # Add Ollama endpoint (with localhost transformation)
+    if config.providers.ollama.endpoint:
+        ollama_endpoint = transform_localhost_url(config.providers.ollama.endpoint)
+        headers["X-LANGFLOW-GLOBAL-VAR-OLLAMA_BASE_URL"] = str(ollama_endpoint)

From 8aa44037dc56c236bb0084cfbc794083a8e55fe8 Mon Sep 17 00:00:00 2001
From: Edwin Jose
Date: Tue, 25 Nov 2025 22:50:43 -0500
Subject: [PATCH 18/43] Update Dockerfile.langflow

---
 Dockerfile.langflow | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Dockerfile.langflow b/Dockerfile.langflow
index 6119de84..aaa9f5d6 100644
--- a/Dockerfile.langflow
+++ b/Dockerfile.langflow
@@ -1,4 +1,4 @@
-FROM langflowai/langflow-nightly:1.7.0.dev19
+FROM langflowai/langflow-nightly:1.7.0.dev21
 
 EXPOSE 7860
 

From d3a66c82b97a3dff65d53da238174928e2b89fef Mon Sep 17 00:00:00 2001
From: Edwin Jose
Date: Tue, 25 Nov 2025 23:09:58 -0500
Subject: [PATCH 19/43] Add fail-safe mode to Embedding Model component

Introduces a 'fail_safe_mode' option to the Embedding Model component,
allowing errors to be logged and None returned instead of raising exceptions.
Refactors embedding initialization logic for OpenAI, Ollama, and IBM
watsonx.ai providers to support this mode, and updates UI configuration and
metadata accordingly.
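
Every provider branch now follows the same guard pattern; for example, the
OpenAI path (excerpted from the component code):

    if not api_key:
        msg = "OpenAI API key is required when using OpenAI provider"
        if self.fail_safe_mode:
            logger.error(msg)
            return None
        raise ValueError(msg)

With fail_safe_mode enabled, the api_key input is also no longer marked as
required in the UI, so a flow can load and run before credentials are
supplied.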
--- flows/ingestion_flow.json | 546 +++++++++++++++++++++++++++++++++----- flows/openrag_agent.json | 196 +++++++++----- flows/openrag_nudges.json | 156 ++++++++--- 3 files changed, 720 insertions(+), 178 deletions(-) diff --git a/flows/ingestion_flow.json b/flows/ingestion_flow.json index 3a9fccd8..e3e0252a 100644 --- a/flows/ingestion_flow.json +++ b/flows/ingestion_flow.json @@ -375,6 +375,7 @@ }, { "animated": false, + "className": "", "data": { "sourceHandle": { "dataType": "EmbeddingModel", @@ -399,6 +400,36 @@ "sourceHandle": "{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-EAo9iœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}", "target": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4", "targetHandle": "{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4œ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}" + }, + { + "animated": false, + "className": "", + "data": { + "sourceHandle": { + "dataType": "AdvancedDynamicFormBuilder", + "id": "AdvancedDynamicFormBuilder-81Exw", + "name": "form_data", + "output_types": [ + "Data" + ] + }, + "targetHandle": { + "fieldName": "input_value", + "id": "ChatOutput-4Bw8A", + "inputTypes": [ + "Data", + "DataFrame", + "Message" + ], + "type": "other" + } + }, + "id": "xy-edge__AdvancedDynamicFormBuilder-81Exw{œdataTypeœ:œAdvancedDynamicFormBuilderœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œnameœ:œform_dataœ,œoutput_typesœ:[œDataœ]}-ChatOutput-4Bw8A{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-4Bw8Aœ,œinputTypesœ:[œDataœ,œDataFrameœ,œMessageœ],œtypeœ:œotherœ}", + "selected": false, + "source": "AdvancedDynamicFormBuilder-81Exw", + "sourceHandle": "{œdataTypeœ:œAdvancedDynamicFormBuilderœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œnameœ:œform_dataœ,œoutput_typesœ:[œDataœ]}", + "target": "ChatOutput-4Bw8A", + "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-4Bw8Aœ,œinputTypesœ:[œDataœ,œDataFrameœ,œMessageœ],œtypeœ:œotherœ}" } ], "nodes": [ @@ -429,7 +460,7 @@ "frozen": false, "icon": "scissors-line-dashed", "legacy": false, - "lf_version": "1.7.0", + "lf_version": "1.7.0.dev21", "metadata": { "code_hash": "f2867efda61f", "dependencies": { @@ -666,9 +697,9 @@ ], "frozen": false, "icon": "braces", - "last_updated": "2025-11-26T00:02:32.601Z", + "last_updated": "2025-11-26T04:04:23.261Z", "legacy": false, - "lf_version": "1.7.0", + "lf_version": "1.7.0.dev21", "metadata": {}, "minimized": false, "output_types": [], @@ -716,7 +747,7 @@ "value": "5488df7c-b93f-4f87-a446-b67028bc0813" }, "_frontend_node_folder_id": { - "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" }, "_type": "Component", "code": { @@ -1005,7 +1036,7 @@ "frozen": false, "icon": "type", "legacy": false, - "lf_version": "1.7.0", + "lf_version": "1.7.0.dev21", "metadata": {}, "minimized": false, "output_types": [], @@ -1105,7 +1136,7 @@ "frozen": false, "icon": "type", "legacy": false, - "lf_version": "1.7.0", + "lf_version": "1.7.0.dev21", "metadata": {}, "minimized": false, "output_types": [], @@ -1205,7 +1236,7 @@ "frozen": false, "icon": "type", "legacy": false, - "lf_version": "1.7.0", + "lf_version": "1.7.0.dev21", "metadata": {}, "minimized": false, "output_types": [], @@ -1305,7 +1336,7 @@ "frozen": false, "icon": "type", "legacy": false, - "lf_version": "1.7.0", + "lf_version": "1.7.0.dev21", "metadata": {}, "minimized": false, "output_types": [], @@ -1416,7 +1447,7 @@ "frozen": false, "icon": "Docling", "legacy": false, - "lf_version": "1.7.0", + "lf_version": 
"1.7.0.dev21", "metadata": { "code_hash": "26eeb513dded", "dependencies": { @@ -1692,9 +1723,7 @@ "bz2", "gz" ], - "file_path": [ - "167c86ee-113b-4286-990c-b9566c363215/IBM Recognition Center Home.pdf" - ], + "file_path": [], "info": "Supported file extensions: adoc, asciidoc, asc, bmp, csv, dotx, dotm, docm, docx, htm, html, jpeg, json, md, pdf, png, potx, ppsx, pptm, potm, ppsm, pptx, tiff, txt, xls, xlsx, xhtml, xml, webp; optionally bundled in file extensions: zip, tar, tgz, bz2, gz", "list": true, "list_add_label": "Add More", @@ -1754,7 +1783,7 @@ "dragging": false, "id": "DoclingRemote-Dp3PX", "measured": { - "height": 479, + "height": 475, "width": 320 }, "position": { @@ -1791,7 +1820,7 @@ "icon": "Docling", "last_updated": "2025-10-04T01:42:10.290Z", "legacy": false, - "lf_version": "1.7.0", + "lf_version": "1.7.0.dev21", "metadata": { "code_hash": "4de16ddd37ac", "dependencies": { @@ -2053,9 +2082,9 @@ ], "frozen": false, "icon": "table", - "last_updated": "2025-11-26T00:02:32.725Z", + "last_updated": "2025-11-26T04:04:23.363Z", "legacy": false, - "lf_version": "1.7.0", + "lf_version": "1.7.0.dev21", "metadata": { "code_hash": "904f4eaebccd", "dependencies": { @@ -2100,7 +2129,7 @@ "value": "5488df7c-b93f-4f87-a446-b67028bc0813" }, "_frontend_node_folder_id": { - "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" }, "_type": "Component", "ascending": { @@ -2504,9 +2533,9 @@ ], "frozen": false, "icon": "table", - "last_updated": "2025-11-26T00:02:32.726Z", + "last_updated": "2025-11-26T04:04:23.363Z", "legacy": false, - "lf_version": "1.7.0", + "lf_version": "1.7.0.dev21", "metadata": { "code_hash": "904f4eaebccd", "dependencies": { @@ -2551,7 +2580,7 @@ "value": "5488df7c-b93f-4f87-a446-b67028bc0813" }, "_frontend_node_folder_id": { - "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" }, "_type": "Component", "ascending": { @@ -2955,9 +2984,9 @@ ], "frozen": false, "icon": "table", - "last_updated": "2025-11-26T00:02:32.727Z", + "last_updated": "2025-11-26T04:04:23.364Z", "legacy": false, - "lf_version": "1.7.0", + "lf_version": "1.7.0.dev21", "metadata": { "code_hash": "904f4eaebccd", "dependencies": { @@ -3002,7 +3031,7 @@ "value": "5488df7c-b93f-4f87-a446-b67028bc0813" }, "_frontend_node_folder_id": { - "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" }, "_type": "Component", "ascending": { @@ -3445,8 +3474,9 @@ ], "frozen": false, "icon": "OpenSearch", - "last_updated": "2025-11-26T00:02:57.256Z", + "last_updated": "2025-11-26T03:03:10.896Z", "legacy": false, + "lf_version": "1.7.0.dev21", "metadata": { "code_hash": "8c78d799fef4", "dependencies": { @@ -3526,7 +3556,7 @@ "value": "5488df7c-b93f-4f87-a446-b67028bc0813" }, "_frontend_node_folder_id": { - "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" }, "_type": "Component", "auth_mode": { @@ -3830,7 +3860,7 @@ "dynamic": false, "info": "Valid JSON Web Token for authentication. 
Will be sent in the Authorization header (with optional 'Bearer ' prefix).", "input_types": [], - "load_from_db": true, + "load_from_db": false, "name": "jwt_token", "override_skip": false, "password": true, @@ -3840,7 +3870,7 @@ "title_case": false, "track_in_telemetry": false, "type": "str", - "value": "JWT" + "value": "jwt" }, "m": { "_input_type": "IntInput", @@ -3965,7 +3995,7 @@ "trace_as_metadata": true, "track_in_telemetry": false, "type": "query", - "value": "" + "value": "langflow" }, "should_cache_vector_store": { "_input_type": "BoolInput", @@ -4116,7 +4146,7 @@ "x": 2261.865622928042, "y": 1349.2821108833643 }, - "selected": false, + "selected": true, "type": "genericNode" }, { @@ -4132,7 +4162,7 @@ "description": "Generate embeddings using a specified provider.", "display_name": "Embedding Model", "documentation": "https://docs.langflow.org/components-embedding-models", - "edited": false, + "edited": true, "field_order": [ "provider", "api_base", @@ -4148,15 +4178,16 @@ "show_progress_bar", "model_kwargs", "truncate_input_tokens", - "input_text" + "input_text", + "fail_safe_mode" ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-26T00:02:32.604Z", + "last_updated": "2025-11-26T04:04:42.856Z", "legacy": false, - "lf_version": "1.7.0", + "lf_version": "1.7.0.dev21", "metadata": { - "code_hash": "9e44c83a5058", + "code_hash": "0e2d6fe67a26", "dependencies": { "dependencies": [ { @@ -4173,7 +4204,7 @@ }, { "name": "lfx", - "version": null + "version": "0.2.0.dev21" }, { "name": "langchain_ollama", @@ -4190,7 +4221,7 @@ ], "total_dependencies": 7 }, - "module": "lfx.components.models_and_agents.embedding_model.EmbeddingModelComponent" + "module": "custom_components.embedding_model" }, "minimized": false, "output_types": [], @@ -4200,6 +4231,7 @@ "cache": true, "display_name": "Embedding Model", "group_outputs": false, + "hidden": null, "loop_types": null, "method": "build_embeddings", "name": "embeddings", @@ -4219,7 +4251,7 @@ "value": "5488df7c-b93f-4f87-a446-b67028bc0813" }, "_frontend_node_folder_id": { - "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" }, "_type": "Component", "api_base": { @@ -4260,7 +4292,7 @@ "password": true, "placeholder": "", "real_time_refresh": true, - "required": true, + "required": false, "show": true, "title_case": false, "track_in_telemetry": false, @@ -4335,7 +4367,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = 
\"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and 
transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n 
build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await 
is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). 
Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n BoolInput(\n name=\"fail_safe_mode\",\n display_name=\"Fail-Safe Mode\",\n value=False,\n advanced=True,\n info=\"When enabled, errors will be logged instead of raising exceptions. 
\"\n \"The component will return None on error.\",\n real_time_refresh=True,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n async def fetch_ollama_models(self) -> list[str]:\n try:\n return await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n except Exception: # noqa: BLE001\n\n logger.exception(\"Error fetching models\")\n return []\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize OpenAI embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n try:\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = 
transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await self.fetch_ollama_models()\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize Ollama embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to authenticate with IBM watsonx.ai: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n msg = f\"Unknown provider: {provider}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n # Handle fail_safe_mode changes first - set all required fields to False if enabled\n if field_name == 
\"fail_safe_mode\":\n if field_value: # If fail_safe_mode is enabled\n build_config[\"api_key\"][\"required\"] = False\n elif hasattr(self, \"provider\"):\n # If fail_safe_mode is disabled, restore required flags based on provider\n if self.provider in [\"OpenAI\", \"IBM watsonx.ai\"]:\n build_config[\"api_key\"][\"required\"] = True\n else: # Ollama\n build_config[\"api_key\"][\"required\"] = False\n\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall 
back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" }, "dimensions": { "_input_type": "IntInput", @@ -4357,6 +4389,28 @@ "type": "int", "value": "" }, + "fail_safe_mode": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Fail-Safe Mode", + "dynamic": false, + "info": "When enabled, errors will be logged instead of raising exceptions. The component will return None on error.", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "fail_safe_mode", + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, "input_text": { "_input_type": "BoolInput", "advanced": true, @@ -4407,6 +4461,7 @@ "dynamic": false, "external_options": {}, "info": "Select the embedding model to use", + "load_from_db": false, "name": "model", "options": [ "ibm/granite-embedding-278m-multilingual", @@ -4510,6 +4565,7 @@ "dynamic": false, "external_options": {}, "info": "Select the embedding model provider", + "load_from_db": false, "name": "provider", "options": [ "OpenAI", @@ -4531,6 +4587,9 @@ "placeholder": "", "real_time_refresh": true, "required": false, + "selected_metadata": { + "icon": "WatsonxAI" + }, "show": true, "title_case": false, "toggle": false, @@ -4632,7 +4691,7 @@ "description": "Generate embeddings using a specified provider.", "display_name": "Embedding Model", "documentation": "https://docs.langflow.org/components-embedding-models", - "edited": false, + "edited": true, "field_order": [ "provider", "api_base", @@ -4648,15 +4707,16 @@ "show_progress_bar", "model_kwargs", "truncate_input_tokens", - "input_text" + "input_text", + "fail_safe_mode" ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-26T00:02:32.605Z", + "last_updated": "2025-11-26T04:04:40.931Z", "legacy": false, - "lf_version": "1.7.0", + "lf_version": "1.7.0.dev21", "metadata": { - "code_hash": "9e44c83a5058", + "code_hash": "0e2d6fe67a26", "dependencies": { "dependencies": [ { @@ -4673,7 +4733,7 @@ }, { "name": "lfx", - "version": null + "version": "0.2.0.dev21" }, { "name": "langchain_ollama", @@ -4690,7 +4750,7 @@ ], "total_dependencies": 7 }, - "module": "lfx.components.models_and_agents.embedding_model.EmbeddingModelComponent" + "module": "custom_components.embedding_model" }, "minimized": false, "output_types": [], @@ -4700,6 +4760,7 @@ "cache": true, "display_name": "Embedding Model", "group_outputs": false, + "hidden": null, "loop_types": null, "method": "build_embeddings", "name": "embeddings", @@ -4719,7 +4780,7 @@ "value": "5488df7c-b93f-4f87-a446-b67028bc0813" }, 
"_frontend_node_folder_id": { - "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" }, "_type": "Component", "api_base": { @@ -4835,7 +4896,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and 
transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n 
build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await 
is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). 
Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n BoolInput(\n name=\"fail_safe_mode\",\n display_name=\"Fail-Safe Mode\",\n value=False,\n advanced=True,\n info=\"When enabled, errors will be logged instead of raising exceptions. 
\"\n \"The component will return None on error.\",\n real_time_refresh=True,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n async def fetch_ollama_models(self) -> list[str]:\n try:\n return await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return []\n\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize OpenAI embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n try:\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = 
transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await self.fetch_ollama_models()\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize Ollama embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to authenticate with IBM watsonx.ai: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n msg = f\"Unknown provider: {provider}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n # Handle fail_safe_mode changes first - set all required fields to False if enabled\n if field_name == 
\"fail_safe_mode\":\n if field_value: # If fail_safe_mode is enabled\n build_config[\"api_key\"][\"required\"] = False\n elif hasattr(self, \"provider\"):\n # If fail_safe_mode is disabled, restore required flags based on provider\n if self.provider in [\"OpenAI\", \"IBM watsonx.ai\"]:\n build_config[\"api_key\"][\"required\"] = True\n else: # Ollama\n build_config[\"api_key\"][\"required\"] = False\n\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # Refresh Ollama models when the base URL changes\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" }, "dimensions": { "_input_type": "IntInput", @@ -4857,6 +4918,28 @@ "type": "int", "value": "" }, + "fail_safe_mode": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Fail-Safe Mode", + "dynamic": false, + "info": "When enabled, errors will be logged instead of raising exceptions. The component will return None on error.", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "fail_safe_mode", + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, "input_text": { "_input_type": "BoolInput", "advanced": true, @@ -4907,11 +4990,9 @@ "dynamic": false, "external_options": {}, "info": "Select the embedding model to use", + "load_from_db": false, "name": "model", - "options": [ - "bge-large:latest", - "qwen3-embedding:4b" - ], + "options": [], "options_metadata": [], "override_skip": false, "placeholder": "", @@ -4925,7 +5006,7 @@ "trace_as_metadata": true, "track_in_telemetry": true, "type": "str", - "value": "bge-large:latest" + "value": "" }, "model_kwargs": { "_input_type": "DictInput", @@ -5007,6 +5088,7 @@ "dynamic": false, "external_options": {}, "info": "Select the embedding model provider", + "load_from_db": false, "name": "provider", "options": [ "OpenAI", @@ -5028,6 +5110,9 @@ "placeholder": "", "real_time_refresh": true, "required": false, + "selected_metadata": { + "icon": "Ollama" + }, "show": true, "title_case": false, "toggle": false, @@ -5129,7 +5214,7 @@ "description": "Generate embeddings using a specified provider.", "display_name": "Embedding Model", "documentation": "https://docs.langflow.org/components-embedding-models", - "edited": false, + "edited": true, "field_order": [ "provider", "api_base", @@ -5145,15 +5230,16 @@ "show_progress_bar", "model_kwargs", "truncate_input_tokens", - "input_text" + "input_text", + "fail_safe_mode" ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-26T00:02:32.606Z", + "last_updated": "2025-11-26T04:04:37.496Z", "legacy": false, - "lf_version": "1.7.0", + "lf_version": "1.7.0.dev21", "metadata": { - "code_hash": "9e44c83a5058", + "code_hash": "0e2d6fe67a26", "dependencies": { "dependencies": [ { @@ -5170,7 +5256,7 @@ }, { "name": "lfx", - "version": null + "version": "0.2.0.dev21" }, { "name": "langchain_ollama", @@ -5187,7 +5273,7 @@ ], "total_dependencies": 7 }, - "module": "lfx.components.models_and_agents.embedding_model.EmbeddingModelComponent" + "module": "custom_components.embedding_model" }, "minimized": false, 
"output_types": [], @@ -5197,6 +5283,7 @@ "cache": true, "display_name": "Embedding Model", "group_outputs": false, + "hidden": null, "loop_types": null, "method": "build_embeddings", "name": "embeddings", @@ -5216,13 +5303,13 @@ "value": "5488df7c-b93f-4f87-a446-b67028bc0813" }, "_frontend_node_folder_id": { - "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" }, "_type": "Component", "api_base": { "_input_type": "MessageTextInput", "advanced": true, - "display_name": "API Base URL", + "display_name": "OpenAI API Base URL", "dynamic": false, "info": "Base URL for the API. Leave empty for default.", "input_types": [ @@ -5257,7 +5344,7 @@ "password": true, "placeholder": "", "real_time_refresh": true, - "required": true, + "required": false, "show": true, "title_case": false, "track_in_telemetry": false, @@ -5332,7 +5419,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). 
Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n 
show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n 
model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = 
self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. 
Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n BoolInput(\n name=\"fail_safe_mode\",\n display_name=\"Fail-Safe Mode\",\n value=False,\n advanced=True,\n info=\"When enabled, errors will be logged instead of raising exceptions. 
\"\n \"The component will return None on error.\",\n real_time_refresh=True,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n async def fetch_ollama_models(self) -> list[str]:\n try:\n return await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return []\n\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize OpenAI embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n try:\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = 
transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await self.fetch_ollama_models()\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize Ollama embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to authenticate with IBM watsonx.ai: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n msg = f\"Unknown provider: {provider}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n # Handle fail_safe_mode changes first - set all required fields to False if enabled\n if field_name == 
\"fail_safe_mode\":\n if field_value: # If fail_safe_mode is enabled\n build_config[\"api_key\"][\"required\"] = False\n elif hasattr(self, \"provider\"):\n # If fail_safe_mode is disabled, restore required flags based on provider\n if self.provider in [\"OpenAI\", \"IBM watsonx.ai\"]:\n build_config[\"api_key\"][\"required\"] = True\n else: # Ollama\n build_config[\"api_key\"][\"required\"] = False\n\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall 
back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" }, "dimensions": { "_input_type": "IntInput", @@ -5354,6 +5441,28 @@ "type": "int", "value": "" }, + "fail_safe_mode": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Fail-Safe Mode", + "dynamic": false, + "info": "When enabled, errors will be logged instead of raising exceptions. The component will return None on error.", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "fail_safe_mode", + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, "input_text": { "_input_type": "BoolInput", "advanced": true, @@ -5526,6 +5635,9 @@ "placeholder": "", "real_time_refresh": true, "required": false, + "selected_metadata": { + "icon": "OpenAI" + }, "show": true, "title_case": false, "toggle": false, @@ -5613,19 +5725,317 @@ }, "selected": false, "type": "genericNode" + }, + { + "data": { + "id": "ChatOutput-4Bw8A", + "node": { + "base_classes": [ + "Message" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": {}, + "description": "Display a chat message in the Playground.", + "display_name": "Chat Output", + "documentation": "https://docs.langflow.org/chat-input-and-output", + "edited": false, + "field_order": [ + "input_value", + "should_store_message", + "sender", + "sender_name", + "session_id", + "context_id", + "data_template", + "clean_data" + ], + "frozen": false, + "icon": "MessagesSquare", + "legacy": false, + "lf_version": "1.7.0.dev21", + "metadata": { + "code_hash": "cae45e2d53f6", + "dependencies": { + "dependencies": [ + { + "name": "orjson", + "version": "3.10.15" + }, + { + "name": "fastapi", + "version": "0.120.0" + }, + { + "name": "lfx", + "version": "0.2.0.dev21" + } + ], + "total_dependencies": 3 + }, + "module": "lfx.components.input_output.chat_output.ChatOutput" + }, + "minimized": true, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Output Message", + "group_outputs": false, + "method": "message_response", + "name": "message", + "selected": "Message", + "tool_mode": true, + "types": [ + "Message" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "template": { + "_type": "Component", + "clean_data": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Basic Clean Data", + "dynamic": false, + "info": "Whether to clean data before converting to string.", + "list": false, + "list_add_label": "Add More", + "name": "clean_data", + "override_skip": false, + "placeholder": "", + 
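[Note] The fail_safe_mode input defined in this hunk changes the component's error contract: with it enabled, build_embeddings logs the failure and returns None instead of raising. Below is a minimal sketch of that guard pattern and of how a caller might handle the None result; the Embedder stub and the caller are illustrative, not part of the patch.

import logging

logger = logging.getLogger(__name__)


class Embedder:
    """Illustrative stand-in for EmbeddingModelComponent's error handling."""

    def __init__(self, api_key: str | None, *, fail_safe_mode: bool = True) -> None:
        self.api_key = api_key
        self.fail_safe_mode = fail_safe_mode

    def build_embeddings(self):
        if not self.api_key:
            msg = "OpenAI API key is required when using OpenAI provider"
            if self.fail_safe_mode:
                # Log and degrade gracefully instead of failing the whole flow.
                logger.error(msg)
                return None
            raise ValueError(msg)
        return object()  # stand-in for the real embeddings instance


embeddings = Embedder(api_key=None).build_embeddings()
if embeddings is None:
    # With fail-safe on, downstream steps must tolerate a missing model.
    print("embeddings unavailable; skipping vector indexing")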
"required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/chat-input-and-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"context_id\",\n display_name=\"Context ID\",\n info=\"The context ID of the chat. Adds an extra layer to the local memory.\",\n value=\"\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.\",\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n advanced=True,\n info=\"Whether to clean data before converting to string.\",\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, _, display_name, source_id = self.get_properties_from_source_component()\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message) and not self.is_connected_to_chat_input():\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id or self.graph.session_id or \"\"\n message.context_id = self.context_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n\n # Store message if needed\n if message.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n 
clean_data: bool = getattr(self, \"clean_data\", False)\n return \"\\n\".join([safe_convert(item, clean_data=clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" + }, + "context_id": { + "_input_type": "MessageTextInput", + "advanced": true, + "display_name": "Context ID", + "dynamic": false, + "info": "The context ID of the chat. Adds an extra layer to the local memory.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "context_id", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "data_template": { + "_input_type": "MessageTextInput", + "advanced": true, + "display_name": "Data Template", + "dynamic": false, + "info": "Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "data_template", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "{text}" + }, + "input_value": { + "_input_type": "HandleInput", + "advanced": false, + "display_name": "Inputs", + "dynamic": false, + "info": "Message to be passed as output.", + "input_types": [ + "Data", + "DataFrame", + "Message" + ], + "list": false, + "list_add_label": "Add More", + "name": "input_value", + "override_skip": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "other", + "value": "" + }, + "sender": { + "_input_type": "DropdownInput", + "advanced": true, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Sender Type", + "dynamic": false, + "external_options": {}, + "info": "Type of sender.", + "name": "sender", + "options": [ + "Machine", + "User" + ], + "options_metadata": [], + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "toggle": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "str", + "value": "Machine" + }, + "sender_name": { + "_input_type": "MessageTextInput", + "advanced": true, + "display_name": "Sender Name", + "dynamic": false, + "info": "Name of the sender.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "sender_name", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "AI" + }, + "session_id": { + "_input_type": "MessageTextInput", + "advanced": true, + "display_name": "Session ID", + "dynamic": false, + "info": "The session ID of the chat. 
If empty, the current session ID parameter will be used.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "session_id", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + }, + "should_store_message": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Store Messages", + "dynamic": false, + "info": "Store the message in the history.", + "list": false, + "list_add_label": "Add More", + "name": "should_store_message", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + } + }, + "tool_mode": false + }, + "showNode": true, + "type": "ChatOutput" + }, + "dragging": false, + "id": "ChatOutput-4Bw8A", + "measured": { + "height": 166, + "width": 320 + }, + "position": { + "x": 2752.913624839253, + "y": 1380.5717174281178 + }, + "selected": false, + "type": "genericNode" } ], "viewport": { - "x": -243.6538442549829, - "y": -690.8048310343635, - "zoom": 0.5802295280329295 + "x": 139.09990607716156, + "y": -141.6960761147153, + "zoom": 0.40088991892418724 } }, "description": "Load your data for chat context with Retrieval Augmented Generation.", "endpoint_name": null, "id": "5488df7c-b93f-4f87-a446-b67028bc0813", "is_component": false, - "last_tested_version": "1.7.0", + "last_tested_version": "1.7.0.dev21", "name": "OpenSearch Ingestion Flow", "tags": [ "openai", diff --git a/flows/openrag_agent.json b/flows/openrag_agent.json index 9479878c..761e434d 100644 --- a/flows/openrag_agent.json +++ b/flows/openrag_agent.json @@ -229,7 +229,7 @@ }, { "animated": false, - "className": "", + "className": "not-running", "data": { "sourceHandle": { "dataType": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding", @@ -277,7 +277,7 @@ "frozen": false, "icon": "type", "legacy": false, - "lf_version": "1.6.0", + "lf_version": "1.7.0.dev21", "metadata": {}, "minimized": false, "output_types": [], @@ -381,7 +381,7 @@ "frozen": false, "icon": "Mcp", "key": "mcp_lf-starter_project", - "last_updated": "2025-11-26T00:05:16.024Z", + "last_updated": "2025-11-26T04:03:11.631Z", "legacy": false, "mcpServerName": "lf-starter_project", "metadata": { @@ -433,7 +433,7 @@ "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" }, "_frontend_node_folder_id": { - "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" }, "_type": "Component", "code": { @@ -543,7 +543,6 @@ "type": "tools", "value": [ { - "_uniqueId": "opensearch_url_ingestion_flow_opensearch_url_ingestion_flow_0", "args": { "input_value": { "anyOf": [ @@ -568,33 +567,6 @@ "tags": [ "opensearch_url_ingestion_flow" ] - }, - { - "_uniqueId": "openrag_opensearch_agent_backu_openrag_opensearch_agent_backu_1", - "args": { - "input_value": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "Message to be passed as input.", - "title": "Input Value" - } - }, - "description": "OpenRAG OpenSearch Agent", - "display_description": "OpenRAG OpenSearch Agent", - "display_name": "openrag_opensearch_agent_backu", - "name": "openrag_opensearch_agent_backu", - "readonly": false, - "status": false, - "tags": [ - 
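[Note] The ChatOutput node added to the ingestion flow above serializes Data payloads for the Playground by passing them through FastAPI's jsonable_encoder and pretty-printing with orjson, wrapped in a Markdown JSON block. A standalone sketch of that step, assuming orjson and fastapi are installed (both appear in the node's dependency metadata); the sample payload is invented.

from datetime import datetime, timezone

import orjson
from fastapi.encoders import jsonable_encoder


def serialize_data(data: dict) -> str:
    # jsonable_encoder normalizes non-JSON types (e.g. datetime -> ISO string),
    # then orjson pretty-prints with 2-space indentation, mirroring
    # ChatOutput._serialize_data.
    serializable = jsonable_encoder(data)
    json_bytes = orjson.dumps(serializable, option=orjson.OPT_INDENT_2)
    return "```json\n" + json_bytes.decode("utf-8") + "\n```"


print(serialize_data({"text": "hello", "ingested_at": datetime.now(timezone.utc)}))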
"openrag_opensearch_agent_backu" - ] } ] }, @@ -1307,7 +1279,7 @@ ], "frozen": false, "icon": "bot", - "last_updated": "2025-11-26T00:05:16.025Z", + "last_updated": "2025-11-26T04:03:11.633Z", "legacy": false, "metadata": { "code_hash": "d64b11c24a1c", @@ -1357,7 +1329,7 @@ "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" }, "_frontend_node_folder_id": { - "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" }, "_type": "Component", "add_current_date_tool": { @@ -2022,7 +1994,7 @@ ], "frozen": false, "icon": "calculator", - "last_updated": "2025-11-26T00:05:16.026Z", + "last_updated": "2025-11-26T04:03:11.634Z", "legacy": false, "metadata": { "code_hash": "acbe2603b034", @@ -2065,7 +2037,7 @@ "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" }, "_frontend_node_folder_id": { - "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" }, "_type": "Component", "code": { @@ -2187,7 +2159,7 @@ "description": "Generate embeddings using a specified provider.", "display_name": "Embedding Model", "documentation": "https://docs.langflow.org/components-embedding-models", - "edited": false, + "edited": true, "field_order": [ "provider", "api_base", @@ -2203,14 +2175,16 @@ "show_progress_bar", "model_kwargs", "truncate_input_tokens", - "input_text" + "input_text", + "fail_safe_mode" ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-26T00:05:16.026Z", + "last_updated": "2025-11-26T04:03:41.263Z", "legacy": false, + "lf_version": "1.7.0.dev21", "metadata": { - "code_hash": "9e44c83a5058", + "code_hash": "0e2d6fe67a26", "dependencies": { "dependencies": [ { @@ -2227,7 +2201,7 @@ }, { "name": "lfx", - "version": null + "version": "0.2.0.dev21" }, { "name": "langchain_ollama", @@ -2254,6 +2228,7 @@ "cache": true, "display_name": "Embedding Model", "group_outputs": false, + "hidden": null, "loop_types": null, "method": "build_embeddings", "name": "embeddings", @@ -2273,13 +2248,13 @@ "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" }, "_frontend_node_folder_id": { - "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" }, "_type": "Component", "api_base": { "_input_type": "MessageTextInput", "advanced": true, - "display_name": "API Base URL", + "display_name": "OpenAI API Base URL", "dynamic": false, "info": "Base URL for the API. 
Leave empty for default.", "input_types": [ @@ -2314,7 +2289,7 @@ "password": true, "placeholder": "", "real_time_refresh": true, - "required": true, + "required": false, "show": true, "title_case": false, "track_in_telemetry": false, @@ -2389,7 +2364,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and 
transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n 
build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await 
is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). 
Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n BoolInput(\n name=\"fail_safe_mode\",\n display_name=\"Fail-Safe Mode\",\n value=False,\n advanced=True,\n info=\"When enabled, errors will be logged instead of raising exceptions. 
\"\n \"The component will return None on error.\",\n real_time_refresh=True,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n async def fetch_ollama_models(self) -> list[str]:\n try:\n return await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n except Exception: # noqa: BLE001\n\n logger.exception(\"Error fetching models\")\n return []\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize OpenAI embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n try:\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = 
transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await self.fetch_ollama_models()\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize Ollama embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to authenticate with IBM watsonx.ai: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n msg = f\"Unknown provider: {provider}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n # Handle fail_safe_mode changes first - set all required fields to False if enabled\n if field_name == 
\"fail_safe_mode\":\n if field_value: # If fail_safe_mode is enabled\n build_config[\"api_key\"][\"required\"] = False\n elif hasattr(self, \"provider\"):\n # If fail_safe_mode is disabled, restore required flags based on provider\n if self.provider in [\"OpenAI\", \"IBM watsonx.ai\"]:\n build_config[\"api_key\"][\"required\"] = True\n else: # Ollama\n build_config[\"api_key\"][\"required\"] = False\n\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall 
back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" }, "dimensions": { "_input_type": "IntInput", @@ -2411,6 +2386,28 @@ "type": "int", "value": "" }, + "fail_safe_mode": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Fail-Safe Mode", + "dynamic": false, + "info": "When enabled, errors will be logged instead of raising exceptions. The component will return None on error.", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "fail_safe_mode", + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, "input_text": { "_input_type": "BoolInput", "advanced": true, @@ -2583,6 +2580,9 @@ "placeholder": "", "real_time_refresh": true, "required": false, + "selected_metadata": { + "icon": "OpenAI" + }, "show": true, "title_case": false, "toggle": false, @@ -2715,8 +2715,9 @@ ], "frozen": false, "icon": "OpenSearch", - "last_updated": "2025-11-26T00:05:16.028Z", + "last_updated": "2025-11-26T04:03:11.636Z", "legacy": false, + "lf_version": "1.7.0.dev21", "metadata": { "code_hash": "8c78d799fef4", "dependencies": { @@ -2762,7 +2763,7 @@ "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" }, "_frontend_node_folder_id": { - "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" }, "_type": "Component", "auth_mode": { @@ -3452,7 +3453,7 @@ "description": "Generate embeddings using a specified provider.", "display_name": "Embedding Model", "documentation": "https://docs.langflow.org/components-embedding-models", - "edited": false, + "edited": true, "field_order": [ "provider", "api_base", @@ -3468,14 +3469,16 @@ "show_progress_bar", "model_kwargs", "truncate_input_tokens", - "input_text" + "input_text", + "fail_safe_mode" ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-26T00:05:16.028Z", + "last_updated": "2025-11-26T04:03:32.791Z", "legacy": false, + "lf_version": "1.7.0.dev21", "metadata": { - "code_hash": "9e44c83a5058", + "code_hash": "0e2d6fe67a26", "dependencies": { "dependencies": [ { @@ -3492,7 +3495,7 @@ }, { "name": "lfx", - "version": null + "version": "0.2.0.dev21" }, { "name": "langchain_ollama", @@ -3519,6 +3522,7 @@ "cache": true, "display_name": "Embedding Model", "group_outputs": false, + "hidden": null, "loop_types": null, "method": "build_embeddings", "name": "embeddings", @@ -3538,7 +3542,7 @@ "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" }, "_frontend_node_folder_id": { - "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" }, 
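[Note] For reference, fetch_ibm_models in both copies of this component queries the watsonx.ai foundation_model_specs endpoint for embedding-capable, non-withdrawn models and falls back to a static list on any error. A minimal sketch outside the component, reusing the same endpoint, API version, and filter string; FALLBACK_MODELS is an illustrative stand-in for the lfx constant WATSONX_EMBEDDING_MODEL_NAMES.

import requests

FALLBACK_MODELS = ["ibm/granite-embedding-107m-multilingual"]  # illustrative


def fetch_ibm_models(base_url: str = "https://us-south.ml.cloud.ibm.com") -> list[str]:
    try:
        response = requests.get(
            f"{base_url}/ml/v1/foundation_model_specs",
            params={
                "version": "2024-09-16",
                "filters": "function_embedding,!lifecycle_withdrawn:and",
            },
            timeout=10,
        )
        response.raise_for_status()
        resources = response.json().get("resources", [])
        return sorted(spec["model_id"] for spec in resources)
    except Exception:  # mirror the component's broad fallback on any failure
        return FALLBACK_MODELS


print(fetch_ibm_models())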
"_type": "Component", "api_base": { @@ -3654,7 +3658,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and 
transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n 
build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await 
is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). 
Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n BoolInput(\n name=\"fail_safe_mode\",\n display_name=\"Fail-Safe Mode\",\n value=False,\n advanced=True,\n info=\"When enabled, errors will be logged instead of raising exceptions. 
\"\n \"The component will return None on error.\",\n real_time_refresh=True,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n async def fetch_ollama_models(self) -> list[str]:\n try:\n return await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n except Exception: # noqa: BLE001\n\n logger.exception(\"Error fetching models\")\n return []\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize OpenAI embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n try:\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = 
transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await self.fetch_ollama_models()\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize Ollama embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to authenticate with IBM watsonx.ai: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n msg = f\"Unknown provider: {provider}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n # Handle fail_safe_mode changes first - set all required fields to False if enabled\n if field_name == 
\"fail_safe_mode\":\n if field_value: # If fail_safe_mode is enabled\n build_config[\"api_key\"][\"required\"] = False\n elif hasattr(self, \"provider\"):\n # If fail_safe_mode is disabled, restore required flags based on provider\n if self.provider in [\"OpenAI\", \"IBM watsonx.ai\"]:\n build_config[\"api_key\"][\"required\"] = True\n else: # Ollama\n build_config[\"api_key\"][\"required\"] = False\n\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall 
back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" }, "dimensions": { "_input_type": "IntInput", @@ -3676,6 +3680,28 @@ "type": "int", "value": "" }, + "fail_safe_mode": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Fail-Safe Mode", + "dynamic": false, + "info": "When enabled, errors will be logged instead of raising exceptions. The component will return None on error.", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "fail_safe_mode", + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, "input_text": { "_input_type": "BoolInput", "advanced": true, @@ -3726,11 +3752,9 @@ "dynamic": false, "external_options": {}, "info": "Select the embedding model to use", + "load_from_db": false, "name": "model", - "options": [ - "bge-large:latest", - "qwen3-embedding:4b" - ], + "options": [], "options_metadata": [], "override_skip": false, "placeholder": "", @@ -3744,7 +3768,7 @@ "trace_as_metadata": true, "track_in_telemetry": true, "type": "str", - "value": "bge-large:latest" + "value": "" }, "model_kwargs": { "_input_type": "DictInput", @@ -3826,6 +3850,7 @@ "dynamic": false, "external_options": {}, "info": "Select the embedding model provider", + "load_from_db": false, "name": "provider", "options": [ "OpenAI", @@ -3952,7 +3977,7 @@ "description": "Generate embeddings using a specified provider.", "display_name": "Embedding Model", "documentation": "https://docs.langflow.org/components-embedding-models", - "edited": false, + "edited": true, "field_order": [ "provider", "api_base", @@ -3968,14 +3993,16 @@ "show_progress_bar", "model_kwargs", "truncate_input_tokens", - "input_text" + "input_text", + "fail_safe_mode" ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-26T00:05:16.029Z", + "last_updated": "2025-11-26T04:03:22.914Z", "legacy": false, + "lf_version": "1.7.0.dev21", "metadata": { - "code_hash": "9e44c83a5058", + "code_hash": "0e2d6fe67a26", "dependencies": { "dependencies": [ { @@ -3992,7 +4019,7 @@ }, { "name": "lfx", - "version": null + "version": "0.2.0.dev21" }, { "name": "langchain_ollama", @@ -4019,6 +4046,7 @@ "cache": true, "display_name": "Embedding Model", "group_outputs": false, + "hidden": null, "loop_types": null, "method": "build_embeddings", "name": "embeddings", @@ -4038,7 +4066,7 @@ "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" }, "_frontend_node_folder_id": { - "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" }, "_type": "Component", "api_base": { 
@@ -4079,7 +4107,7 @@ "password": true, "placeholder": "", "real_time_refresh": true, - "required": true, + "required": false, "show": true, "title_case": false, "track_in_telemetry": false, @@ -4154,7 +4182,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and 
transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n 
build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await 
is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). 
Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n BoolInput(\n name=\"fail_safe_mode\",\n display_name=\"Fail-Safe Mode\",\n value=False,\n advanced=True,\n info=\"When enabled, errors will be logged instead of raising exceptions. 
\"\n \"The component will return None on error.\",\n real_time_refresh=True,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n async def fetch_ollama_models(self) -> list[str]:\n try:\n return await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n except Exception: # noqa: BLE001\n\n logger.exception(\"Error fetching models\")\n return []\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize OpenAI embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n try:\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = 
transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await self.fetch_ollama_models()\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize Ollama embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to authenticate with IBM watsonx.ai: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n msg = f\"Unknown provider: {provider}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n # Handle fail_safe_mode changes first - set all required fields to False if enabled\n if field_name == 
\"fail_safe_mode\":\n if field_value: # If fail_safe_mode is enabled\n build_config[\"api_key\"][\"required\"] = False\n elif hasattr(self, \"provider\"):\n # If fail_safe_mode is disabled, restore required flags based on provider\n if self.provider in [\"OpenAI\", \"IBM watsonx.ai\"]:\n build_config[\"api_key\"][\"required\"] = True\n else: # Ollama\n build_config[\"api_key\"][\"required\"] = False\n\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall 
back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" }, "dimensions": { "_input_type": "IntInput", @@ -4176,6 +4204,28 @@ "type": "int", "value": "" }, + "fail_safe_mode": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Fail-Safe Mode", + "dynamic": false, + "info": "When enabled, errors will be logged instead of raising exceptions. The component will return None on error.", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "fail_safe_mode", + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, "input_text": { "_input_type": "BoolInput", "advanced": true, @@ -4226,6 +4276,7 @@ "dynamic": false, "external_options": {}, "info": "Select the embedding model to use", + "load_from_db": false, "name": "model", "options": [ "ibm/granite-embedding-278m-multilingual", @@ -4329,6 +4380,7 @@ "dynamic": false, "external_options": {}, "info": "Select the embedding model provider", + "load_from_db": false, "name": "provider", "options": [ "OpenAI", @@ -4435,7 +4487,7 @@ "width": 320 }, "position": { - "x": 488.01930107919225, + "x": 484.3184528995537, "y": 914.6994738821654 }, "selected": false, @@ -4443,16 +4495,16 @@ } ], "viewport": { - "x": -62.28000558582107, - "y": -442.0218158718001, - "zoom": 0.5391627896946471 + "x": -160.31786606392768, + "y": -442.1474480017346, + "zoom": 0.5404166566474254 } }, "description": "OpenRAG OpenSearch Agent", "endpoint_name": null, "id": "1098eea1-6649-4e1d-aed1-b77249fb8dd0", "is_component": false, - "last_tested_version": "1.7.0", + "last_tested_version": "1.7.0.dev21", "name": "OpenRAG OpenSearch Agent", "tags": [ "assistants", diff --git a/flows/openrag_nudges.json b/flows/openrag_nudges.json index 32c4e0c6..3d704d20 100644 --- a/flows/openrag_nudges.json +++ b/flows/openrag_nudges.json @@ -229,7 +229,7 @@ }, { "animated": false, - "className": "", + "className": "not-running", "data": { "sourceHandle": { "dataType": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding", @@ -1271,7 +1271,7 @@ ], "frozen": false, "icon": "brain-circuit", - "last_updated": "2025-11-26T00:04:41.267Z", + "last_updated": "2025-11-26T03:56:14.475Z", "legacy": false, "metadata": { "code_hash": "694ffc4b17b8", @@ -1361,7 +1361,7 @@ "value": "ebc01d31-1976-46ce-a385-b0240327226c" }, "_frontend_node_folder_id": { - "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" }, "_type": "Component", "api_key": { @@ -1679,6 +1679,7 @@ "frozen": false, "icon": "type", "legacy": false, + 
"lf_version": "1.7.0.dev21", "metadata": { "code_hash": "7b91454fe0f3", "dependencies": { @@ -1787,7 +1788,7 @@ "description": "Generate embeddings using a specified provider.", "display_name": "Embedding Model", "documentation": "https://docs.langflow.org/components-embedding-models", - "edited": false, + "edited": true, "field_order": [ "provider", "api_base", @@ -1803,14 +1804,16 @@ "show_progress_bar", "model_kwargs", "truncate_input_tokens", - "input_text" + "input_text", + "fail_safe_mode" ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-26T00:04:41.270Z", + "last_updated": "2025-11-26T04:02:08.544Z", "legacy": false, + "lf_version": "1.7.0.dev21", "metadata": { - "code_hash": "9e44c83a5058", + "code_hash": "0e2d6fe67a26", "dependencies": { "dependencies": [ { @@ -1827,7 +1830,7 @@ }, { "name": "lfx", - "version": null + "version": "0.2.0.dev21" }, { "name": "langchain_ollama", @@ -1854,6 +1857,7 @@ "cache": true, "display_name": "Embedding Model", "group_outputs": false, + "hidden": null, "loop_types": null, "method": "build_embeddings", "name": "embeddings", @@ -1873,13 +1877,13 @@ "value": "ebc01d31-1976-46ce-a385-b0240327226c" }, "_frontend_node_folder_id": { - "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" }, "_type": "Component", "api_base": { "_input_type": "MessageTextInput", "advanced": true, - "display_name": "API Base URL", + "display_name": "OpenAI API Base URL", "dynamic": false, "info": "Base URL for the API. Leave empty for default.", "input_types": [ @@ -1914,7 +1918,7 @@ "password": true, "placeholder": "", "real_time_refresh": true, - "required": true, + "required": false, "show": true, "title_case": false, "track_in_telemetry": false, @@ -1989,7 +1993,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": 
\"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise 
ValueError(msg)\n\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. 
\"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif 
field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + "value": "from typing import 
Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n BoolInput(\n name=\"fail_safe_mode\",\n display_name=\"Fail-Safe Mode\",\n value=False,\n advanced=True,\n info=\"When enabled, errors will be logged instead of raising exceptions. \"\n \"The component will return None on error.\",\n real_time_refresh=True,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n async def fetch_ollama_models(self) -> list[str]:\n try:\n return await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n except Exception: # noqa: BLE001\n\n logger.exception(\"Error fetching models\")\n return []\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n 
max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize OpenAI embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n try:\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await self.fetch_ollama_models()\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize Ollama embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n 
project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to authenticate with IBM watsonx.ai: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n msg = f\"Unknown provider: {provider}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n # Handle fail_safe_mode changes first - set all required fields to False if enabled\n if field_name == \"fail_safe_mode\":\n if field_value: # If fail_safe_mode is enabled\n build_config[\"api_key\"][\"required\"] = False\n elif hasattr(self, \"provider\"):\n # If fail_safe_mode is disabled, restore required flags based on provider\n if self.provider in [\"OpenAI\", \"IBM watsonx.ai\"]:\n build_config[\"api_key\"][\"required\"] = True\n else: # Ollama\n build_config[\"api_key\"][\"required\"] = False\n\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = 
self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" }, "dimensions": { "_input_type": "IntInput", @@ -2011,6 +2015,28 @@ "type": "int", "value": "" }, + "fail_safe_mode": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Fail-Safe Mode", + "dynamic": false, + "info": "When enabled, errors will be logged instead of raising exceptions. 
The component will return None on error.", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "fail_safe_mode", + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, "input_text": { "_input_type": "BoolInput", "advanced": true, @@ -2183,6 +2209,9 @@ "placeholder": "", "real_time_refresh": true, "required": false, + "selected_metadata": { + "icon": "OpenAI" + }, "show": true, "title_case": false, "toggle": false, @@ -2833,7 +2862,7 @@ "trace_as_metadata": true, "track_in_telemetry": false, "type": "query", - "value": "" + "value": "\"\"" }, "should_cache_vector_store": { "_input_type": "BoolInput", @@ -3002,7 +3031,7 @@ "description": "Generate embeddings using a specified provider.", "display_name": "Embedding Model", "documentation": "https://docs.langflow.org/components-embedding-models", - "edited": false, + "edited": true, "field_order": [ "provider", "api_base", @@ -3018,14 +3047,16 @@ "show_progress_bar", "model_kwargs", "truncate_input_tokens", - "input_text" + "input_text", + "fail_safe_mode" ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-26T00:04:41.271Z", + "last_updated": "2025-11-26T04:02:06.284Z", "legacy": false, + "lf_version": "1.7.0.dev21", "metadata": { - "code_hash": "9e44c83a5058", + "code_hash": "0e2d6fe67a26", "dependencies": { "dependencies": [ { @@ -3042,7 +3073,7 @@ }, { "name": "lfx", - "version": null + "version": "0.2.0.dev21" }, { "name": "langchain_ollama", @@ -3069,6 +3100,7 @@ "cache": true, "display_name": "Embedding Model", "group_outputs": false, + "hidden": null, "loop_types": null, "method": "build_embeddings", "name": "embeddings", @@ -3088,7 +3120,7 @@ "value": "ebc01d31-1976-46ce-a385-b0240327226c" }, "_frontend_node_folder_id": { - "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" }, "_type": "Component", "api_base": { @@ -3204,7 +3236,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = 
\"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = 
self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. 
\"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif 
field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + "value": "from typing import 
Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n BoolInput(\n name=\"fail_safe_mode\",\n display_name=\"Fail-Safe Mode\",\n value=False,\n advanced=True,\n info=\"When enabled, errors will be logged instead of raising exceptions. \"\n \"The component will return None on error.\",\n real_time_refresh=True,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n async def fetch_ollama_models(self) -> list[str]:\n try:\n return await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n except Exception: # noqa: BLE001\n\n logger.exception(\"Error fetching models\")\n return []\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n 
max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize OpenAI embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n try:\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await self.fetch_ollama_models()\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize Ollama embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n 
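The build_embeddings method above follows one pattern per provider: construct a primary embeddings object for the selected model, then a dict of sibling instances for every other known model, and wrap both in EmbeddingsWithModels. A minimal standalone sketch of that registry pattern, using a stand-in dataclass in place of the lfx class and a dummy factory in place of OpenAIEmbeddings/OllamaEmbeddings:

# Sketch of the "primary instance + per-model registry" pattern used by
# build_embeddings. EmbeddingsWithModels here is a stand-in dataclass; the
# real one lives in lfx.base.embeddings.embeddings_class.
from dataclasses import dataclass, field
from typing import Any, Callable


@dataclass
class EmbeddingsWithModels:  # stand-in for the lfx class
    embeddings: Any                              # instance for the selected model
    available_models: dict[str, Any] = field(default_factory=dict)


def build_registry(selected: str, model_names: list[str],
                   factory: Callable[[str], Any]) -> EmbeddingsWithModels:
    """Create one embeddings object per known model, sharing the same config."""
    return EmbeddingsWithModels(
        embeddings=factory(selected),
        available_models={name: factory(name) for name in model_names},
    )


# Usage with a dummy factory (swap in a real embeddings class in practice):
registry = build_registry(
    "text-embedding-3-small",
    ["text-embedding-3-small", "text-embedding-3-large"],
    factory=lambda name: f"<embeddings for {name}>",
)
print(sorted(registry.available_models))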
project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to authenticate with IBM watsonx.ai: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n msg = f\"Unknown provider: {provider}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n # Handle fail_safe_mode changes first - set all required fields to False if enabled\n if field_name == \"fail_safe_mode\":\n if field_value: # If fail_safe_mode is enabled\n build_config[\"api_key\"][\"required\"] = False\n elif hasattr(self, \"provider\"):\n # If fail_safe_mode is disabled, restore required flags based on provider\n if self.provider in [\"OpenAI\", \"IBM watsonx.ai\"]:\n build_config[\"api_key\"][\"required\"] = True\n else: # Ollama\n build_config[\"api_key\"][\"required\"] = False\n\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = 
self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" }, "dimensions": { "_input_type": "IntInput", @@ -3226,6 +3258,28 @@ "type": "int", "value": "" }, + "fail_safe_mode": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Fail-Safe Mode", + "dynamic": false, + "info": "When enabled, errors will be logged instead of raising exceptions. 
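Every branch above repeats the same fail-safe guard: log and return None when fail_safe_mode is enabled, otherwise raise. The guard could equally be factored into a decorator; a sketch of that idea (a hypothetical helper, not part of the component, and shown on a sync method although build_embeddings itself is async):

# Decorator form of the fail-safe guard repeated in build_embeddings above.
# Assumes the instance exposes a boolean fail_safe_mode attribute.
import functools
import logging

logger = logging.getLogger(__name__)


def fail_safe(method):
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        try:
            return method(self, *args, **kwargs)
        except Exception:
            if getattr(self, "fail_safe_mode", False):
                # fail-safe mode: record the error, hand back None
                logger.exception("%s failed; fail-safe mode returns None", method.__name__)
                return None
            raise
    return wrapper


class Demo:
    fail_safe_mode = True

    @fail_safe
    def build(self):
        raise ValueError("missing API key")


print(Demo().build())  # logs the error and prints None instead of raising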
The component will return None on error.", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "fail_safe_mode", + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, "input_text": { "_input_type": "BoolInput", "advanced": true, @@ -3276,11 +3330,9 @@ "dynamic": false, "external_options": {}, "info": "Select the embedding model to use", + "load_from_db": false, "name": "model", - "options": [ - "bge-large:latest", - "qwen3-embedding:4b" - ], + "options": [], "options_metadata": [], "override_skip": false, "placeholder": "", @@ -3376,6 +3428,7 @@ "dynamic": false, "external_options": {}, "info": "Select the embedding model provider", + "load_from_db": false, "name": "provider", "options": [ "OpenAI", @@ -3503,7 +3556,7 @@ "description": "Generate embeddings using a specified provider.", "display_name": "Embedding Model", "documentation": "https://docs.langflow.org/components-embedding-models", - "edited": false, + "edited": true, "field_order": [ "provider", "api_base", @@ -3519,14 +3572,16 @@ "show_progress_bar", "model_kwargs", "truncate_input_tokens", - "input_text" + "input_text", + "fail_safe_mode" ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-26T00:04:41.272Z", + "last_updated": "2025-11-26T04:01:20.740Z", "legacy": false, + "lf_version": "1.7.0.dev21", "metadata": { - "code_hash": "9e44c83a5058", + "code_hash": "0e2d6fe67a26", "dependencies": { "dependencies": [ { @@ -3543,7 +3598,7 @@ }, { "name": "lfx", - "version": null + "version": "0.2.0.dev21" }, { "name": "langchain_ollama", @@ -3570,6 +3625,7 @@ "cache": true, "display_name": "Embedding Model", "group_outputs": false, + "hidden": null, "loop_types": null, "method": "build_embeddings", "name": "embeddings", @@ -3589,7 +3645,7 @@ "value": "ebc01d31-1976-46ce-a385-b0240327226c" }, "_frontend_node_folder_id": { - "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" }, "_type": "Component", "api_base": { @@ -3630,7 +3686,7 @@ "password": true, "placeholder": "", "real_time_refresh": true, - "required": true, + "required": false, "show": true, "title_case": false, "track_in_telemetry": false, @@ -3705,7 +3761,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass 
EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and 
transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n 
build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await 
is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). 
Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n BoolInput(\n name=\"fail_safe_mode\",\n display_name=\"Fail-Safe Mode\",\n value=False,\n advanced=True,\n info=\"When enabled, errors will be logged instead of raising exceptions. 
\"\n \"The component will return None on error.\",\n real_time_refresh=True,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n async def fetch_ollama_models(self) -> list[str]:\n try:\n return await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n except Exception: # noqa: BLE001\n\n logger.exception(\"Error fetching models\")\n return []\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize OpenAI embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n try:\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = 
transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await self.fetch_ollama_models()\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize Ollama embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to authenticate with IBM watsonx.ai: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n msg = f\"Unknown provider: {provider}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n # Handle fail_safe_mode changes first - set all required fields to False if enabled\n if field_name == 
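The '/v1' handling above normalizes Ollama base URLs that point at the OpenAI-compatible endpoint back to the native API root. The same normalization in isolation (transform_localhost_url and the warning log are omitted):

# Standalone version of the Ollama base-URL normalization used above.
def normalize_ollama_url(url: str, default: str = "http://localhost:11434") -> str:
    url = (url or "").rstrip("/")
    if url.endswith("/v1"):
        # native Ollama API expected; drop the OpenAI-compatible suffix
        url = url.removesuffix("/v1")
    return url or default


assert normalize_ollama_url("http://localhost:11434/v1/") == "http://localhost:11434"
assert normalize_ollama_url("http://host:11434") == "http://host:11434"
assert normalize_ollama_url("") == "http://localhost:11434"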
\"fail_safe_mode\":\n if field_value: # If fail_safe_mode is enabled\n build_config[\"api_key\"][\"required\"] = False\n elif hasattr(self, \"provider\"):\n # If fail_safe_mode is disabled, restore required flags based on provider\n if self.provider in [\"OpenAI\", \"IBM watsonx.ai\"]:\n build_config[\"api_key\"][\"required\"] = True\n else: # Ollama\n build_config[\"api_key\"][\"required\"] = False\n\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall 
back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" }, "dimensions": { "_input_type": "IntInput", @@ -3727,6 +3783,28 @@ "type": "int", "value": "" }, + "fail_safe_mode": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Fail-Safe Mode", + "dynamic": false, + "info": "When enabled, errors will be logged instead of raising exceptions. The component will return None on error.", + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "fail_safe_mode", + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, "input_text": { "_input_type": "BoolInput", "advanced": true, @@ -3777,6 +3855,7 @@ "dynamic": false, "external_options": {}, "info": "Select the embedding model to use", + "load_from_db": false, "name": "model", "options": [ "ibm/granite-embedding-278m-multilingual", @@ -3880,6 +3959,7 @@ "dynamic": false, "external_options": {}, "info": "Select the embedding model provider", + "load_from_db": false, "name": "provider", "options": [ "OpenAI", @@ -3986,24 +4066,24 @@ "width": 320 }, "position": { - "x": -376.6522664393833, - "y": 1053.725431820861 + "x": -390.97188504852653, + "y": 887.8565162649519 }, "selected": false, "type": "genericNode" } ], "viewport": { - "x": 411.76655513325625, - "y": -427.61404452215936, - "zoom": 0.6865723281252197 + "x": 851.9505826903533, + "y": -490.95330887239356, + "zoom": 0.7007838038759889 } }, "description": "OpenRAG OpenSearch Nudges generator, based on the OpenSearch documents and the chat history.", "endpoint_name": null, "id": "ebc01d31-1976-46ce-a385-b0240327226c", "is_component": false, - "last_tested_version": "1.7.0", + "last_tested_version": "1.7.0.dev21", "name": "OpenRAG OpenSearch Nudges", "tags": [ "assistants", From 2fa6efeaa93ee274a1a7157798e65a4023e91299 Mon Sep 17 00:00:00 2001 From: Edwin Jose Date: Wed, 26 Nov 2025 00:43:40 -0500 Subject: [PATCH 20/43] update flows --- flows/ingestion_flow.json | 393 ++++---------------------------------- flows/openrag_agent.json | 49 ++--- flows/openrag_nudges.json | 49 ++--- 3 files changed, 88 insertions(+), 403 deletions(-) diff --git a/flows/ingestion_flow.json b/flows/ingestion_flow.json index e3e0252a..66da194f 100644 --- a/flows/ingestion_flow.json +++ b/flows/ingestion_flow.json @@ -262,7 +262,7 @@ }, { "animated": false, - "className": "", + "className": "not-running", "data": { "sourceHandle": { "dataType": "AdvancedDynamicFormBuilder", @@ -400,36 +400,6 @@ "sourceHandle": 
"{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-EAo9iœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}", "target": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4", "targetHandle": "{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4œ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}" - }, - { - "animated": false, - "className": "", - "data": { - "sourceHandle": { - "dataType": "AdvancedDynamicFormBuilder", - "id": "AdvancedDynamicFormBuilder-81Exw", - "name": "form_data", - "output_types": [ - "Data" - ] - }, - "targetHandle": { - "fieldName": "input_value", - "id": "ChatOutput-4Bw8A", - "inputTypes": [ - "Data", - "DataFrame", - "Message" - ], - "type": "other" - } - }, - "id": "xy-edge__AdvancedDynamicFormBuilder-81Exw{œdataTypeœ:œAdvancedDynamicFormBuilderœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œnameœ:œform_dataœ,œoutput_typesœ:[œDataœ]}-ChatOutput-4Bw8A{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-4Bw8Aœ,œinputTypesœ:[œDataœ,œDataFrameœ,œMessageœ],œtypeœ:œotherœ}", - "selected": false, - "source": "AdvancedDynamicFormBuilder-81Exw", - "sourceHandle": "{œdataTypeœ:œAdvancedDynamicFormBuilderœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œnameœ:œform_dataœ,œoutput_typesœ:[œDataœ]}", - "target": "ChatOutput-4Bw8A", - "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-4Bw8Aœ,œinputTypesœ:[œDataœ,œDataFrameœ,œMessageœ],œtypeœ:œotherœ}" } ], "nodes": [ @@ -697,7 +667,7 @@ ], "frozen": false, "icon": "braces", - "last_updated": "2025-11-26T04:04:23.261Z", + "last_updated": "2025-11-26T04:50:14.309Z", "legacy": false, "lf_version": "1.7.0.dev21", "metadata": {}, @@ -747,7 +717,7 @@ "value": "5488df7c-b93f-4f87-a446-b67028bc0813" }, "_frontend_node_folder_id": { - "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" + "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" }, "_type": "Component", "code": { @@ -2082,7 +2052,7 @@ ], "frozen": false, "icon": "table", - "last_updated": "2025-11-26T04:04:23.363Z", + "last_updated": "2025-11-26T04:50:14.433Z", "legacy": false, "lf_version": "1.7.0.dev21", "metadata": { @@ -2129,7 +2099,7 @@ "value": "5488df7c-b93f-4f87-a446-b67028bc0813" }, "_frontend_node_folder_id": { - "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" + "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" }, "_type": "Component", "ascending": { @@ -2533,7 +2503,7 @@ ], "frozen": false, "icon": "table", - "last_updated": "2025-11-26T04:04:23.363Z", + "last_updated": "2025-11-26T04:50:14.433Z", "legacy": false, "lf_version": "1.7.0.dev21", "metadata": { @@ -2580,7 +2550,7 @@ "value": "5488df7c-b93f-4f87-a446-b67028bc0813" }, "_frontend_node_folder_id": { - "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" + "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" }, "_type": "Component", "ascending": { @@ -2984,7 +2954,7 @@ ], "frozen": false, "icon": "table", - "last_updated": "2025-11-26T04:04:23.364Z", + "last_updated": "2025-11-26T04:50:14.434Z", "legacy": false, "lf_version": "1.7.0.dev21", "metadata": { @@ -3031,7 +3001,7 @@ "value": "5488df7c-b93f-4f87-a446-b67028bc0813" }, "_frontend_node_folder_id": { - "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" + "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" }, "_type": "Component", "ascending": { @@ -3445,7 +3415,7 @@ "description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.", "display_name": "OpenSearch (Multi-Model Multi-Embedding)", "documentation": "", - "edited": false, + "edited": true, "field_order": [ "docs_metadata", "opensearch_url", @@ 
-3474,11 +3444,10 @@ ], "frozen": false, "icon": "OpenSearch", - "last_updated": "2025-11-26T03:03:10.896Z", + "last_updated": "2025-11-26T05:11:48.277Z", "legacy": false, - "lf_version": "1.7.0.dev21", "metadata": { - "code_hash": "8c78d799fef4", + "code_hash": "000397b17863", "dependencies": { "dependencies": [ { @@ -3487,12 +3456,12 @@ }, { "name": "lfx", - "version": null + "version": "0.2.0.dev21" } ], "total_dependencies": 2 }, - "module": "lfx.components.elastic.opensearch_multimodal.OpenSearchVectorStoreComponentMultimodalMultiEmbedding" + "module": "custom_components.opensearch_multimodel_multiembedding" }, "minimized": false, "output_types": [], @@ -3502,6 +3471,7 @@ "cache": true, "display_name": "Search Results", "group_outputs": false, + "hidden": null, "loop_types": null, "method": "search_documents", "name": "search_results", @@ -3519,6 +3489,7 @@ "cache": true, "display_name": "DataFrame", "group_outputs": false, + "hidden": null, "loop_types": null, "method": "as_dataframe", "name": "dataframe", @@ -3556,7 +3527,7 @@ "value": "5488df7c-b93f-4f87-a446-b67028bc0813" }, "_frontend_node_folder_id": { - "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" + "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" }, "_type": "Component", "auth_mode": { @@ -3568,6 +3539,7 @@ "dynamic": false, "external_options": {}, "info": "Authentication method: 'basic' for username/password authentication, or 'jwt' for JSON Web Token (Bearer) authentication.", + "load_from_db": false, "name": "auth_mode", "options": [ "basic", @@ -3623,7 +3595,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return 
f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. 
\"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. \"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n            logger.error(msg)\n            raise ValueError(msg)\n\n    def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n        \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n        Amazon OpenSearch Serverless has restrictions on which vector engines\n        can be used. This method ensures the selected engine is compatible.\n\n        Args:\n            is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n            engine: The selected vector search engine\n\n        Raises:\n            ValueError: If AOSS is used with an incompatible engine\n        \"\"\"\n        if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n            msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n            raise ValueError(msg)\n\n    def _is_aoss_enabled(self, http_auth: Any) -> bool:\n        \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n        Args:\n            http_auth: The HTTP authentication object\n\n        Returns:\n            True if AOSS is enabled, False otherwise\n        \"\"\"\n        return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n    def _bulk_ingest_embeddings(\n        self,\n        client: OpenSearch,\n        index_name: str,\n        embeddings: list[list[float]],\n        texts: list[str],\n        metadatas: list[dict] | None = None,\n        ids: list[str] | None = None,\n        vector_field: str = \"vector_field\",\n        text_field: str = \"text\",\n        embedding_model: str = \"unknown\",\n        mapping: dict | None = None,\n        max_chunk_bytes: int | None = 1 * 1024 * 1024,\n        *,\n        is_aoss: bool = False,\n    ) -> list[str]:\n        \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n        This method uses bulk operations to insert documents with their vector\n        embeddings and metadata into the specified OpenSearch index. 
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n self.log(self.ingest_data)\n client = self.build_client()\n logger.warning(f\"Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def 
_add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n docs = self.ingest_data or []\n if not docs:\n self.log(\"No documents to ingest.\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n if not embeddings_list:\n msg = \"At least one embedding is required to embed documents.\"\n raise ValueError(msg)\n\n self.log(f\"Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = 
[]\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in 
self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings (threaded for concurrency) with retries\n def embed_chunk(chunk_text: str) -> list[float]:\n return selected_embedding.embed_documents([chunk_text])[0]\n\n vectors: list[list[float]] | None = None\n last_exception: Exception | None = None\n delay = 1.0\n attempts = 0\n max_attempts = 3\n\n while attempts < max_attempts:\n attempts += 1\n try:\n max_workers = min(max(len(texts), 1), 8)\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n vectors = [None] * len(texts)\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n break\n except Exception as exc:\n last_exception = exc\n if attempts >= max_attempts:\n logger.error(\n f\"Embedding generation failed for model {embedding_model} after retries\",\n error=str(exc),\n )\n raise\n logger.warning(\n \"Threaded embedding generation failed for model %s (attempt %s/%s), retrying in %.1fs\",\n embedding_model,\n attempts,\n max_attempts,\n delay,\n )\n time.sleep(delay)\n delay = min(delay * 2, 8.0)\n\n if vectors is None:\n raise RuntimeError(\n f\"Embedding generation failed for {embedding_model}: {last_exception}\"\n if last_exception\n else f\"Embedding generation failed for {embedding_model}\"\n )\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n 
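# lazily adds the chunk_embedding_{model_name} knn_vector field so new models don't require recreating the index\n            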
ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique 
embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": {\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n result = client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n return [self._get_embedding_model_name()]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. 
Auto-detects all embedding models present in the index\n        2. Generates query embeddings for ALL detected models in parallel\n        3. Combines multiple KNN queries using dis_max (picks best match)\n        4. Adds keyword search with fuzzy matching (30% weight)\n        5. Applies optional filtering and score thresholds\n        6. Returns aggregations for faceted search\n\n        Search weights:\n        - Semantic search (dis_max across all models): 70%\n        - Keyword search: 30%\n\n        Args:\n            query: Search query string (used for both vector embedding and keyword search)\n\n        Returns:\n            List of search results with page_content, metadata, and relevance scores\n\n        Raises:\n            ValueError: If embedding component is not provided or filter JSON is invalid\n        \"\"\"\n        logger.info(self.ingest_data)\n        client = self.build_client()\n        q = (query or \"\").strip()\n\n        # Parse optional filter expression\n        filter_obj = None\n        if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n            try:\n                filter_obj = json.loads(self.filter_expression)\n            except json.JSONDecodeError as e:\n                msg = f\"Invalid filter_expression JSON: {e}\"\n                raise ValueError(msg) from e\n\n        if not self.embedding:\n            msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n            raise ValueError(msg)\n\n        # Build filter clauses first so we can use them in model detection\n        filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n        # Detect available embedding models in the index (scoped by filters)\n        available_models = self._detect_available_models(client, filter_clauses)\n\n        if not available_models:\n            logger.warning(\"No embedding models found in index, using current model\")\n            available_models = [self._get_embedding_model_name()]\n\n        # Generate embeddings for ALL detected models\n        query_embeddings = {}\n\n        # Normalize embedding to list\n        embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n        # Create a comprehensive map of model names to embedding objects\n        # Check all possible identifiers (deployment, model, model_id, model_name)\n        # Also leverage available_models list from EmbeddingsWithModels\n        # Handle duplicate identifiers by creating combined keys\n        embedding_by_model = {}\n        identifier_conflicts = {}  # Track which identifiers have conflicts\n\n        for idx, emb_obj in enumerate(embeddings_list):\n            # Get all possible identifiers for this embedding\n            identifiers = []\n            deployment = getattr(emb_obj, \"deployment\", None)\n            model = getattr(emb_obj, \"model\", None)\n            model_id = getattr(emb_obj, \"model_id\", None)\n            model_name = getattr(emb_obj, \"model_name\", None)\n            dimensions = getattr(emb_obj, \"dimensions\", None)\n            emb_available_models = getattr(emb_obj, \"available_models\", None)  # this embedding's own model registry, kept distinct from the detected-models list above\n\n            logger.info(\n                f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n                f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n                f\"available_models={emb_available_models}\"\n            )\n\n            # If this embedding has available_models dict, map all models to their dedicated instances\n            if emb_available_models and isinstance(emb_available_models, dict):\n                logger.info(f\"Embedding object {idx} provides {len(emb_available_models)} models via available_models dict\")\n                for model_name_key, dedicated_embedding in emb_available_models.items():\n                    if model_name_key and str(model_name_key).strip():\n                        model_str = str(model_name_key).strip()\n                        if model_str not in embedding_by_model:\n                            # Use the dedicated embedding instance from the dict\n                            embedding_by_model[model_str] = dedicated_embedding\n                            logger.info(f\"Mapped available model 
'{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. \"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n else:\n # No matching embedding found for this model\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. 
\"\n f\"This model will be skipped. Available models: {list(embedding_by_model.keys())}\"\n )\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n\n if not query_embeddings:\n msg = \"Failed to generate embeddings for any model\"\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. 
\"\n \"Returning empty search results.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found 
{len(hits)} results\")\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n raw = self.search(self.search_query or \"\")\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n self.log(self.ingest_data)\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n" + "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", 
\"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. 
\"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. \"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. 
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n client = self.build_client()\n \n # Check if we're in ingestion-only mode (no search query)\n has_search_query = bool((self.search_query or \"\").strip())\n if not has_search_query:\n logger.debug(\"🔄 Ingestion-only mode activated: search 
operations will be skipped\")\n logger.debug(\"Starting ingestion mode...\")\n \n logger.warning(f\"Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n logger.debug(\"[INGESTION] _add_documents_to_vector_store called\")\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n \n logger.debug(f\"[INGESTION] ingest_data type: {type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}\")\n logger.debug(f\"[INGESTION] ingest_data content: {self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}\")\n\n docs = self.ingest_data or []\n if not docs:\n logger.debug(\"✓ Ingestion complete: No documents provided\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n \n # Normalize embedding to list first\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n \n # Filter out None values (fail-safe mode) - do this BEFORE checking if empty\n embeddings_list = [e for e in embeddings_list if e is not None]\n \n # NOW check if we have any valid embeddings left after filtering\n if not embeddings_list:\n logger.warning(\"All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.\")\n self.log(\"Embedding returned None (fail-safe mode enabled). 
Skipping document ingestion.\")\n return\n\n logger.debug(f\"[INGESTION] Valid embeddings after filtering: {len(embeddings_list)}\")\n self.log(f\"Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = []\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n 
identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n logger.info(f\"✓ Selected embedding model for ingestion: '{embedding_model}'\")\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n logger.debug(f\"[LF] Docs metadata {self.docs_metadata}\")\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings (threaded for concurrency) with retries\n 
def embed_chunk(chunk_text: str) -> list[float]:\n return selected_embedding.embed_documents([chunk_text])[0]\n\n vectors: list[list[float]] | None = None\n last_exception: Exception | None = None\n delay = 1.0\n attempts = 0\n max_attempts = 3\n\n while attempts < max_attempts:\n attempts += 1\n try:\n max_workers = min(max(len(texts), 1), 8)\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n vectors = [None] * len(texts)\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n break\n except Exception as exc:\n last_exception = exc\n if attempts >= max_attempts:\n logger.error(\n f\"Embedding generation failed for model {embedding_model} after retries\",\n error=str(exc),\n )\n raise\n logger.warning(\n \"Threaded embedding generation failed for model %s (attempt %s/%s), retrying in %.1fs\",\n embedding_model,\n attempts,\n max_attempts,\n delay,\n )\n time.sleep(delay)\n delay = min(delay * 2, 8.0)\n\n if vectors is None:\n raise RuntimeError(\n f\"Embedding generation failed for {embedding_model}: {last_exception}\"\n if last_exception\n else f\"Embedding generation failed for {embedding_model}\"\n )\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n logger.info(f\"✓ Ingestion 
complete: Successfully indexed {len(return_ids)} documents with model '{embedding_model}'\")\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": {\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n result = client.search(\n index=self.index_name,\n 
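# terminate_after=0 in params below asks OpenSearch not to stop\n # scanning early, presumably so the embedding_models aggregation\n # reflects the entire (filtered) corpus.\n 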
body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n return [self._get_embedding_model_name()]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. 
Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n \n # Check if embedding is None (fail-safe mode)\n if self.embedding is None or (isinstance(self.embedding, list) and all(e is None for e in self.embedding)):\n logger.error(\"Embedding returned None (fail-safe mode enabled). Cannot perform search.\")\n return []\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n # Filter out None values (fail-safe mode)\n embeddings_list = [e for e in embeddings_list if e is not None]\n \n if not embeddings_list:\n logger.error(\"No valid embeddings available after filtering None values (fail-safe mode). 
Cannot perform search.\")\n return []\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models and isinstance(available_models, dict):\n logger.info(f\"Embedding object {idx} provides {len(available_models)} models via available_models dict\")\n for model_name_key, dedicated_embedding in available_models.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model '{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. 
\"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n else:\n # No matching embedding found for this model\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. \"\n f\"This model will be skipped. Available models: {list(embedding_by_model.keys())}\"\n )\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n\n if not query_embeddings:\n msg = \"Failed to generate embeddings for any model\"\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. 
\"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n 
lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Always builds the vector store (triggering ingestion if needed), then performs\n search only if a query is provided.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n # Always build/cache the vector store to ensure ingestion happens\n if self._cached_vector_store is None:\n self.build_vector_store()\n \n # Only perform search if query is provided\n search_query = (self.search_query or \"\").strip()\n if not search_query:\n self.log(\"No search query provided - ingestion completed, returning empty results\")\n return []\n \n # Perform search with the provided query\n raw = self.search(search_query)\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if 
field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n" }, "docs_metadata": { "_input_type": "TableInput", @@ -3995,7 +3967,7 @@ "trace_as_metadata": true, "track_in_telemetry": false, "type": "query", - "value": "langflow" + "value": "" }, "should_cache_vector_store": { "_input_type": "BoolInput", @@ -4183,7 +4155,7 @@ ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-26T04:04:42.856Z", + "last_updated": "2025-11-26T04:50:14.312Z", "legacy": false, "lf_version": "1.7.0.dev21", "metadata": { @@ -4251,7 +4223,7 @@ "value": "5488df7c-b93f-4f87-a446-b67028bc0813" }, "_frontend_node_folder_id": { - "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" + "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" }, "_type": "Component", "api_base": { @@ -4712,7 +4684,7 @@ ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-26T04:04:40.931Z", + "last_updated": "2025-11-26T04:50:14.312Z", "legacy": false, "lf_version": "1.7.0.dev21", "metadata": { @@ -4780,7 +4752,7 @@ "value": "5488df7c-b93f-4f87-a446-b67028bc0813" }, "_frontend_node_folder_id": { - "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" + "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" }, "_type": "Component", "api_base": { @@ -4992,7 +4964,10 @@ "info": "Select the embedding model to use", "load_from_db": false, "name": "model", - "options": [], + "options": [ + "bge-large:latest", + "qwen3-embedding:4b" + ], "options_metadata": [], "override_skip": false, "placeholder": "", @@ -5235,7 +5210,7 @@ ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-26T04:04:37.496Z", + "last_updated": "2025-11-26T04:50:28.061Z", "legacy": false, "lf_version": "1.7.0.dev21", "metadata": { @@ -5303,7 +5278,7 @@ "value": "5488df7c-b93f-4f87-a446-b67028bc0813" }, "_frontend_node_folder_id": { - "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" + "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" }, "_type": "Component", "api_base": { @@ -5344,7 +5319,7 @@ "password": true, "placeholder": "", "real_time_refresh": true, - "required": false, + "required": true, "show": true, "title_case": false, "track_in_telemetry": false, @@ -5461,7 +5436,7 @@ "trace_as_metadata": true, "track_in_telemetry": true, "type": "bool", - "value": true + "value": false }, "input_text": { "_input_type": "BoolInput", @@ -5725,310 +5700,12 @@ }, "selected": false, "type": "genericNode" - }, - { - "data": { - "id": "ChatOutput-4Bw8A", - "node": { - "base_classes": [ - "Message" - ], - "beta": false, - "conditional_paths": [], - "custom_fields": {}, - "description": "Display a chat message in the Playground.", - "display_name": "Chat Output", - "documentation": "https://docs.langflow.org/chat-input-and-output", - "edited": false, - "field_order": [ - "input_value", - "should_store_message", - "sender", - "sender_name", - 
"session_id", - "context_id", - "data_template", - "clean_data" - ], - "frozen": false, - "icon": "MessagesSquare", - "legacy": false, - "lf_version": "1.7.0.dev21", - "metadata": { - "code_hash": "cae45e2d53f6", - "dependencies": { - "dependencies": [ - { - "name": "orjson", - "version": "3.10.15" - }, - { - "name": "fastapi", - "version": "0.120.0" - }, - { - "name": "lfx", - "version": "0.2.0.dev21" - } - ], - "total_dependencies": 3 - }, - "module": "lfx.components.input_output.chat_output.ChatOutput" - }, - "minimized": true, - "output_types": [], - "outputs": [ - { - "allows_loop": false, - "cache": true, - "display_name": "Output Message", - "group_outputs": false, - "method": "message_response", - "name": "message", - "selected": "Message", - "tool_mode": true, - "types": [ - "Message" - ], - "value": "__UNDEFINED__" - } - ], - "pinned": false, - "template": { - "_type": "Component", - "clean_data": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Basic Clean Data", - "dynamic": false, - "info": "Whether to clean data before converting to string.", - "list": false, - "list_add_label": "Add More", - "name": "clean_data", - "override_skip": false, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "bool", - "value": true - }, - "code": { - "advanced": true, - "dynamic": true, - "fileTypes": [], - "file_path": "", - "info": "", - "list": false, - "load_from_db": false, - "multiline": true, - "name": "code", - "password": false, - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "type": "code", - "value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n documentation: str = \"https://docs.langflow.org/chat-input-and-output\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n HandleInput(\n name=\"input_value\",\n display_name=\"Inputs\",\n info=\"Message to be passed as output.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. 
If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"context_id\",\n display_name=\"Context ID\",\n info=\"The context ID of the chat. Adds an extra layer to the local memory.\",\n value=\"\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n BoolInput(\n name=\"clean_data\",\n display_name=\"Basic Clean Data\",\n value=True,\n advanced=True,\n info=\"Whether to clean data before converting to string.\",\n ),\n ]\n outputs = [\n Output(\n display_name=\"Output Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n # Handle case where source is a ChatOpenAI object\n if hasattr(source, \"model_name\"):\n source_dict[\"source\"] = source.model_name\n elif hasattr(source, \"model\"):\n source_dict[\"source\"] = str(source.model)\n else:\n source_dict[\"source\"] = str(source)\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n # First convert the input to string if needed\n text = self.convert_to_string()\n\n # Get source properties\n source, _, display_name, source_id = self.get_properties_from_source_component()\n\n # Create or use existing Message object\n if isinstance(self.input_value, Message) and not self.is_connected_to_chat_input():\n message = self.input_value\n # Update message properties\n message.text = text\n else:\n message = Message(text=text)\n\n # Set message properties\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id or self.graph.session_id or \"\"\n message.context_id = self.context_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n\n # Store message if needed\n if message.session_id and self.should_store_message:\n stored_message = await self.send_message(message)\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n\n def _serialize_data(self, data: Data) -> str:\n \"\"\"Serialize Data object to JSON string.\"\"\"\n # Convert data.data to JSON-serializable format\n serializable_data = jsonable_encoder(data.data)\n # Serialize with orjson, enabling pretty printing with indentation\n json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n # Convert bytes to string and wrap in Markdown code blocks\n return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n def _validate_input(self) -> None:\n \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n if self.input_value is None:\n msg = \"Input data cannot be None\"\n raise ValueError(msg)\n if isinstance(self.input_value, list) and not all(\n isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n ):\n invalid_types = [\n type(item).__name__\n for item in self.input_value\n if not isinstance(item, Message | Data | DataFrame | str)\n ]\n msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n raise TypeError(msg)\n if not isinstance(\n self.input_value,\n Message | 
Data | DataFrame | str | list | Generator | type(None),\n ):\n type_name = type(self.input_value).__name__\n msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n raise TypeError(msg)\n\n def convert_to_string(self) -> str | Generator[Any, None, None]:\n \"\"\"Convert input data to string with proper error handling.\"\"\"\n self._validate_input()\n if isinstance(self.input_value, list):\n clean_data: bool = getattr(self, \"clean_data\", False)\n return \"\\n\".join([safe_convert(item, clean_data=clean_data) for item in self.input_value])\n if isinstance(self.input_value, Generator):\n return self.input_value\n return safe_convert(self.input_value)\n" - }, - "context_id": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Context ID", - "dynamic": false, - "info": "The context ID of the chat. Adds an extra layer to the local memory.", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "context_id", - "override_skip": false, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "" - }, - "data_template": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Data Template", - "dynamic": false, - "info": "Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "data_template", - "override_skip": false, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "{text}" - }, - "input_value": { - "_input_type": "HandleInput", - "advanced": false, - "display_name": "Inputs", - "dynamic": false, - "info": "Message to be passed as output.", - "input_types": [ - "Data", - "DataFrame", - "Message" - ], - "list": false, - "list_add_label": "Add More", - "name": "input_value", - "override_skip": false, - "placeholder": "", - "required": true, - "show": true, - "title_case": false, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "other", - "value": "" - }, - "sender": { - "_input_type": "DropdownInput", - "advanced": true, - "combobox": false, - "dialog_inputs": {}, - "display_name": "Sender Type", - "dynamic": false, - "external_options": {}, - "info": "Type of sender.", - "name": "sender", - "options": [ - "Machine", - "User" - ], - "options_metadata": [], - "override_skip": false, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "toggle": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "str", - "value": "Machine" - }, - "sender_name": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Sender Name", - "dynamic": false, - "info": "Name of the sender.", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "sender_name", - "override_skip": false, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "AI" - }, - 
"session_id": { - "_input_type": "MessageTextInput", - "advanced": true, - "display_name": "Session ID", - "dynamic": false, - "info": "The session ID of the chat. If empty, the current session ID parameter will be used.", - "input_types": [ - "Message" - ], - "list": false, - "list_add_label": "Add More", - "load_from_db": false, - "name": "session_id", - "override_skip": false, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_input": true, - "trace_as_metadata": true, - "track_in_telemetry": false, - "type": "str", - "value": "" - }, - "should_store_message": { - "_input_type": "BoolInput", - "advanced": true, - "display_name": "Store Messages", - "dynamic": false, - "info": "Store the message in the history.", - "list": false, - "list_add_label": "Add More", - "name": "should_store_message", - "override_skip": false, - "placeholder": "", - "required": false, - "show": true, - "title_case": false, - "tool_mode": false, - "trace_as_metadata": true, - "track_in_telemetry": true, - "type": "bool", - "value": true - } - }, - "tool_mode": false - }, - "showNode": true, - "type": "ChatOutput" - }, - "dragging": false, - "id": "ChatOutput-4Bw8A", - "measured": { - "height": 166, - "width": 320 - }, - "position": { - "x": 2752.913624839253, - "y": 1380.5717174281178 - }, - "selected": false, - "type": "genericNode" } ], "viewport": { - "x": 139.09990607716156, - "y": -141.6960761147153, - "zoom": 0.40088991892418724 + "x": -579.5279887575305, + "y": -602.558848067286, + "zoom": 0.6251924232576567 } }, "description": "Load your data for chat context with Retrieval Augmented Generation.", diff --git a/flows/openrag_agent.json b/flows/openrag_agent.json index 761e434d..c97f0240 100644 --- a/flows/openrag_agent.json +++ b/flows/openrag_agent.json @@ -229,7 +229,7 @@ }, { "animated": false, - "className": "not-running", + "className": "", "data": { "sourceHandle": { "dataType": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding", @@ -381,7 +381,7 @@ "frozen": false, "icon": "Mcp", "key": "mcp_lf-starter_project", - "last_updated": "2025-11-26T04:03:11.631Z", + "last_updated": "2025-11-26T05:22:26.296Z", "legacy": false, "mcpServerName": "lf-starter_project", "metadata": { @@ -433,7 +433,7 @@ "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" }, "_frontend_node_folder_id": { - "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" + "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" }, "_type": "Component", "code": { @@ -1279,7 +1279,7 @@ ], "frozen": false, "icon": "bot", - "last_updated": "2025-11-26T04:03:11.633Z", + "last_updated": "2025-11-26T05:22:26.298Z", "legacy": false, "metadata": { "code_hash": "d64b11c24a1c", @@ -1329,7 +1329,7 @@ "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" }, "_frontend_node_folder_id": { - "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" + "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" }, "_type": "Component", "add_current_date_tool": { @@ -1994,7 +1994,7 @@ ], "frozen": false, "icon": "calculator", - "last_updated": "2025-11-26T04:03:11.634Z", + "last_updated": "2025-11-26T05:22:26.299Z", "legacy": false, "metadata": { "code_hash": "acbe2603b034", @@ -2037,7 +2037,7 @@ "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" }, "_frontend_node_folder_id": { - "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" + "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" }, "_type": "Component", "code": { @@ -2180,7 +2180,7 @@ ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-26T04:03:41.263Z", + "last_updated": 
"2025-11-26T05:22:26.299Z", "legacy": false, "lf_version": "1.7.0.dev21", "metadata": { @@ -2248,7 +2248,7 @@ "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" }, "_frontend_node_folder_id": { - "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" + "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" }, "_type": "Component", "api_base": { @@ -2686,7 +2686,7 @@ "description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.", "display_name": "OpenSearch (Multi-Model Multi-Embedding)", "documentation": "", - "edited": false, + "edited": true, "field_order": [ "docs_metadata", "opensearch_url", @@ -2715,11 +2715,10 @@ ], "frozen": false, "icon": "OpenSearch", - "last_updated": "2025-11-26T04:03:11.636Z", + "last_updated": "2025-11-26T05:22:41.532Z", "legacy": false, - "lf_version": "1.7.0.dev21", "metadata": { - "code_hash": "8c78d799fef4", + "code_hash": "000397b17863", "dependencies": { "dependencies": [ { @@ -2728,12 +2727,12 @@ }, { "name": "lfx", - "version": null + "version": "0.2.0.dev21" } ], "total_dependencies": 2 }, - "module": "lfx.components.elastic.opensearch_multimodal.OpenSearchVectorStoreComponentMultimodalMultiEmbedding" + "module": "custom_components.opensearch_multimodel_multiembedding" }, "minimized": false, "output_types": [], @@ -2763,7 +2762,7 @@ "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" }, "_frontend_node_folder_id": { - "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" + "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" }, "_type": "Component", "auth_mode": { @@ -2775,6 +2774,7 @@ "dynamic": false, "external_options": {}, "info": "Authentication method: 'basic' for username/password authentication, or 'jwt' for JSON Web Token (Bearer) authentication.", + "load_from_db": false, "name": "auth_mode", "options": [ "basic", @@ -2830,7 +2830,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic 
embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. 
\"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. \"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. 
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n self.log(self.ingest_data)\n client = self.build_client()\n logger.warning(f\"Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def 
_add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n docs = self.ingest_data or []\n if not docs:\n self.log(\"No documents to ingest.\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n if not embeddings_list:\n msg = \"At least one embedding is required to embed documents.\"\n raise ValueError(msg)\n\n self.log(f\"Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = 
[]\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in 
self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings (threaded for concurrency) with retries\n def embed_chunk(chunk_text: str) -> list[float]:\n return selected_embedding.embed_documents([chunk_text])[0]\n\n vectors: list[list[float]] | None = None\n last_exception: Exception | None = None\n delay = 1.0\n attempts = 0\n max_attempts = 3\n\n while attempts < max_attempts:\n attempts += 1\n try:\n max_workers = min(max(len(texts), 1), 8)\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n vectors = [None] * len(texts)\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n break\n except Exception as exc:\n last_exception = exc\n if attempts >= max_attempts:\n logger.error(\n f\"Embedding generation failed for model {embedding_model} after retries\",\n error=str(exc),\n )\n raise\n logger.warning(\n \"Threaded embedding generation failed for model %s (attempt %s/%s), retrying in %.1fs\",\n embedding_model,\n attempts,\n max_attempts,\n delay,\n )\n time.sleep(delay)\n delay = min(delay * 2, 8.0)\n\n if vectors is None:\n raise RuntimeError(\n f\"Embedding generation failed for {embedding_model}: {last_exception}\"\n if last_exception\n else f\"Embedding generation failed for {embedding_model}\"\n )\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n 
ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique 
embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": {\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n result = client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n return [self._get_embedding_model_name()]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. 
Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None) # distinct name so the detected-models list above is not shadowed\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models_attr}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models_attr and isinstance(available_models_attr, dict):\n logger.info(f\"Embedding object {idx} provides {len(available_models_attr)} models via available_models dict\")\n for model_name_key, dedicated_embedding in available_models_attr.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model 
'{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. \"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n else:\n # No matching embedding found for this model\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. 
\"\n f\"This model will be skipped. Available models: {list(embedding_by_model.keys())}\"\n )\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n\n if not query_embeddings:\n msg = \"Failed to generate embeddings for any model\"\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. 
\"\n \"Returning empty search results.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found 
{len(hits)} results\")\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n raw = self.search(self.search_query or \"\")\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n self.log(self.ingest_data)\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n" + "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", 
\"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. 
\"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. \"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. 
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n client = self.build_client()\n \n # Check if we're in ingestion-only mode (no search query)\n has_search_query = bool((self.search_query or \"\").strip())\n if not has_search_query:\n logger.debug(\"🔄 Ingestion-only mode activated: search 
operations will be skipped\")\n logger.debug(\"Starting ingestion mode...\")\n \n logger.warning(f\"Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n logger.debug(\"[INGESTION] _add_documents_to_vector_store called\")\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n \n logger.debug(f\"[INGESTION] ingest_data type: {type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}\")\n logger.debug(f\"[INGESTION] ingest_data content: {self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}\")\n\n docs = self.ingest_data or []\n if not docs:\n logger.debug(\"✓ Ingestion complete: No documents provided\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n \n # Normalize embedding to list first\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n \n # Filter out None values (fail-safe mode) - do this BEFORE checking if empty\n embeddings_list = [e for e in embeddings_list if e is not None]\n \n # NOW check if we have any valid embeddings left after filtering\n if not embeddings_list:\n logger.warning(\"All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.\")\n self.log(\"Embedding returned None (fail-safe mode enabled). 
Skipping document ingestion.\")\n return\n\n logger.debug(f\"[INGESTION] Valid embeddings after filtering: {len(embeddings_list)}\")\n self.log(f\"Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = []\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n 
identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n logger.info(f\"✓ Selected embedding model for ingestion: '{embedding_model}'\")\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n logger.debug(f\"[LF] Docs metadata {self.docs_metadata}\")\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings (threaded for concurrency) with retries\n 
def embed_chunk(chunk_text: str) -> list[float]:\n return selected_embedding.embed_documents([chunk_text])[0]\n\n vectors: list[list[float]] | None = None\n last_exception: Exception | None = None\n delay = 1.0\n attempts = 0\n max_attempts = 3\n\n while attempts < max_attempts:\n attempts += 1\n try:\n max_workers = min(max(len(texts), 1), 8)\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n vectors = [None] * len(texts)\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n break\n except Exception as exc:\n last_exception = exc\n if attempts >= max_attempts:\n logger.error(\n f\"Embedding generation failed for model {embedding_model} after retries\",\n error=str(exc),\n )\n raise\n logger.warning(\n \"Threaded embedding generation failed for model %s (attempt %s/%s), retrying in %.1fs\",\n embedding_model,\n attempts,\n max_attempts,\n delay,\n )\n time.sleep(delay)\n delay = min(delay * 2, 8.0)\n\n if vectors is None:\n raise RuntimeError(\n f\"Embedding generation failed for {embedding_model}: {last_exception}\"\n if last_exception\n else f\"Embedding generation failed for {embedding_model}\"\n )\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n logger.info(f\"✓ Ingestion 
complete: Successfully indexed {len(return_ids)} documents with model '{embedding_model}'\")\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": {\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n result = client.search(\n index=self.index_name,\n 
body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n return [self._get_embedding_model_name()]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. 
Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n \n # Check if embedding is None (fail-safe mode)\n if self.embedding is None or (isinstance(self.embedding, list) and all(e is None for e in self.embedding)):\n logger.error(\"Embedding returned None (fail-safe mode enabled). Cannot perform search.\")\n return []\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n # Filter out None values (fail-safe mode)\n embeddings_list = [e for e in embeddings_list if e is not None]\n \n if not embeddings_list:\n logger.error(\"No valid embeddings available after filtering None values (fail-safe mode). 
Cannot perform search.\")\n return []\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models and isinstance(available_models, dict):\n logger.info(f\"Embedding object {idx} provides {len(available_models)} models via available_models dict\")\n for model_name_key, dedicated_embedding in available_models.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model '{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. 
\"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n else:\n # No matching embedding found for this model\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. \"\n f\"This model will be skipped. Available models: {list(embedding_by_model.keys())}\"\n )\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n\n if not query_embeddings:\n msg = \"Failed to generate embeddings for any model\"\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. 
\"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n 
lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Always builds the vector store (triggering ingestion if needed), then performs\n search only if a query is provided.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n # Always build/cache the vector store to ensure ingestion happens\n if self._cached_vector_store is None:\n self.build_vector_store()\n \n # Only perform search if query is provided\n search_query = (self.search_query or \"\").strip()\n if not search_query:\n self.log(\"No search query provided - ingestion completed, returning empty results\")\n return []\n \n # Perform search with the provided query\n raw = self.search(search_query)\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if 
field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n" }, "docs_metadata": { "_input_type": "TableInput", @@ -3474,7 +3474,7 @@ ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-26T04:03:32.791Z", + "last_updated": "2025-11-26T05:22:26.302Z", "legacy": false, "lf_version": "1.7.0.dev21", "metadata": { @@ -3542,7 +3542,7 @@ "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" }, "_frontend_node_folder_id": { - "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" + "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" }, "_type": "Component", "api_base": { @@ -3754,7 +3754,10 @@ "info": "Select the embedding model to use", "load_from_db": false, "name": "model", - "options": [], + "options": [ + "bge-large:latest", + "qwen3-embedding:4b" + ], "options_metadata": [], "override_skip": false, "placeholder": "", @@ -3998,7 +4001,7 @@ ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-26T04:03:22.914Z", + "last_updated": "2025-11-26T05:22:26.303Z", "legacy": false, "lf_version": "1.7.0.dev21", "metadata": { @@ -4066,7 +4069,7 @@ "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" }, "_frontend_node_folder_id": { - "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" + "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" }, "_type": "Component", "api_base": { @@ -4495,7 +4498,7 @@ } ], "viewport": { - "x": -160.31786606392768, + "x": -159.31786606392757, "y": -442.1474480017346, "zoom": 0.5404166566474254 } diff --git a/flows/openrag_nudges.json b/flows/openrag_nudges.json index 3d704d20..adebbf9e 100644 --- a/flows/openrag_nudges.json +++ b/flows/openrag_nudges.json @@ -229,7 +229,7 @@ }, { "animated": false, - "className": "not-running", + "className": "", "data": { "sourceHandle": { "dataType": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding", @@ -1271,7 +1271,7 @@ ], "frozen": false, "icon": "brain-circuit", - "last_updated": "2025-11-26T03:56:14.475Z", + "last_updated": "2025-11-26T05:25:03.272Z", "legacy": false, "metadata": { "code_hash": "694ffc4b17b8", @@ -1361,7 +1361,7 @@ "value": "ebc01d31-1976-46ce-a385-b0240327226c" }, "_frontend_node_folder_id": { - "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" + "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" }, "_type": "Component", "api_key": { @@ -1809,7 +1809,7 @@ ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-26T04:02:08.544Z", + "last_updated": "2025-11-26T05:25:03.274Z", "legacy": false, "lf_version": "1.7.0.dev21", "metadata": { @@ -1877,7 +1877,7 @@ "value": "ebc01d31-1976-46ce-a385-b0240327226c" }, "_frontend_node_folder_id": { - "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" + "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" }, "_type": "Component", "api_base": { @@ -2315,7 +2315,7 @@ "description": "Store and search documents using OpenSearch with multi-model hybrid 
semantic and keyword search.", "display_name": "OpenSearch (Multi-Model Multi-Embedding)", "documentation": "", - "edited": false, + "edited": true, "field_order": [ "docs_metadata", "opensearch_url", @@ -2344,10 +2344,10 @@ ], "frozen": false, "icon": "OpenSearch", - "last_updated": "2025-11-25T23:38:50.335Z", + "last_updated": "2025-11-26T05:25:26.304Z", "legacy": false, "metadata": { - "code_hash": "8c78d799fef4", + "code_hash": "000397b17863", "dependencies": { "dependencies": [ { @@ -2356,12 +2356,12 @@ }, { "name": "lfx", - "version": null + "version": "0.2.0.dev21" } ], "total_dependencies": 2 }, - "module": "lfx.components.elastic.opensearch_multimodal.OpenSearchVectorStoreComponentMultimodalMultiEmbedding" + "module": "custom_components.opensearch_multimodel_multiembedding" }, "minimized": false, "output_types": [], @@ -2371,11 +2371,13 @@ "cache": true, "display_name": "Search Results", "group_outputs": false, + "hidden": null, "loop_types": null, "method": "search_documents", "name": "search_results", "options": null, "required_inputs": null, + "selected": "Data", "tool_mode": true, "types": [ "Data" @@ -2387,6 +2389,7 @@ "cache": true, "display_name": "DataFrame", "group_outputs": false, + "hidden": null, "loop_types": null, "method": "as_dataframe", "name": "dataframe", @@ -2410,6 +2413,7 @@ "name": "vectorstoreconnection", "options": null, "required_inputs": null, + "selected": "VectorStore", "tool_mode": true, "types": [ "VectorStore" @@ -2420,10 +2424,10 @@ "pinned": false, "template": { "_frontend_node_flow_id": { - "value": "72c3d17c-2dac-4a73-b48a-6518473d7830" + "value": "ebc01d31-1976-46ce-a385-b0240327226c" }, "_frontend_node_folder_id": { - "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" }, "_type": "Component", "auth_mode": { @@ -2435,6 +2439,7 @@ "dynamic": false, "external_options": {}, "info": "Authentication method: 'basic' for username/password authentication, or 'jwt' for JSON Web Token (Bearer) authentication.", + "load_from_db": false, "name": "auth_mode", "options": [ "basic", @@ -2490,7 +2495,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if 
c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. 
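# --- Editor's note (sketch, not part of the patch): standalone illustration of the
# --- field-name normalization defined above; the model names are examples only.
def _normalize(name: str) -> str:
    out = name.lower()
    for sep in "-:/.":  # same separators the component replaces
        out = out.replace(sep, "_")
    out = "".join(c if c.isalnum() or c == "_" else "_" for c in out)
    while "__" in out:
        out = out.replace("__", "_")
    return out.strip("_")

for model in ("text-embedding-3-small", "qwen3-embedding:4b", "ibm/granite-embedding-107m"):
    print(f"chunk_embedding_{_normalize(model)}")
# chunk_embedding_text_embedding_3_small
# chunk_embedding_qwen3_embedding_4b
# chunk_embedding_ibm_granite_embedding_107m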
\"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. \"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
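# --- Editor's note (sketch, not part of the patch): the two filter_expression
# --- shapes accepted above, and the clauses the context-style form coerces to
# --- (mirrors the component's _coerce_filter_clauses; sample values are hypothetical).
import json

explicit = json.loads('{"filter": [{"term": {"filename": "doc.pdf"}}], "limit": 10, "score_threshold": 1.6}')
print(explicit["filter"], explicit["limit"], explicit["score_threshold"])

context_style = json.loads('{"data_sources": ["file.pdf"], "owners": ["user1", "user2"]}')
field_mapping = {"data_sources": "filename", "document_types": "mimetype", "owners": "owner"}
clauses = []
for key, values in context_style.items():
    field = field_mapping.get(key, key)
    if len(values) == 1:
        clauses.append({"term": {field: values[0]}})
    else:
        clauses.append({"terms": {field: values}})
print(clauses)
# [{'term': {'filename': 'file.pdf'}}, {'terms': {'owner': ['user1', 'user2']}}]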
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. 
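# --- Editor's note (sketch, not part of the patch): shape of a single bulk action
# --- emitted by _bulk_ingest_embeddings; vector and metadata values are fake.
import uuid

action = {
    "_op_type": "index",
    "_index": "langflow",
    "_id": str(uuid.uuid4()),  # AOSS uses "id" instead of "_id"
    "chunk_embedding_text_embedding_3_small": [0.01, -0.02, 0.03],  # truncated vector
    "text": "hello world",
    "embedding_model": "text-embedding-3-small",  # tag read back by model auto-detection
    "embedding_dimensions": 3,
    "filename": "doc.pdf",
}
print(list(action))
# helpers.bulk(client, [action], max_chunk_bytes=1 * 1024 * 1024)  # as in the method above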
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n self.log(self.ingest_data)\n client = self.build_client()\n logger.warning(f\"Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def 
_add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n docs = self.ingest_data or []\n if not docs:\n self.log(\"No documents to ingest.\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n if not embeddings_list:\n msg = \"At least one embedding is required to embed documents.\"\n raise ValueError(msg)\n\n self.log(f\"Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = 
[]\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in 
self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings (threaded for concurrency) with retries\n def embed_chunk(chunk_text: str) -> list[float]:\n return selected_embedding.embed_documents([chunk_text])[0]\n\n vectors: list[list[float]] | None = None\n last_exception: Exception | None = None\n delay = 1.0\n attempts = 0\n max_attempts = 3\n\n while attempts < max_attempts:\n attempts += 1\n try:\n max_workers = min(max(len(texts), 1), 8)\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n vectors = [None] * len(texts)\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n break\n except Exception as exc:\n last_exception = exc\n if attempts >= max_attempts:\n logger.error(\n f\"Embedding generation failed for model {embedding_model} after retries\",\n error=str(exc),\n )\n raise\n logger.warning(\n \"Threaded embedding generation failed for model %s (attempt %s/%s), retrying in %.1fs\",\n embedding_model,\n attempts,\n max_attempts,\n delay,\n )\n time.sleep(delay)\n delay = min(delay * 2, 8.0)\n\n if vectors is None:\n raise RuntimeError(\n f\"Embedding generation failed for {embedding_model}: {last_exception}\"\n if last_exception\n else f\"Embedding generation failed for {embedding_model}\"\n )\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n 
ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique 
embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": {\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n result = client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n return [self._get_embedding_model_name()]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. 
Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n # Renamed from `available_models` so it cannot shadow the detected-models list above,\n # which the later `for model_name in available_models:` loop must iterate.\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models_attr}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models_attr and isinstance(available_models_attr, dict):\n logger.info(f\"Embedding object {idx} provides {len(available_models_attr)} models via available_models dict\")\n for model_name_key, dedicated_embedding in available_models_attr.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model 
'{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. \"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n else:\n # No matching embedding found for this model\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. 
\"\n f\"This model will be skipped. Available models: {list(embedding_by_model.keys())}\"\n )\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n\n if not query_embeddings:\n msg = \"Failed to generate embeddings for any model\"\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. 
\"\n \"Returning empty search results.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found 
{len(hits)} results\")\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n raw = self.search(self.search_query or \"\")\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n self.log(self.ingest_data)\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n" + "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", 
\"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. 
\"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. \"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n            logger.error(msg)\n            raise ValueError(msg)\n\n    def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n        \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n        Amazon OpenSearch Serverless has restrictions on which vector engines\n        can be used. This method ensures the selected engine is compatible.\n\n        Args:\n            is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n            engine: The selected vector search engine\n\n        Raises:\n            ValueError: If AOSS is used with an incompatible engine\n        \"\"\"\n        if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n            msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n            raise ValueError(msg)\n\n    def _is_aoss_enabled(self, http_auth: Any) -> bool:\n        \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n        Args:\n            http_auth: The HTTP authentication object\n\n        Returns:\n            True if AOSS is enabled, False otherwise\n        \"\"\"\n        return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n    def _bulk_ingest_embeddings(\n        self,\n        client: OpenSearch,\n        index_name: str,\n        embeddings: list[list[float]],\n        texts: list[str],\n        metadatas: list[dict] | None = None,\n        ids: list[str] | None = None,\n        vector_field: str = \"vector_field\",\n        text_field: str = \"text\",\n        embedding_model: str = \"unknown\",\n        mapping: dict | None = None,\n        max_chunk_bytes: int | None = 1 * 1024 * 1024,\n        *,\n        is_aoss: bool = False,\n    ) -> list[str]:\n        \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n        This method uses bulk operations to insert documents with their vector\n        embeddings and metadata into the specified OpenSearch index. 
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n client = self.build_client()\n \n # Check if we're in ingestion-only mode (no search query)\n has_search_query = bool((self.search_query or \"\").strip())\n if not has_search_query:\n logger.debug(\"🔄 Ingestion-only mode activated: search 
operations will be skipped\")\n logger.debug(\"Starting ingestion mode...\")\n \n logger.warning(f\"Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n logger.debug(\"[INGESTION] _add_documents_to_vector_store called\")\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n \n logger.debug(f\"[INGESTION] ingest_data type: {type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}\")\n logger.debug(f\"[INGESTION] ingest_data content: {self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}\")\n\n docs = self.ingest_data or []\n if not docs:\n logger.debug(\"✓ Ingestion complete: No documents provided\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n \n # Normalize embedding to list first\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n \n # Filter out None values (fail-safe mode) - do this BEFORE checking if empty\n embeddings_list = [e for e in embeddings_list if e is not None]\n \n # NOW check if we have any valid embeddings left after filtering\n if not embeddings_list:\n logger.warning(\"All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.\")\n self.log(\"Embedding returned None (fail-safe mode enabled). 
Skipping document ingestion.\")\n return\n\n logger.debug(f\"[INGESTION] Valid embeddings after filtering: {len(embeddings_list)}\")\n self.log(f\"Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = []\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n 
identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n logger.info(f\"✓ Selected embedding model for ingestion: '{embedding_model}'\")\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n logger.debug(f\"[LF] Docs metadata {self.docs_metadata}\")\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings (threaded for concurrency) with retries\n 
def embed_chunk(chunk_text: str) -> list[float]:\n return selected_embedding.embed_documents([chunk_text])[0]\n\n vectors: list[list[float]] | None = None\n last_exception: Exception | None = None\n delay = 1.0\n attempts = 0\n max_attempts = 3\n\n while attempts < max_attempts:\n attempts += 1\n try:\n max_workers = min(max(len(texts), 1), 8)\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n vectors = [None] * len(texts)\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n break\n except Exception as exc:\n last_exception = exc\n if attempts >= max_attempts:\n logger.error(\n f\"Embedding generation failed for model {embedding_model} after retries\",\n error=str(exc),\n )\n raise\n logger.warning(\n \"Threaded embedding generation failed for model %s (attempt %s/%s), retrying in %.1fs\",\n embedding_model,\n attempts,\n max_attempts,\n delay,\n )\n time.sleep(delay)\n delay = min(delay * 2, 8.0)\n\n if vectors is None:\n raise RuntimeError(\n f\"Embedding generation failed for {embedding_model}: {last_exception}\"\n if last_exception\n else f\"Embedding generation failed for {embedding_model}\"\n )\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n logger.info(f\"✓ Ingestion 
complete: Successfully indexed {len(return_ids)} documents with model '{embedding_model}'\")\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": {\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n result = client.search(\n index=self.index_name,\n 
body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n return [self._get_embedding_model_name()]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. 
Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n \n # Check if embedding is None (fail-safe mode)\n if self.embedding is None or (isinstance(self.embedding, list) and all(e is None for e in self.embedding)):\n logger.error(\"Embedding returned None (fail-safe mode enabled). Cannot perform search.\")\n return []\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n # Filter out None values (fail-safe mode)\n embeddings_list = [e for e in embeddings_list if e is not None]\n \n if not embeddings_list:\n logger.error(\"No valid embeddings available after filtering None values (fail-safe mode). 
Cannot perform search.\")\n return []\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models and isinstance(available_models, dict):\n logger.info(f\"Embedding object {idx} provides {len(available_models)} models via available_models dict\")\n for model_name_key, dedicated_embedding in available_models.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model '{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. 
\"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n else:\n # No matching embedding found for this model\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. \"\n f\"This model will be skipped. Available models: {list(embedding_by_model.keys())}\"\n )\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n\n if not query_embeddings:\n msg = \"Failed to generate embeddings for any model\"\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. 
\"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n 
lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Always builds the vector store (triggering ingestion if needed), then performs\n search only if a query is provided.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n # Always build/cache the vector store to ensure ingestion happens\n if self._cached_vector_store is None:\n self.build_vector_store()\n \n # Only perform search if query is provided\n search_query = (self.search_query or \"\").strip()\n if not search_query:\n self.log(\"No search query provided - ingestion completed, returning empty results\")\n return []\n \n # Perform search with the provided query\n raw = self.search(search_query)\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if 
field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n" }, "docs_metadata": { "_input_type": "TableInput", @@ -2737,7 +2742,7 @@ "title_case": false, "track_in_telemetry": false, "type": "str", - "value": "JWT" + "value": "OWNER_NAME" }, "m": { "_input_type": "IntInput", @@ -2862,7 +2867,7 @@ "trace_as_metadata": true, "track_in_telemetry": false, "type": "query", - "value": "\"\"" + "value": "" }, "should_cache_vector_store": { "_input_type": "BoolInput", @@ -3052,7 +3057,7 @@ ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-26T04:02:06.284Z", + "last_updated": "2025-11-26T05:25:03.275Z", "legacy": false, "lf_version": "1.7.0.dev21", "metadata": { @@ -3120,7 +3125,7 @@ "value": "ebc01d31-1976-46ce-a385-b0240327226c" }, "_frontend_node_folder_id": { - "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" + "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" }, "_type": "Component", "api_base": { @@ -3577,7 +3582,7 @@ ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-26T04:01:20.740Z", + "last_updated": "2025-11-26T05:25:03.275Z", "legacy": false, "lf_version": "1.7.0.dev21", "metadata": { @@ -3645,7 +3650,7 @@ "value": "ebc01d31-1976-46ce-a385-b0240327226c" }, "_frontend_node_folder_id": { - "value": "a7d8cc21-8de6-4cc6-a617-3494cb304e08" + "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" }, "_type": "Component", "api_base": { @@ -4074,9 +4079,9 @@ } ], "viewport": { - "x": 851.9505826903533, - "y": -490.95330887239356, - "zoom": 0.7007838038759889 + "x": 493.20780895738403, + "y": -153.25104936517073, + "zoom": 0.502446306646234 } }, "description": "OpenRAG OpenSearch Nudges generator, based on the OpenSearch documents and the chat history.", From 998d8b91950ccd53652b673e1bf76d990a2d4465 Mon Sep 17 00:00:00 2001 From: Edwin Jose Date: Wed, 26 Nov 2025 01:15:30 -0500 Subject: [PATCH 21/43] Add fail-safe mode to embedding and OpenSearch components Introduces a 'fail_safe_mode' option to the Embedding Model and OpenSearch (Multi-Model Multi-Embedding) components, allowing errors to be logged and None returned instead of raising exceptions. Refactors embedding model fetching logic for better error handling and updates component metadata, field order, and dependencies. Also adds 'className' fields and updates frontend node folder IDs for improved UI consistency. 
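For reviewers, a minimal sketch of the error-handling pattern this commit
threads through each provider branch of build_embeddings (and, analogously,
through the OpenSearch component). Stdlib logging stands in for the
component's lfx logger, and build_embeddings_sketch and its parameters are
illustrative names only, not the component API:

    import logging

    logger = logging.getLogger(__name__)

    def build_embeddings_sketch(api_key: str | None, *, fail_safe_mode: bool = False):
        """Return an embeddings object, or None when fail-safe mode absorbs an error."""
        if not api_key:
            msg = "OpenAI API key is required when using OpenAI provider"
            if fail_safe_mode:
                logger.error(msg)  # log and degrade gracefully
                return None        # downstream consumers must tolerate None
            raise ValueError(msg)  # unchanged behavior when fail-safe is off
        # The real component also wraps provider construction in try/except
        # using the same log-and-return-None guard.
        return object()  # placeholder for the EmbeddingsWithModels instance

Because a component in fail-safe mode may legitimately start without
credentials, update_build_config also clears the api_key 'required' flag
while fail_safe_mode is enabled and restores it per provider when the mode
is turned off, so the UI no longer blocks saving such a flow.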
--- flows/openrag_url_mcp.json | 173 +++++++++++++++++++++++++--------- src/api/settings.py | 73 +++++++++++++- src/main.py | 58 +++++++++++- src/services/auth_service.py | 10 ++ src/utils/langflow_headers.py | 38 ++++++++ 5 files changed, 306 insertions(+), 46 deletions(-) diff --git a/flows/openrag_url_mcp.json b/flows/openrag_url_mcp.json index a04b926e..7ed64cab 100644 --- a/flows/openrag_url_mcp.json +++ b/flows/openrag_url_mcp.json @@ -175,6 +175,7 @@ }, { "animated": false, + "className": "", "data": { "sourceHandle": { "dataType": "SecretInput", @@ -203,6 +204,7 @@ }, { "animated": false, + "className": "", "data": { "sourceHandle": { "dataType": "SecretInput", @@ -231,6 +233,7 @@ }, { "animated": false, + "className": "", "data": { "sourceHandle": { "dataType": "SecretInput", @@ -259,6 +262,7 @@ }, { "animated": false, + "className": "", "data": { "sourceHandle": { "dataType": "SecretInput", @@ -287,6 +291,7 @@ }, { "animated": false, + "className": "", "data": { "sourceHandle": { "dataType": "AdvancedDynamicFormBuilder", @@ -314,6 +319,7 @@ }, { "animated": false, + "className": "", "data": { "sourceHandle": { "dataType": "SplitText", @@ -342,6 +348,7 @@ }, { "animated": false, + "className": "", "data": { "sourceHandle": { "dataType": "EmbeddingModel", @@ -369,6 +376,7 @@ }, { "animated": false, + "className": "", "data": { "sourceHandle": { "dataType": "EmbeddingModel", @@ -396,6 +404,7 @@ }, { "animated": false, + "className": "", "data": { "sourceHandle": { "dataType": "EmbeddingModel", @@ -1080,7 +1089,7 @@ ], "frozen": false, "icon": "table", - "last_updated": "2025-11-25T23:37:45.067Z", + "last_updated": "2025-11-26T06:11:22.958Z", "legacy": false, "metadata": { "code_hash": "904f4eaebccd", @@ -1126,7 +1135,7 @@ "value": "72c3d17c-2dac-4a73-b48a-6518473d7830" }, "_frontend_node_folder_id": { - "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" }, "_type": "Component", "ascending": { @@ -1530,7 +1539,7 @@ ], "frozen": false, "icon": "table", - "last_updated": "2025-11-25T23:37:45.068Z", + "last_updated": "2025-11-26T06:11:22.960Z", "legacy": false, "metadata": { "code_hash": "904f4eaebccd", @@ -1576,7 +1585,7 @@ "value": "72c3d17c-2dac-4a73-b48a-6518473d7830" }, "_frontend_node_folder_id": { - "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" }, "_type": "Component", "ascending": { @@ -1980,7 +1989,7 @@ ], "frozen": false, "icon": "table", - "last_updated": "2025-11-25T23:37:45.069Z", + "last_updated": "2025-11-26T06:11:22.961Z", "legacy": false, "metadata": { "code_hash": "904f4eaebccd", @@ -2026,7 +2035,7 @@ "value": "72c3d17c-2dac-4a73-b48a-6518473d7830" }, "_frontend_node_folder_id": { - "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" }, "_type": "Component", "ascending": { @@ -3012,7 +3021,7 @@ "description": "Generate embeddings using a specified provider.", "display_name": "Embedding Model", "documentation": "https://docs.langflow.org/components-embedding-models", - "edited": false, + "edited": true, "field_order": [ "provider", "api_base", @@ -3028,14 +3037,15 @@ "show_progress_bar", "model_kwargs", "truncate_input_tokens", - "input_text" + "input_text", + "fail_safe_mode" ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-25T23:37:44.980Z", + "last_updated": "2025-11-26T06:11:22.856Z", "legacy": false, "metadata": { - "code_hash": "9e44c83a5058", + "code_hash": "0e2d6fe67a26", "dependencies": { 
"dependencies": [ { @@ -3052,7 +3062,7 @@ }, { "name": "lfx", - "version": null + "version": "0.2.0.dev21" }, { "name": "langchain_ollama", @@ -3079,6 +3089,7 @@ "cache": true, "display_name": "Embedding Model", "group_outputs": false, + "hidden": null, "loop_types": null, "method": "build_embeddings", "name": "embeddings", @@ -3098,7 +3109,7 @@ "value": "72c3d17c-2dac-4a73-b48a-6518473d7830" }, "_frontend_node_folder_id": { - "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" }, "_type": "Component", "api_base": { @@ -3139,7 +3150,7 @@ "password": true, "placeholder": "", "real_time_refresh": true, - "required": true, + "required": false, "show": true, "title_case": false, "track_in_telemetry": false, @@ -3214,7 +3225,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). 
Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n 
show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n 
model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = 
self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. 
Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n BoolInput(\n name=\"fail_safe_mode\",\n display_name=\"Fail-Safe Mode\",\n value=False,\n advanced=True,\n info=\"When enabled, errors will be logged instead of raising exceptions. 
\"\n \"The component will return None on error.\",\n real_time_refresh=True,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n async def fetch_ollama_models(self) -> list[str]:\n try:\n return await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n except Exception: # noqa: BLE001\n\n logger.exception(\"Error fetching models\")\n return []\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize OpenAI embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n try:\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = 
transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await self.fetch_ollama_models()\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize Ollama embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to authenticate with IBM watsonx.ai: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n msg = f\"Unknown provider: {provider}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n # Handle fail_safe_mode changes first - set all required fields to False if enabled\n if field_name == 
\"fail_safe_mode\":\n if field_value: # If fail_safe_mode is enabled\n build_config[\"api_key\"][\"required\"] = False\n elif hasattr(self, \"provider\"):\n # If fail_safe_mode is disabled, restore required flags based on provider\n if self.provider in [\"OpenAI\", \"IBM watsonx.ai\"]:\n build_config[\"api_key\"][\"required\"] = True\n else: # Ollama\n build_config[\"api_key\"][\"required\"] = False\n\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall 
back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" }, "dimensions": { "_input_type": "IntInput", @@ -3236,6 +3247,27 @@ "type": "int", "value": "" }, + "fail_safe_mode": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Fail-Safe Mode", + "dynamic": false, + "info": "When enabled, errors will be logged instead of raising exceptions. The component will return None on error.", + "list": false, + "list_add_label": "Add More", + "name": "fail_safe_mode", + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, "input_text": { "_input_type": "BoolInput", "advanced": true, @@ -3511,7 +3543,7 @@ "description": "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.", "display_name": "OpenSearch (Multi-Model Multi-Embedding)", "documentation": "", - "edited": false, + "edited": true, "field_order": [ "docs_metadata", "opensearch_url", @@ -3540,10 +3572,10 @@ ], "frozen": false, "icon": "OpenSearch", - "last_updated": "2025-11-25T23:38:50.335Z", + "last_updated": "2025-11-26T05:27:07.589Z", "legacy": false, "metadata": { - "code_hash": "8c78d799fef4", + "code_hash": "000397b17863", "dependencies": { "dependencies": [ { @@ -3552,12 +3584,12 @@ }, { "name": "lfx", - "version": null + "version": "0.2.0.dev21" } ], "total_dependencies": 2 }, - "module": "lfx.components.elastic.opensearch_multimodal.OpenSearchVectorStoreComponentMultimodalMultiEmbedding" + "module": "custom_components.opensearch_multimodel_multiembedding" }, "minimized": false, "output_types": [], @@ -3567,6 +3599,7 @@ "cache": true, "display_name": "Search Results", "group_outputs": false, + "hidden": null, "loop_types": null, "method": "search_documents", "name": "search_results", @@ -3584,6 +3617,7 @@ "cache": true, "display_name": "DataFrame", "group_outputs": false, + "hidden": null, "loop_types": null, "method": "as_dataframe", "name": "dataframe", @@ -3621,7 +3655,7 @@ "value": "72c3d17c-2dac-4a73-b48a-6518473d7830" }, "_frontend_node_folder_id": { - "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" }, "_type": "Component", "auth_mode": { @@ -3633,6 +3667,7 @@ "dynamic": false, "external_options": {}, "info": "Authentication method: 'basic' for username/password authentication, or 'jwt' for JSON Web Token (Bearer) authentication.", + "load_from_db": false, "name": "auth_mode", "options": [ "basic", @@ -3688,7 +3723,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from __future__ import 
annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. 
It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
\"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n logger.aerror(msg)\n raise ValueError(msg)\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. 
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n self.log(self.ingest_data)\n client = self.build_client()\n logger.warning(f\"Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def 
_add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n docs = self.ingest_data or []\n if not docs:\n self.log(\"No documents to ingest.\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n if not embeddings_list:\n msg = \"At least one embedding is required to embed documents.\"\n raise ValueError(msg)\n\n self.log(f\"Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = 
[]\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in 
self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings (threaded for concurrency) with retries\n def embed_chunk(chunk_text: str) -> list[float]:\n return selected_embedding.embed_documents([chunk_text])[0]\n\n vectors: list[list[float]] | None = None\n last_exception: Exception | None = None\n delay = 1.0\n attempts = 0\n max_attempts = 3\n\n while attempts < max_attempts:\n attempts += 1\n try:\n max_workers = min(max(len(texts), 1), 8)\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n vectors = [None] * len(texts)\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n break\n except Exception as exc:\n last_exception = exc\n if attempts >= max_attempts:\n logger.error(\n f\"Embedding generation failed for model {embedding_model} after retries\",\n error=str(exc),\n )\n raise\n logger.warning(\n \"Threaded embedding generation failed for model %s (attempt %s/%s), retrying in %.1fs\",\n embedding_model,\n attempts,\n max_attempts,\n delay,\n )\n time.sleep(delay)\n delay = min(delay * 2, 8.0)\n\n if vectors is None:\n raise RuntimeError(\n f\"Embedding generation failed for {embedding_model}: {last_exception}\"\n if last_exception\n else f\"Embedding generation failed for {embedding_model}\"\n )\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n 
ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique 
embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": {\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n result = client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n return [self._get_embedding_model_name()]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. 
Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models and isinstance(available_models, dict):\n logger.info(f\"Embedding object {idx} provides {len(available_models)} models via available_models dict\")\n for model_name_key, dedicated_embedding in available_models.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model 
'{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. \"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n else:\n # No matching embedding found for this model\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. 
\"\n f\"This model will be skipped. Available models: {list(embedding_by_model.keys())}\"\n )\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n\n if not query_embeddings:\n msg = \"Failed to generate embeddings for any model\"\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. 
\"\n \"Returning empty search results.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found 
{len(hits)} results\")\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n raw = self.search(self.search_query or \"\")\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n self.log(self.ingest_data)\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n" + "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", 
\"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. 
\"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. \"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. 
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n client = self.build_client()\n \n # Check if we're in ingestion-only mode (no search query)\n has_search_query = bool((self.search_query or \"\").strip())\n if not has_search_query:\n logger.debug(\"🔄 Ingestion-only mode activated: search 
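# A minimal sketch of the two auth shapes _build_auth_kwargs() produces and how
# build_client() splices them into the client (host, credentials, and token are
# hypothetical). Basic mode yields an http_auth tuple; JWT mode yields a header
# whose value gets a "Bearer " prefix when bearer_prefix is enabled.
from opensearchpy import OpenSearch

basic_kwargs = {"http_auth": ("opensearch_user", "opensearch_password")}
jwt_kwargs = {"headers": {"Authorization": "Bearer eyJhbGciOiJIUzI1NiJ9.example"}}

client = OpenSearch(
    hosts=["https://opensearch:9200"],
    use_ssl=True,
    verify_certs=False,
    ssl_assert_hostname=False,
    ssl_show_warn=False,
    **jwt_kwargs,  # swap in **basic_kwargs for username/password auth
)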
operations will be skipped\")\n logger.debug(\"Starting ingestion mode...\")\n \n logger.warning(f\"Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n logger.debug(\"[INGESTION] _add_documents_to_vector_store called\")\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n \n logger.debug(f\"[INGESTION] ingest_data type: {type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}\")\n logger.debug(f\"[INGESTION] ingest_data content: {self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}\")\n\n docs = self.ingest_data or []\n if not docs:\n logger.debug(\"✓ Ingestion complete: No documents provided\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n \n # Normalize embedding to list first\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n \n # Filter out None values (fail-safe mode) - do this BEFORE checking if empty\n embeddings_list = [e for e in embeddings_list if e is not None]\n \n # NOW check if we have any valid embeddings left after filtering\n if not embeddings_list:\n logger.warning(\"All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.\")\n self.log(\"Embedding returned None (fail-safe mode enabled). 
Skipping document ingestion.\")\n return\n\n logger.debug(f\"[INGESTION] Valid embeddings after filtering: {len(embeddings_list)}\")\n self.log(f\"Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = []\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n 
identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n logger.info(f\"✓ Selected embedding model for ingestion: '{embedding_model}'\")\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n logger.debug(f\"[LF] Docs metadata {self.docs_metadata}\")\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings (threaded for concurrency) with retries\n 
def embed_chunk(chunk_text: str) -> list[float]:\n return selected_embedding.embed_documents([chunk_text])[0]\n\n vectors: list[list[float]] | None = None\n last_exception: Exception | None = None\n delay = 1.0\n attempts = 0\n max_attempts = 3\n\n while attempts < max_attempts:\n attempts += 1\n try:\n max_workers = min(max(len(texts), 1), 8)\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n vectors = [None] * len(texts)\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n break\n except Exception as exc:\n last_exception = exc\n if attempts >= max_attempts:\n logger.error(\n f\"Embedding generation failed for model {embedding_model} after retries\",\n error=str(exc),\n )\n raise\n logger.warning(\n \"Threaded embedding generation failed for model %s (attempt %s/%s), retrying in %.1fs\",\n embedding_model,\n attempts,\n max_attempts,\n delay,\n )\n time.sleep(delay)\n delay = min(delay * 2, 8.0)\n\n if vectors is None:\n raise RuntimeError(\n f\"Embedding generation failed for {embedding_model}: {last_exception}\"\n if last_exception\n else f\"Embedding generation failed for {embedding_model}\"\n )\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n logger.info(f\"✓ Ingestion 
complete: Successfully indexed {len(return_ids)} documents with model '{embedding_model}'\")\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": {\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n result = client.search(\n index=self.index_name,\n 
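# Worked example of the two filter formats _coerce_filter_clauses() accepts
# (all values hypothetical). Format A passes explicit term/terms clauses
# through; Format B maps context keys onto index fields (data_sources ->
# filename, document_types -> mimetype, owners -> owner), and an empty value
# list becomes the match-nothing "__IMPOSSIBLE_VALUE__" placeholder.
format_a = {"filter": [{"term": {"owner": "user1"}}], "limit": 10, "score_threshold": 1.5}
# -> [{"term": {"owner": "user1"}}]

format_b = {"data_sources": ["report.pdf"], "document_types": ["pdf", "docx"], "owners": []}
# -> [{"term": {"filename": "report.pdf"}},
#     {"terms": {"mimetype": ["pdf", "docx"]}},
#     {"term": {"owner": "__IMPOSSIBLE_VALUE__"}}]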
body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n return [self._get_embedding_model_name()]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. 
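# Sketch of how the embedding_model terms aggregation above reduces to a model
# list (bucket contents hypothetical): only non-empty bucket keys survive.
buckets = [
    {"key": "text-embedding-3-small", "doc_count": 120},
    {"key": "ibm/slate-125m-english-rtrvr", "doc_count": 40},
]
models = [b["key"] for b in buckets if b["key"]]
# -> ["text-embedding-3-small", "ibm/slate-125m-english-rtrvr"]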
Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n \n # Check if embedding is None (fail-safe mode)\n if self.embedding is None or (isinstance(self.embedding, list) and all(e is None for e in self.embedding)):\n logger.error(\"Embedding returned None (fail-safe mode enabled). Cannot perform search.\")\n return []\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n # Filter out None values (fail-safe mode)\n embeddings_list = [e for e in embeddings_list if e is not None]\n \n if not embeddings_list:\n logger.error(\"No valid embeddings available after filtering None values (fail-safe mode). 
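# Rough scoring picture implied by the boosts above (illustrative, not exact
# Lucene math): the bool "should" sums its boosted sub-scores, and dis_max
# with tie_breaker=0.0 keeps only the best per-model KNN score, so
#   score ~ 0.7 * max(knn_score per model) + 0.3 * keyword_score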
Cannot perform search.\")\n return []\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models and isinstance(available_models, dict):\n logger.info(f\"Embedding object {idx} provides {len(available_models)} models via available_models dict\")\n for model_name_key, dedicated_embedding in available_models.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model '{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. 
\"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n else:\n # No matching embedding found for this model\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. \"\n f\"This model will be skipped. Available models: {list(embedding_by_model.keys())}\"\n )\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n\n if not query_embeddings:\n msg = \"Failed to generate embeddings for any model\"\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. 
\"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n 
lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Always builds the vector store (triggering ingestion if needed), then performs\n search only if a query is provided.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n # Always build/cache the vector store to ensure ingestion happens\n if self._cached_vector_store is None:\n self.build_vector_store()\n \n # Only perform search if query is provided\n search_query = (self.search_query or \"\").strip()\n if not search_query:\n self.log(\"No search query provided - ingestion completed, returning empty results\")\n return []\n \n # Perform search with the provided query\n raw = self.search(search_query)\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if 
field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n" }, "docs_metadata": { "_input_type": "TableInput", @@ -3871,7 +3906,7 @@ "trace_as_metadata": true, "track_in_telemetry": false, "type": "str", - "value": "langflow" + "value": "documents" }, "ingest_data": { "_input_type": "HandleInput", @@ -4016,7 +4051,7 @@ "trace_as_metadata": true, "track_in_telemetry": false, "type": "str", - "value": "http://localhost:9200" + "value": "https://opensearch:9200" }, "password": { "_input_type": "SecretStrInput", @@ -4235,7 +4270,7 @@ ], "frozen": false, "icon": "braces", - "last_updated": "2025-11-25T23:37:47.269Z", + "last_updated": "2025-11-26T06:11:22.857Z", "legacy": false, "lf_version": "1.6.3.dev0", "metadata": {}, @@ -4285,7 +4320,7 @@ "value": "72c3d17c-2dac-4a73-b48a-6518473d7830" }, "_frontend_node_folder_id": { - "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" }, "_type": "Component", "code": { @@ -4969,7 +5004,7 @@ "description": "Generate embeddings using a specified provider.", "display_name": "Embedding Model", "documentation": "https://docs.langflow.org/components-embedding-models", - "edited": false, + "edited": true, "field_order": [ "provider", "api_base", @@ -4985,14 +5020,15 @@ "show_progress_bar", "model_kwargs", "truncate_input_tokens", - "input_text" + "input_text", + "fail_safe_mode" ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-25T23:39:18.966Z", + "last_updated": "2025-11-26T06:11:22.858Z", "legacy": false, "metadata": { - "code_hash": "9e44c83a5058", + "code_hash": "0e2d6fe67a26", "dependencies": { "dependencies": [ { @@ -5009,7 +5045,7 @@ }, { "name": "lfx", - "version": null + "version": "0.2.0.dev21" }, { "name": "langchain_ollama", @@ -5036,6 +5072,7 @@ "cache": true, "display_name": "Embedding Model", "group_outputs": false, + "hidden": null, "loop_types": null, "method": "build_embeddings", "name": "embeddings", @@ -5055,7 +5092,7 @@ "value": "72c3d17c-2dac-4a73-b48a-6518473d7830" }, "_frontend_node_folder_id": { - "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" }, "_type": "Component", "api_base": { @@ -5171,7 +5208,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n 
WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
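# A hedged sketch of consuming the EmbeddingsWithModels wrapper returned above
# (attribute names assumed from the constructor arguments; the OpenSearch
# component in this flow reads .available_models and calls embed_query() on
# these objects, so delegation to the primary instance is implied).
embeddings = await component.build_embeddings()  # component: an EmbeddingModelComponent, inside async code

vec = embeddings.embed_query("hello world")  # primary, pre-configured model

dedicated = embeddings.available_models.get("text-embedding-3-small")
if dedicated is not None:
    vec_small = dedicated.embed_query("hello world")  # per-model dedicated instance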
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and 
transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n 
build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await 
is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). 
Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n BoolInput(\n name=\"fail_safe_mode\",\n display_name=\"Fail-Safe Mode\",\n value=False,\n advanced=True,\n info=\"When enabled, errors will be logged instead of raising exceptions. 
\"\n \"The component will return None on error.\",\n real_time_refresh=True,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n async def fetch_ollama_models(self) -> list[str]:\n try:\n return await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n except Exception: # noqa: BLE001\n\n logger.exception(\"Error fetching models\")\n return []\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize OpenAI embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n try:\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = 
transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await self.fetch_ollama_models()\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize Ollama embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to authenticate with IBM watsonx.ai: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n msg = f\"Unknown provider: {provider}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n # Handle fail_safe_mode changes first - set all required fields to False if enabled\n if field_name == 
\"fail_safe_mode\":\n if field_value: # If fail_safe_mode is enabled\n build_config[\"api_key\"][\"required\"] = False\n elif hasattr(self, \"provider\"):\n # If fail_safe_mode is disabled, restore required flags based on provider\n if self.provider in [\"OpenAI\", \"IBM watsonx.ai\"]:\n build_config[\"api_key\"][\"required\"] = True\n else: # Ollama\n build_config[\"api_key\"][\"required\"] = False\n\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall 
back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" }, "dimensions": { "_input_type": "IntInput", @@ -5193,6 +5230,27 @@ "type": "int", "value": "" }, + "fail_safe_mode": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Fail-Safe Mode", + "dynamic": false, + "info": "When enabled, errors will be logged instead of raising exceptions. The component will return None on error.", + "list": false, + "list_add_label": "Add More", + "name": "fail_safe_mode", + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, "input_text": { "_input_type": "BoolInput", "advanced": true, @@ -5243,6 +5301,7 @@ "dynamic": false, "external_options": {}, "info": "Select the embedding model to use", + "load_from_db": false, "name": "model", "options": [], "options_metadata": [], @@ -5340,6 +5399,7 @@ "dynamic": false, "external_options": {}, "info": "Select the embedding model provider", + "load_from_db": false, "name": "provider", "options": [ "OpenAI", @@ -5467,7 +5527,7 @@ "description": "Generate embeddings using a specified provider.", "display_name": "Embedding Model", "documentation": "https://docs.langflow.org/components-embedding-models", - "edited": false, + "edited": true, "field_order": [ "provider", "api_base", @@ -5483,14 +5543,15 @@ "show_progress_bar", "model_kwargs", "truncate_input_tokens", - "input_text" + "input_text", + "fail_safe_mode" ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-25T23:39:43.564Z", + "last_updated": "2025-11-26T06:11:22.860Z", "legacy": false, "metadata": { - "code_hash": "9e44c83a5058", + "code_hash": "0e2d6fe67a26", "dependencies": { "dependencies": [ { @@ -5507,7 +5568,7 @@ }, { "name": "lfx", - "version": null + "version": "0.2.0.dev21" }, { "name": "langchain_ollama", @@ -5534,6 +5595,7 @@ "cache": true, "display_name": "Embedding Model", "group_outputs": false, + "hidden": null, "loop_types": null, "method": "build_embeddings", "name": "embeddings", @@ -5553,7 +5615,7 @@ "value": "72c3d17c-2dac-4a73-b48a-6518473d7830" }, "_frontend_node_folder_id": { - "value": "dd32f417-a152-4076-b7cb-f0a44b2f19a4" + "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" }, "_type": "Component", "api_base": { @@ -5594,7 +5656,7 @@ "password": true, "placeholder": "", "real_time_refresh": true, - "required": true, + "required": false, "show": true, "title_case": false, "track_in_telemetry": false, @@ -5669,7 +5731,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames 
import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and 
transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n 
build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await 
is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" + "value": "from typing import Any\n\nimport requests\nfrom ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.embeddings_class import EmbeddingsWithModels\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import (\n IBM_WATSONX_URLS,\n WATSONX_EMBEDDING_MODEL_NAMES,\n)\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). 
Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n real_time_refresh=True,\n refresh_button=True,\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n IntInput(\n name=\"truncate_input_tokens\",\n display_name=\"Truncate Input Tokens\",\n advanced=True,\n value=200,\n show=False,\n ),\n BoolInput(\n name=\"input_text\",\n display_name=\"Include the original text in the output\",\n value=True,\n advanced=True,\n show=False,\n ),\n BoolInput(\n name=\"fail_safe_mode\",\n display_name=\"Fail-Safe Mode\",\n value=False,\n advanced=True,\n info=\"When enabled, errors will be logged instead of raising exceptions. 
\"\n \"The component will return None on error.\",\n real_time_refresh=True,\n ),\n ]\n\n @staticmethod\n def fetch_ibm_models(base_url: str) -> list[str]:\n \"\"\"Fetch available models from the watsonx.ai API.\"\"\"\n try:\n endpoint = f\"{base_url}/ml/v1/foundation_model_specs\"\n params = {\n \"version\": \"2024-09-16\",\n \"filters\": \"function_embedding,!lifecycle_withdrawn:and\",\n }\n response = requests.get(endpoint, params=params, timeout=10)\n response.raise_for_status()\n data = response.json()\n models = [model[\"model_id\"] for model in data.get(\"resources\", [])]\n return sorted(models)\n except Exception: # noqa: BLE001\n logger.exception(\"Error fetching models\")\n return WATSONX_EMBEDDING_MODEL_NAMES\n async def fetch_ollama_models(self) -> list[str]:\n try:\n return await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n except Exception: # noqa: BLE001\n\n logger.exception(\"Error fetching models\")\n return []\n async def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n # Create the primary embedding instance\n embeddings_instance = OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in OPENAI_EMBEDDING_MODEL_NAMES:\n available_models_dict[model_name] = OpenAIEmbeddings(\n model=model_name,\n dimensions=dimensions or None, # Use same dimensions config for all\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize OpenAI embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n try:\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = 
transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. \"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n final_base_url = transformed_base_url or \"http://localhost:11434\"\n\n # Create the primary embedding instance\n embeddings_instance = OllamaEmbeddings(\n model=model,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n # Fetch available Ollama models\n available_model_names = await self.fetch_ollama_models()\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = OllamaEmbeddings(\n model=model_name,\n base_url=final_base_url,\n **model_kwargs,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to initialize Ollama embeddings: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n try:\n from ibm_watsonx_ai import APIClient, Credentials\n\n final_url = base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\"\n\n credentials = Credentials(\n api_key=self.api_key,\n url=final_url,\n )\n\n api_client = APIClient(credentials)\n\n params = {\n EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,\n EmbedTextParamsMetaNames.RETURN_OPTIONS: {\"input_text\": self.input_text},\n }\n\n # Create the primary embedding instance\n embeddings_instance = WatsonxEmbeddings(\n model_id=model,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n # Fetch available IBM watsonx.ai models\n available_model_names = self.fetch_ibm_models(final_url)\n\n # Create dedicated instances for each available model\n available_models_dict = {}\n for model_name in available_model_names:\n available_models_dict[model_name] = WatsonxEmbeddings(\n model_id=model_name,\n params=params,\n watsonx_client=api_client,\n project_id=project_id,\n )\n\n return EmbeddingsWithModels(\n embeddings=embeddings_instance,\n available_models=available_models_dict,\n )\n except Exception as e:\n msg = f\"Failed to authenticate with IBM watsonx.ai: {e}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise\n\n msg = f\"Unknown provider: {provider}\"\n if self.fail_safe_mode:\n logger.error(msg)\n return None\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n # Handle fail_safe_mode changes first - set all required fields to False if enabled\n if field_name == 
\"fail_safe_mode\":\n if field_value: # If fail_safe_mode is enabled\n build_config[\"api_key\"][\"required\"] = False\n elif hasattr(self, \"provider\"):\n # If fail_safe_mode is disabled, restore required flags based on provider\n if self.provider in [\"OpenAI\", \"IBM watsonx.ai\"]:\n build_config[\"api_key\"][\"required\"] = True\n else: # Ollama\n build_config[\"api_key\"][\"required\"] = False\n\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n build_config[\"truncate_input_tokens\"][\"show\"] = False\n build_config[\"input_text\"][\"show\"] = False\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n # Only set required=True if fail_safe_mode is not enabled\n build_config[\"api_key\"][\"required\"] = not (hasattr(self, \"fail_safe_mode\") and self.fail_safe_mode)\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n build_config[\"truncate_input_tokens\"][\"show\"] = True\n build_config[\"input_text\"][\"show\"] = True\n elif field_name == \"base_url_ibm_watsonx\":\n build_config[\"model\"][\"options\"] = self.fetch_ibm_models(base_url=field_value)\n build_config[\"model\"][\"value\"] = self.fetch_ibm_models(base_url=field_value)[0]\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider == \"Ollama\":\n # Use field_value if provided, otherwise fall 
back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await self.fetch_ollama_models()\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n" }, "dimensions": { "_input_type": "IntInput", @@ -5691,6 +5753,27 @@ "type": "int", "value": "" }, + "fail_safe_mode": { + "_input_type": "BoolInput", + "advanced": true, + "display_name": "Fail-Safe Mode", + "dynamic": false, + "info": "When enabled, errors will be logged instead of raising exceptions. The component will return None on error.", + "list": false, + "list_add_label": "Add More", + "name": "fail_safe_mode", + "override_skip": false, + "placeholder": "", + "real_time_refresh": true, + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "track_in_telemetry": true, + "type": "bool", + "value": true + }, "input_text": { "_input_type": "BoolInput", "advanced": true, @@ -5741,6 +5824,7 @@ "dynamic": false, "external_options": {}, "info": "Select the embedding model to use", + "load_from_db": false, "name": "model", "options": [ "ibm/granite-embedding-278m-multilingual", @@ -5844,6 +5928,7 @@ "dynamic": false, "external_options": {}, "info": "Select the embedding model provider", + "load_from_db": false, "name": "provider", "options": [ "OpenAI", @@ -5958,17 +6043,17 @@ } ], "viewport": { - "x": -223.95247595079695, - "y": -67.29219204632898, - "zoom": 0.36800352363368594 + "x": -907.3774606121137, + "y": -579.4662259269908, + "zoom": 0.6003073886978839 } }, "description": "This flow is to ingest the URL to open search.", "endpoint_name": null, "id": "72c3d17c-2dac-4a73-b48a-6518473d7830", "is_component": false, - "last_tested_version": "1.7.0", "mcp_enabled": true, + "last_tested_version": "1.7.0.dev21", "name": "OpenSearch URL Ingestion Flow", "tags": [ "openai", diff --git a/src/api/settings.py b/src/api/settings.py index 8cde2c3b..178778cd 100644 --- a/src/api/settings.py +++ b/src/api/settings.py @@ -14,6 +14,7 @@ from config.settings import ( clients, get_openrag_config, config_manager, + is_no_auth_mode, ) from api.provider_validation import validate_provider_setup @@ -637,6 +638,41 @@ async def update_settings(request, session_manager): logger.info( f"Set SELECTED_EMBEDDING_MODEL global variable to {current_config.knowledge.embedding_model}" ) + + # Update MCP servers with provider credentials + try: + from services.langflow_mcp_service import LangflowMCPService + from utils.langflow_headers import build_mcp_global_vars_from_config + + mcp_service = LangflowMCPService() + + # Build global vars using utility function + mcp_global_vars = build_mcp_global_vars_from_config(current_config) + + # In no-auth mode, add the anonymous JWT token and user details + if is_no_auth_mode() and session_manager: + from session_manager import AnonymousUser + + # Create/get 
anonymous JWT for no-auth mode
+                    anonymous_jwt = session_manager.get_effective_jwt_token(None, None)
+                    if anonymous_jwt:
+                        mcp_global_vars["JWT"] = anonymous_jwt
+
+                    # Add anonymous user details
+                    anonymous_user = AnonymousUser()
+                    mcp_global_vars["OWNER"] = anonymous_user.user_id  # "anonymous"
+                    mcp_global_vars["OWNER_NAME"] = f'"{anonymous_user.name}"'  # "Anonymous User" (quoted)
+                    mcp_global_vars["OWNER_EMAIL"] = anonymous_user.email  # "anonymous@localhost"
+
+                    logger.debug("Added anonymous JWT and user details to MCP servers for no-auth mode")
+
+                if mcp_global_vars:
+                    result = await mcp_service.update_mcp_servers_with_global_vars(mcp_global_vars)
+                    logger.info("Updated MCP servers with provider credentials after settings change", **result)
+
+            except Exception as mcp_error:
+                logger.warning(f"Failed to update MCP servers after settings change: {str(mcp_error)}")
+                # Don't fail the entire settings update if MCP update fails
     except Exception as e:
         logger.error(f"Failed to update Langflow settings: {str(e)}")
@@ -655,7 +691,7 @@
     )
 
 
-async def onboarding(request, flows_service):
+async def onboarding(request, flows_service, session_manager=None):
     """Handle onboarding configuration setup"""
     try:
         # Get current configuration
@@ -943,6 +979,41 @@
             logger.info(
                 f"Set SELECTED_EMBEDDING_MODEL global variable to {current_config.knowledge.embedding_model}"
             )
+
+            # Update MCP servers with provider credentials during onboarding
+            try:
+                from services.langflow_mcp_service import LangflowMCPService
+                from utils.langflow_headers import build_mcp_global_vars_from_config
+
+                mcp_service = LangflowMCPService()
+
+                # Build global vars using utility function
+                mcp_global_vars = build_mcp_global_vars_from_config(current_config)
+
+                # In no-auth mode, add the anonymous JWT token and user details
+                if is_no_auth_mode() and session_manager:
+                    from session_manager import AnonymousUser
+
+                    # Create/get anonymous JWT for no-auth mode
+                    anonymous_jwt = session_manager.get_effective_jwt_token(None, None)
+                    if anonymous_jwt:
+                        mcp_global_vars["JWT"] = anonymous_jwt
+
+                    # Add anonymous user details
+                    anonymous_user = AnonymousUser()
+                    mcp_global_vars["OWNER"] = anonymous_user.user_id  # "anonymous"
+                    mcp_global_vars["OWNER_NAME"] = f'"{anonymous_user.name}"'  # "Anonymous User" (quoted)
+                    mcp_global_vars["OWNER_EMAIL"] = anonymous_user.email  # "anonymous@localhost"
+
+                    logger.debug("Added anonymous JWT and user details to MCP servers for no-auth mode during onboarding")
+
+                if mcp_global_vars:
+                    result = await mcp_service.update_mcp_servers_with_global_vars(mcp_global_vars)
+                    logger.info("Updated MCP servers with provider credentials during onboarding", **result)
+
+            except Exception as mcp_error:
+                logger.warning(f"Failed to update MCP servers during onboarding: {str(mcp_error)}")
+                # Don't fail onboarding if MCP update fails
         except Exception as e:
             logger.error(
diff --git a/src/main.py b/src/main.py
index ca2f6b11..59584298 100644
--- a/src/main.py
+++ b/src/main.py
@@ -433,6 +433,55 @@ async def _ingest_default_documents_openrag(services, file_paths):
     )
 
 
+async def _update_mcp_servers_with_provider_credentials(services):
+    """Update MCP servers with provider credentials at startup.
+
+    This is especially important for no-auth mode where users don't go through
+    the OAuth login flow that would normally set these credentials.
+    """
+    try:
+        auth_service = services.get("auth_service")
+        session_manager = services.get("session_manager")
+
+        if not auth_service or not auth_service.langflow_mcp_service:
+            logger.debug("MCP service not available, skipping credential update")
+            return
+
+        config = get_openrag_config()
+
+        # Build global vars with provider credentials using utility function
+        from utils.langflow_headers import build_mcp_global_vars_from_config
+
+        global_vars = build_mcp_global_vars_from_config(config)
+
+        # In no-auth mode, add the anonymous JWT token and user details
+        if is_no_auth_mode() and session_manager:
+            from session_manager import AnonymousUser
+
+            # Create/get anonymous JWT for no-auth mode
+            anonymous_jwt = session_manager.get_effective_jwt_token(None, None)
+            if anonymous_jwt:
+                global_vars["JWT"] = anonymous_jwt
+
+            # Add anonymous user details
+            anonymous_user = AnonymousUser()
+            global_vars["OWNER"] = anonymous_user.user_id  # "anonymous"
+            global_vars["OWNER_NAME"] = f'"{anonymous_user.name}"'  # "Anonymous User" (quoted for spaces)
+            global_vars["OWNER_EMAIL"] = anonymous_user.email  # "anonymous@localhost"
+
+            logger.info("Added anonymous JWT and user details to MCP servers for no-auth mode")
+
+        if global_vars:
+            result = await auth_service.langflow_mcp_service.update_mcp_servers_with_global_vars(global_vars)
+            logger.info("Updated MCP servers with provider credentials at startup", **result)
+        else:
+            logger.debug("No provider credentials configured, skipping MCP server update")
+
+    except Exception as e:
+        logger.warning("Failed to update MCP servers with provider credentials at startup", error=str(e))
+        # Don't fail startup if MCP update fails
+
+
 async def startup_tasks(services):
     """Startup tasks"""
     logger.info("Starting startup tasks")
@@ -445,6 +494,9 @@ async def startup_tasks(services):
 
     # Configure alerting security
     await configure_alerting_security()
+
+    # Update MCP servers with provider credentials (especially important for no-auth mode)
+    await _update_mcp_servers_with_provider_credentials(services)
 
 
 async def initialize_services():
@@ -1052,7 +1104,11 @@ async def create_app():
         Route(
             "/onboarding",
             require_auth(services["session_manager"])(
-                partial(settings.onboarding, flows_service=services["flows_service"])
+                partial(
+                    settings.onboarding,
+                    flows_service=services["flows_service"],
+                    session_manager=services["session_manager"]
+                )
             ),
             methods=["POST"],
         ),
diff --git a/src/services/auth_service.py b/src/services/auth_service.py
index f58997ac..d27d1eb6 100644
--- a/src/services/auth_service.py
+++ b/src/services/auth_service.py
@@ -308,6 +308,16 @@ class AuthService:
                 global_vars["OWNER_NAME"] = str(f"\"{owner_name}\"")
             if user_info.get("email"):
                 global_vars["OWNER_EMAIL"] = user_info.get("email")
+
+            # Add provider credentials to MCP servers using utility function
+            from config.settings import get_openrag_config
+            from utils.langflow_headers import build_mcp_global_vars_from_config
+
+            config = get_openrag_config()
+            provider_vars = build_mcp_global_vars_from_config(config)
+
+            # Merge provider credentials with user info
+            global_vars.update(provider_vars)
 
             # Run in background to avoid delaying login flow
             task = asyncio.create_task(
diff --git a/src/utils/langflow_headers.py b/src/utils/langflow_headers.py
index e97fd0c4..e3447e61 100644
--- a/src/utils/langflow_headers.py
+++ b/src/utils/langflow_headers.py
@@ -31,3 +31,41 @@ def add_provider_credentials_to_headers(headers: Dict[str, str], config) -> None
         ollama_endpoint = transform_localhost_url(config.providers.ollama.endpoint)
         headers["X-LANGFLOW-GLOBAL-VAR-OLLAMA_BASE_URL"] = str(ollama_endpoint)
+
+def build_mcp_global_vars_from_config(config) -> Dict[str, str]:
+    """Build MCP global variables dictionary from OpenRAG configuration.
+
+    Args:
+        config: OpenRAGConfig object containing provider configurations
+
+    Returns:
+        Dictionary of global variables for MCP servers (without X-Langflow-Global-Var prefix)
+    """
+    global_vars = {}
+
+    # Add OpenAI credentials
+    if config.providers.openai.api_key:
+        global_vars["OPENAI_API_KEY"] = config.providers.openai.api_key
+
+    # Add Anthropic credentials
+    if config.providers.anthropic.api_key:
+        global_vars["ANTHROPIC_API_KEY"] = config.providers.anthropic.api_key
+
+    # Add WatsonX credentials
+    if config.providers.watsonx.api_key:
+        global_vars["WATSONX_API_KEY"] = config.providers.watsonx.api_key
+
+    if config.providers.watsonx.project_id:
+        global_vars["WATSONX_PROJECT_ID"] = config.providers.watsonx.project_id
+
+    # Add Ollama endpoint (with localhost transformation)
+    if config.providers.ollama.endpoint:
+        ollama_endpoint = transform_localhost_url(config.providers.ollama.endpoint)
+        global_vars["OLLAMA_BASE_URL"] = ollama_endpoint
+
+    # Add selected embedding model
+    if config.knowledge.embedding_model:
+        global_vars["SELECTED_EMBEDDING_MODEL"] = config.knowledge.embedding_model
+
+    return global_vars
+
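A quick way to sanity-check the helper introduced above, without a running OpenRAG stack, is to hand it a stand-in config. The sketch below is illustrative only: SimpleNamespace plays the role of the real OpenRAGConfig, and the expected output is spelled out by mirroring the helper's skip-empty-fields logic rather than importing it.

from types import SimpleNamespace

# Stand-in for the real OpenRAGConfig (illustrative values only).
config = SimpleNamespace(
    providers=SimpleNamespace(
        openai=SimpleNamespace(api_key="sk-example"),
        anthropic=SimpleNamespace(api_key=None),
        watsonx=SimpleNamespace(api_key=None, project_id=None),
        ollama=SimpleNamespace(endpoint=None),
    ),
    knowledge=SimpleNamespace(embedding_model="text-embedding-3-small"),
)

# Mirror of the skip-empty-fields logic in build_mcp_global_vars_from_config,
# minus the localhost transform, to show the expected shape of its output.
expected = {}
if config.providers.openai.api_key:
    expected["OPENAI_API_KEY"] = config.providers.openai.api_key
if config.knowledge.embedding_model:
    expected["SELECTED_EMBEDDING_MODEL"] = config.knowledge.embedding_model

print(expected)
# {'OPENAI_API_KEY': 'sk-example', 'SELECTED_EMBEDDING_MODEL': 'text-embedding-3-small'}

Unset providers simply drop out of the dictionary, which is why the MCP update paths can call the helper unconditionally and only push variables when the result is non-empty.
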

From fefcede8b5645c9932a586ffffb2e69273e1ab73 Mon Sep 17 00:00:00 2001
From: phact
Date: Wed, 26 Nov 2025 03:47:47 -0500
Subject: [PATCH 23/43] also proactively check native port

---
 src/tui/managers/docling_manager.py | 23 ++++++++++++++++
 src/tui/screens/monitor.py          | 13 +++++++++
 src/tui/screens/welcome.py          | 42 +++++++++++++++++++++++++++++
 3 files changed, 78 insertions(+)

diff --git a/src/tui/managers/docling_manager.py b/src/tui/managers/docling_manager.py
index 109cb7c1..7f7c2a78 100644
--- a/src/tui/managers/docling_manager.py
+++ b/src/tui/managers/docling_manager.py
@@ -143,6 +143,29 @@ class DoclingManager:
             self._external_process = False
             return False
 
+    def check_port_available(self) -> tuple[bool, Optional[str]]:
+        """Check if the native service port is available.
+
+        Returns:
+            Tuple of (available, error_message)
+        """
+        import socket
+
+        try:
+            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            sock.settimeout(0.5)
+            result = sock.connect_ex(('127.0.0.1', self._port))
+            sock.close()
+
+            if result == 0:
+                # Port is in use
+                return False, f"Port {self._port} is already in use"
+            return True, None
+        except Exception as e:
+            logger.debug(f"Error checking port {self._port}: {e}")
+            # If we can't check, assume it's available
+            return True, None
+
     def get_status(self) -> Dict[str, Any]:
         """Get current status of docling serve."""
         # Check for starting state first
diff --git a/src/tui/screens/monitor.py b/src/tui/screens/monitor.py
index 601566b7..d72ba619 100644
--- a/src/tui/screens/monitor.py
+++ b/src/tui/screens/monitor.py
@@ -415,6 +415,19 @@ class MonitorScreen(Screen):
         """Start docling serve."""
         self.operation_in_progress = True
         try:
+            # Check for port conflicts before attempting to start
+            port_available, error_msg = self.docling_manager.check_port_available()
+            if not port_available:
+                self.notify(
+                    f"Cannot start docling serve: {error_msg}. "
+                    f"Please stop the conflicting service first.",
+                    severity="error",
+                    timeout=10
+                )
+                # Refresh to show current state
+                await self._refresh_services()
+                return
+
             # Start the service (this sets _starting = True internally at the start)
             # Create task and let it begin executing (which sets the flag)
             start_task = asyncio.create_task(self.docling_manager.start())
diff --git a/src/tui/screens/welcome.py b/src/tui/screens/welcome.py
index 64ad888a..673fae5b 100644
--- a/src/tui/screens/welcome.py
+++ b/src/tui/screens/welcome.py
@@ -385,6 +385,34 @@ class WelcomeScreen(Screen):
 
     async def _start_all_services(self) -> None:
         """Start all services: containers first, then native services."""
+        # Check for port conflicts before attempting to start anything
+        conflicts = []
+
+        # Check container ports
+        if self.container_manager.is_available():
+            ports_available, port_conflicts = await self.container_manager.check_ports_available()
+            if not ports_available:
+                for service_name, port, error_msg in port_conflicts[:3]:  # Show first 3
+                    conflicts.append(f"{service_name} (port {port})")
+                if len(port_conflicts) > 3:
+                    conflicts.append(f"and {len(port_conflicts) - 3} more")
+
+        # Check native service port
+        port_available, error_msg = self.docling_manager.check_port_available()
+        if not port_available:
+            conflicts.append(f"docling (port {self.docling_manager._port})")
+
+        # If there are any conflicts, show error and return
+        if conflicts:
+            conflict_str = ", ".join(conflicts)
+            self.notify(
+                f"Cannot start services: Port conflicts detected for {conflict_str}. "
+                f"Please stop the conflicting services first.",
+                severity="error",
+                timeout=10
+            )
+            return
+
         # Step 1: Start container services first (to create the network)
         if self.container_manager.is_available():
             command_generator = self.container_manager.start_services()
@@ -410,6 +438,20 @@ class WelcomeScreen(Screen):
     async def _start_native_services_after_containers(self) -> None:
         """Start native services after containers have been started."""
         if not self.docling_manager.is_running():
+            # Check for port conflicts before attempting to start
+            port_available, error_msg = self.docling_manager.check_port_available()
+            if not port_available:
+                self.notify(
+                    f"Cannot start native services: {error_msg}. "
+                    f"Please stop the conflicting service first.",
+                    severity="error",
+                    timeout=10
+                )
+                # Update state and return
+                self.docling_running = False
+                await self._refresh_welcome_content()
+                return
+
             self.notify("Starting native services...", severity="information")
             success, message = await self.docling_manager.start()
             if success:
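
[Editor's illustration] The pre-flight checks above all reduce to a TCP `connect_ex` probe. A standalone sketch of the same technique; the port number is a placeholder, since docling's actual port comes from `self._port` and is not shown in this patch:

```python
# Minimal sketch of the probe used by check_port_available(); connect_ex
# returns 0 when something is already accepting connections on the port.
import socket

def port_in_use(port: int, host: str = "127.0.0.1", timeout: float = 0.5) -> bool:
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.settimeout(timeout)
        return sock.connect_ex((host, port)) == 0

if port_in_use(5001):  # 5001 is illustrative, not docling's documented port
    print("Port already in use; stop the conflicting service first.")
```

Note the check is advisory: it only detects a live listener at probe time, so a race between the probe and the actual bind remains possible, and the start path still needs its own error handling.
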
" + f"Please stop the conflicting services first.", + severity="error", + timeout=10 + ) + return + # Step 1: Start container services first (to create the network) if self.container_manager.is_available(): command_generator = self.container_manager.start_services() @@ -410,6 +438,20 @@ class WelcomeScreen(Screen): async def _start_native_services_after_containers(self) -> None: """Start native services after containers have been started.""" if not self.docling_manager.is_running(): + # Check for port conflicts before attempting to start + port_available, error_msg = self.docling_manager.check_port_available() + if not port_available: + self.notify( + f"Cannot start native services: {error_msg}. " + f"Please stop the conflicting service first.", + severity="error", + timeout=10 + ) + # Update state and return + self.docling_running = False + await self._refresh_welcome_content() + return + self.notify("Starting native services...", severity="information") success, message = await self.docling_manager.start() if success: From 58528dc541cf05b8c5072dd85656d2190d43dd98 Mon Sep 17 00:00:00 2001 From: phact Date: Wed, 26 Nov 2025 03:04:53 -0500 Subject: [PATCH 24/43] clean up runner --- .github/workflows/test-integration.yml | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/.github/workflows/test-integration.yml b/.github/workflows/test-integration.yml index 1b4c6162..28641819 100644 --- a/.github/workflows/test-integration.yml +++ b/.github/workflows/test-integration.yml @@ -31,11 +31,14 @@ jobs: steps: - run: df -h - #- name: "node-cleanup" - #run: | - # sudo rm -rf /usr/share/dotnet /usr/local/lib/android /opt/ghc /opt/hostedtoolcache/CodeQL - # sudo docker image prune --all --force - # sudo docker builder prune -a + + - name: Clean runner workspace + run: | + # Clean old job workspaces + rm -rf ${GITHUB_WORKSPACE}/../../../_work/* || true + # Clean runner logs + rm -rf ${GITHUB_WORKSPACE}/../../../_diag/* || true + - run: df -h - name: Checkout uses: actions/checkout@v4 From c276060cb1e0fbc15f272fdea1149c483e07929e Mon Sep 17 00:00:00 2001 From: phact Date: Wed, 26 Nov 2025 03:53:43 -0500 Subject: [PATCH 25/43] fix cleanup --- .github/workflows/test-integration.yml | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/.github/workflows/test-integration.yml b/.github/workflows/test-integration.yml index 28641819..2614699c 100644 --- a/.github/workflows/test-integration.yml +++ b/.github/workflows/test-integration.yml @@ -34,14 +34,23 @@ jobs: - name: Clean runner workspace run: | - # Clean old job workspaces - rm -rf ${GITHUB_WORKSPACE}/../../../_work/* || true - # Clean runner logs - rm -rf ${GITHUB_WORKSPACE}/../../../_diag/* || true + # Save current workspace path + CURRENT_WORKSPACE="${GITHUB_WORKSPACE}" + # Clean old job workspaces except current one + find ${GITHUB_WORKSPACE}/../../../_work -maxdepth 2 -type d -mtime +1 -not -path "${CURRENT_WORKSPACE}*" -exec rm -rf {} + 2>/dev/null || true + # Clean runner logs older than 1 day + find ${GITHUB_WORKSPACE}/../../../_diag -type f -mtime +1 -delete 2>/dev/null || true - run: df -h + - name: Checkout uses: actions/checkout@v4 + + - name: Verify workspace + run: | + echo "Current directory: $(pwd)" + echo "Workspace: ${GITHUB_WORKSPACE}" + ls -la - name: Set up UV uses: astral-sh/setup-uv@v3 From bd36727eb018abe78fdb6647e3f9959a22d73de3 Mon Sep 17 00:00:00 2001 From: phact Date: Wed, 26 Nov 2025 03:58:58 -0500 Subject: [PATCH 26/43] cut v0.1.40 --- pyproject.toml | 2 +- uv.lock | 

From bd36727eb018abe78fdb6647e3f9959a22d73de3 Mon Sep 17 00:00:00 2001
From: phact
Date: Wed, 26 Nov 2025 03:58:58 -0500
Subject: [PATCH 26/43] cut v0.1.40

---
 pyproject.toml | 2 +-
 uv.lock        | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 334e045e..faa5820b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "openrag"
-version = "0.1.39-rc3"
+version = "0.1.40"
 description = "Add your description here"
 readme = "README.md"
 requires-python = ">=3.13"
diff --git a/uv.lock b/uv.lock
index e99e9dc8..788cb852 100644
--- a/uv.lock
+++ b/uv.lock
@@ -2352,7 +2352,7 @@ wheels = [
 
 [[package]]
 name = "openrag"
-version = "0.1.37"
+version = "0.1.40"
 source = { editable = "." }
 dependencies = [
     { name = "agentd" },

From a5c21e1ffca9d09b12060dba92e25f74debf76a3 Mon Sep 17 00:00:00 2001
From: phact
Date: Wed, 26 Nov 2025 04:04:00 -0500
Subject: [PATCH 27/43] du

---
 .github/workflows/test-integration.yml | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/.github/workflows/test-integration.yml b/.github/workflows/test-integration.yml
index 2614699c..80d2d169 100644
--- a/.github/workflows/test-integration.yml
+++ b/.github/workflows/test-integration.yml
@@ -32,6 +32,14 @@ jobs:
     steps:
       - run: df -h
 
+      - name: Disk space troubleshooting
+        run: |
+          echo "=== Top-level disk usage ==="
+          sudo du -h -d1 / 2>/dev/null | sort -h
+          echo ""
+          echo "=== Docker disk usage ==="
+          docker system df
+
       - name: Clean runner workspace
         run: |
           # Save current workspace path

From bd0ea37253792e83a1f346cf06e5cf94f87a1301 Mon Sep 17 00:00:00 2001
From: phact
Date: Wed, 26 Nov 2025 04:08:28 -0500
Subject: [PATCH 28/43] integration tests docker cache

---
 .github/workflows/test-integration.yml | 20 ++++----------------
 1 file changed, 4 insertions(+), 16 deletions(-)

diff --git a/.github/workflows/test-integration.yml b/.github/workflows/test-integration.yml
index 80d2d169..92c9a976 100644
--- a/.github/workflows/test-integration.yml
+++ b/.github/workflows/test-integration.yml
@@ -32,23 +32,11 @@ jobs:
     steps:
       - run: df -h
 
-      - name: Disk space troubleshooting
-        run: |
-          echo "=== Top-level disk usage ==="
-          sudo du -h -d1 / 2>/dev/null | sort -h
-          echo ""
-          echo "=== Docker disk usage ==="
-          docker system df
-
-      - name: Clean runner workspace
-        run: |
-          # Save current workspace path
-          CURRENT_WORKSPACE="${GITHUB_WORKSPACE}"
-          # Clean old job workspaces except current one
-          find ${GITHUB_WORKSPACE}/../../../_work -maxdepth 2 -type d -mtime +1 -not -path "${CURRENT_WORKSPACE}*" -exec rm -rf {} + 2>/dev/null || true
-          # Clean runner logs older than 1 day
-          find ${GITHUB_WORKSPACE}/../../../_diag -type f -mtime +1 -delete 2>/dev/null || true
+      - name: Cleanup Docker cache
+        run: |
+          docker system prune -af || true
+          docker builder prune -af || true
 
       - run: df -h
 
       - name: Checkout

From 67e31dbbe42f25959c8de5e9c87895993f0ebd97 Mon Sep 17 00:00:00 2001
From: phact
Date: Wed, 26 Nov 2025 06:01:04 -0500
Subject: [PATCH 29/43] symlinks

---
 src/tui/_assets/docker-compose-cpu.yml | 1 -
 src/tui/_assets/docker-compose.gpu.yml | 1 +
 2 files changed, 1 insertion(+), 1 deletion(-)
 delete mode 120000 src/tui/_assets/docker-compose-cpu.yml
 create mode 120000 src/tui/_assets/docker-compose.gpu.yml

diff --git a/src/tui/_assets/docker-compose-cpu.yml b/src/tui/_assets/docker-compose-cpu.yml
deleted file mode 120000
index 5ad7a663..00000000
--- a/src/tui/_assets/docker-compose-cpu.yml
+++ /dev/null
@@ -1 +0,0 @@
--../../../docker-compose-cpu.yml
\ No newline at end of file
diff --git a/src/tui/_assets/docker-compose.gpu.yml b/src/tui/_assets/docker-compose.gpu.yml
new file mode 120000
index 00000000..bfebbedd
--- /dev/null
+++ b/src/tui/_assets/docker-compose.gpu.yml
@@ -0,0 +1 @@
+../../../docker-compose.gpu.yml
\ No newline at end of file

From 71eaa1c1d9e1468350e06ae1e6e6e222b38381 Mon Sep 17 00:00:00 2001
From: phact
Date: Wed, 26 Nov 2025 06:17:20 -0500
Subject: [PATCH 30/43] monitor actually displays and sticks across navigation

---
 src/tui/screens/monitor.py | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/src/tui/screens/monitor.py b/src/tui/screens/monitor.py
index 99e7a040..7d7269ab 100644
--- a/src/tui/screens/monitor.py
+++ b/src/tui/screens/monitor.py
@@ -33,13 +33,14 @@ class MonitorScreen(Screen):
         ("u", "upgrade", "Upgrade"),
         ("x", "reset", "Reset"),
         ("l", "logs", "View Logs"),
+        ("g", "toggle_mode", "Toggle GPU/CPU"),
         ("j", "cursor_down", "Move Down"),
         ("k", "cursor_up", "Move Up"),
     ]
 
     def __init__(self):
         super().__init__()
-        self.container_manager = ContainerManager()
+        self._container_manager = None  # Use app's shared instance
         self.docling_manager = DoclingManager()
         self.services_table = None
         self.docling_table = None
@@ -52,6 +53,13 @@ class MonitorScreen(Screen):
         # Track which table was last selected for mutual exclusion
         self._last_selected_table = None
 
+    @property
+    def container_manager(self) -> ContainerManager:
+        """Get the shared container manager from the app."""
+        if self._container_manager is None:
+            self._container_manager = self.app.container_manager
+        return self._container_manager
+
     def on_unmount(self) -> None:
         """Clean up when the screen is unmounted."""
         if hasattr(self, 'docling_manager'):
@@ -69,10 +77,10 @@ class MonitorScreen(Screen):
 
     def _create_services_tab(self) -> ComposeResult:
         """Create the services monitoring tab."""
-        # Current mode indicator + toggle
+        # GPU/CPU mode section
+        yield Static("GPU Mode", id="mode-indicator", classes="tab-header")
         yield Horizontal(
-            Static("", id="mode-indicator"),
-            Button("Toggle Mode", id="toggle-mode-btn"),
+            Button("Switch to CPU Mode", id="toggle-mode-btn"),
             classes="button-row",
             id="mode-row",
         )
@@ -583,8 +591,7 @@ class MonitorScreen(Screen):
         try:
             use_gpu = getattr(self.container_manager, "use_gpu_compose", False)
             indicator = self.query_one("#mode-indicator", Static)
-            mode_text = "Mode: GPU" if use_gpu else "Mode: CPU (no GPU detected)"
-            indicator.update(mode_text)
+            indicator.update("GPU Mode" if use_gpu else "CPU Mode")
             toggle_btn = self.query_one("#toggle-mode-btn", Button)
             toggle_btn.label = "Switch to CPU Mode" if use_gpu else "Switch to GPU Mode"
         except Exception:
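
[Editor's illustration] The fix replaces a per-screen `ContainerManager()` with a lazily resolved reference to the app's shared instance, so GPU/CPU mode state survives navigation between screens. A self-contained sketch of the pattern under that assumption; the stub classes below stand in for the real Textual app and screen:

```python
# Sketch of the shared-instance pattern; ContainerManager here is a stub.
class ContainerManager:
    def __init__(self):
        self.use_gpu_compose = False  # toggled at runtime

class App:
    def __init__(self):
        # One manager owned by the app, shared by every screen
        self.container_manager = ContainerManager()

class MonitorScreen:
    def __init__(self, app):
        self.app = app
        self._container_manager = None  # resolved lazily, never constructed here

    @property
    def container_manager(self):
        # First access caches the app-owned manager; later accesses reuse it
        if self._container_manager is None:
            self._container_manager = self.app.container_manager
        return self._container_manager

app = App()
screen_a, screen_b = MonitorScreen(app), MonitorScreen(app)
screen_a.container_manager.use_gpu_compose = True
assert screen_b.container_manager.use_gpu_compose  # state is shared
```
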

From 24c006128cbacda15f37bd8b87c345648363aea5 Mon Sep 17 00:00:00 2001
From: phact
Date: Wed, 26 Nov 2025 06:31:15 -0500
Subject: [PATCH 31/43] remove the secret

---
 docker-compose.yml | 1 -
 1 file changed, 1 deletion(-)

diff --git a/docker-compose.yml b/docker-compose.yml
index 2627aa14..7954d1bc 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -56,7 +56,6 @@ services:
       - LANGFLOW_URL=http://langflow:7860
       - LANGFLOW_PUBLIC_URL=${LANGFLOW_PUBLIC_URL}
       - LANGFLOW_AUTO_LOGIN=${LANGFLOW_AUTO_LOGIN}
-      - LANGFLOW_SECRET_KEY=${LANGFLOW_SECRET_KEY}
       - LANGFLOW_SUPERUSER=${LANGFLOW_SUPERUSER}
       - LANGFLOW_SUPERUSER_PASSWORD=${LANGFLOW_SUPERUSER_PASSWORD}
       - LANGFLOW_CHAT_FLOW_ID=${LANGFLOW_CHAT_FLOW_ID}

From d1b7a36801b341f23596816233f86b256cb4f541 Mon Sep 17 00:00:00 2001
From: phact
Date: Wed, 26 Nov 2025 06:32:38 -0500
Subject: [PATCH 32/43] integration test cleanup

---
 .github/workflows/test-integration.yml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/.github/workflows/test-integration.yml b/.github/workflows/test-integration.yml
index 92c9a976..0ef50684 100644
--- a/.github/workflows/test-integration.yml
+++ b/.github/workflows/test-integration.yml
@@ -39,6 +39,9 @@ jobs:
 
       - run: df -h
 
+      - name: Cleanup
+        run: sudo rm -rf opensearch-data
+
       - name: Checkout
         uses: actions/checkout@v4
 

From 9e8936e2b84933af099cd36b56d1b7a92526f86e Mon Sep 17 00:00:00 2001
From: phact
Date: Wed, 26 Nov 2025 06:33:51 -0500
Subject: [PATCH 33/43] comment - integration test cleanup

---
 .github/workflows/test-integration.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/test-integration.yml b/.github/workflows/test-integration.yml
index 0ef50684..635552f0 100644
--- a/.github/workflows/test-integration.yml
+++ b/.github/workflows/test-integration.yml
@@ -39,8 +39,8 @@ jobs:
 
       - run: df -h
 
-      - name: Cleanup
-        run: sudo rm -rf opensearch-data
+      #- name: Cleanup
+      #  run: sudo rm -rf opensearch-data
 
       - name: Checkout
         uses: actions/checkout@v4

From 5df25e21fe935451082110c2341bed2f010cdb88 Mon Sep 17 00:00:00 2001
From: phact
Date: Wed, 26 Nov 2025 06:35:36 -0500
Subject: [PATCH 34/43] chown instead

---
 .github/workflows/test-integration.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/test-integration.yml b/.github/workflows/test-integration.yml
index 635552f0..aab34d2a 100644
--- a/.github/workflows/test-integration.yml
+++ b/.github/workflows/test-integration.yml
@@ -39,8 +39,8 @@ jobs:
 
       - run: df -h
 
-      #- name: Cleanup
-      #  run: sudo rm -rf opensearch-data
+      - name: Fix permissions for opensearch-data
+        run: sudo chown -R $(whoami) opensearch-data && chmod -R u+w opensearch-data
 
       - name: Checkout
         uses: actions/checkout@v4

From b92f4fdfb3ae4480e9d51bb1977245d8d27c01d4 Mon Sep 17 00:00:00 2001
From: phact
Date: Wed, 26 Nov 2025 06:40:30 -0500
Subject: [PATCH 35/43] rm full path

---
 .github/workflows/test-integration.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/test-integration.yml b/.github/workflows/test-integration.yml
index aab34d2a..604f608a 100644
--- a/.github/workflows/test-integration.yml
+++ b/.github/workflows/test-integration.yml
@@ -40,7 +40,7 @@ jobs:
 
       - name: Fix permissions for opensearch-data
-        run: sudo chown -R $(whoami) opensearch-data && chmod -R u+w opensearch-data
+        run: sudo rm -rf /opt/actions-runner/_work/openrag/openrag/opensearch-data/*
 
       - name: Checkout
         uses: actions/checkout@v4
 

From 157cebff4e705b9af97fdf2ce608e439a514e920 Mon Sep 17 00:00:00 2001
From: phact
Date: Wed, 26 Nov 2025 06:43:50 -0500
Subject: [PATCH 36/43] rm with docker compose

---
 .github/workflows/test-integration.yml | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/.github/workflows/test-integration.yml b/.github/workflows/test-integration.yml
index 604f608a..a70dd24d 100644
--- a/.github/workflows/test-integration.yml
+++ b/.github/workflows/test-integration.yml
@@ -36,12 +36,10 @@ jobs:
         run: |
           docker system prune -af || true
           docker builder prune -af || true
+          docker-compose -f docker-compose.yml down -v --remove-orphans || true
 
       - run: df -h
 
-      - name: Fix permissions for opensearch-data
-        run: sudo rm -rf /opt/actions-runner/_work/openrag/openrag/opensearch-data/*
-
       - name: Checkout
         uses: actions/checkout@v4
 

From c6e6c80bf6154bda5cf1e775a8764c6aa7526b02 Mon Sep 17 00:00:00 2001
From: Mike Fortman
Date: Wed, 26 Nov 2025 10:59:58 -0600
Subject: [PATCH 37/43] Adds loading state and placeholder for new conversations

---
 frontend/components/navigation.tsx | 73 ++++++++++++++++++++++++++++--
 1 file changed, 70 insertions(+), 3 deletions(-)

diff --git a/frontend/components/navigation.tsx b/frontend/components/navigation.tsx
index 4efd1403..b172779e 100644
--- a/frontend/components/navigation.tsx
+++ b/frontend/components/navigation.tsx
@@ -91,6 +91,10 @@ export function Navigation({
   const { loading } = useLoadingStore();
 
+  useEffect(() => {
+    console.log("loading", loading);
+  }, [loading]);
+
   const [previousConversationCount, setPreviousConversationCount] =
     useState(0);
   const [deleteModalOpen, setDeleteModalOpen] = useState(false);
   const [conversationToDelete, setConversationToDelete] =
@@ -391,8 +395,70 @@ export function Navigation({
                 No conversations yet
               ) : (
-                conversations.map((conversation) => (
-
+                  )
+                );
+              })()}
+              {conversations.map((conversation) => (
+
-                ))
+              ))}
+            )}
             )}

From 53139ecb8a2e3d2087b97aae4b353dc04d6715d4 Mon Sep 17 00:00:00 2001
From: Edwin Jose
Date: Wed, 26 Nov 2025 13:55:49 -0500
Subject: [PATCH 38/43] Enable local builds and env embedding model config in
 compose files

Uncommented build sections for backend, frontend, and langflow services in
docker-compose.yml to allow local image builds. Updated SELECTED_EMBEDDING_MODEL
to use environment variable in both docker-compose.yml and docker-compose-cpu.yml
for improved configurability.
---
 docker-compose-cpu.yml |  2 +-
 docker-compose.yml     | 20 ++++++++++----------
 2 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/docker-compose-cpu.yml b/docker-compose-cpu.yml
index 825e04d7..45aaee01 100644
--- a/docker-compose-cpu.yml
+++ b/docker-compose-cpu.yml
@@ -129,7 +129,7 @@ services:
       - FILENAME=None
       - MIMETYPE=None
       - FILESIZE=0
-      - SELECTED_EMBEDDING_MODEL=text-embedding-3-small
+      - SELECTED_EMBEDDING_MODEL=${SELECTED_EMBEDDING_MODEL:-}
       - LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT=JWT,OPENRAG-QUERY-FILTER,OPENSEARCH_PASSWORD,OWNER,OWNER_NAME,OWNER_EMAIL,CONNECTOR_TYPE,FILENAME,MIMETYPE,FILESIZE,SELECTED_EMBEDDING_MODEL
       - LANGFLOW_LOG_LEVEL=DEBUG
       - LANGFLOW_AUTO_LOGIN=${LANGFLOW_AUTO_LOGIN}
diff --git a/docker-compose.yml b/docker-compose.yml
index c28bb130..a3371a8e 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -45,9 +45,9 @@ services:
 
   openrag-backend:
     image: langflowai/openrag-backend:${OPENRAG_VERSION:-latest}
-    # build:
-    #   context: .
-    #   dockerfile: Dockerfile.backend
+    build:
+      context: .
+      dockerfile: Dockerfile.backend
     container_name: openrag-backend
     depends_on:
       - langflow
@@ -86,9 +86,9 @@ services:
 
   openrag-frontend:
     image: langflowai/openrag-frontend:${OPENRAG_VERSION:-latest}
-    # build:
-    #   context: .
-    #   dockerfile: Dockerfile.frontend
+    build:
+      context: .
+      dockerfile: Dockerfile.frontend
     container_name: openrag-frontend
     depends_on:
      - openrag-backend
@@ -101,9 +101,9 @@ services:
     volumes:
       - ./flows:/app/flows:U,z
     image: langflowai/openrag-langflow:${LANGFLOW_VERSION:-latest}
-    # build:
-    #   context: .
-    #   dockerfile: Dockerfile.langflow
+    build:
+      context: .
+      dockerfile: Dockerfile.langflow
     container_name: langflow
     ports:
       - "7860:7860"
@@ -128,7 +128,7 @@ services:
       - FILENAME=None
      - MIMETYPE=None
       - FILESIZE=0
-      - SELECTED_EMBEDDING_MODEL=text-embedding-3-small
+      - SELECTED_EMBEDDING_MODEL=${SELECTED_EMBEDDING_MODEL:-}
       - OPENSEARCH_PASSWORD=${OPENSEARCH_PASSWORD}
       - LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT=JWT,OPENRAG-QUERY-FILTER,OPENSEARCH_PASSWORD,OWNER,OWNER_NAME,OWNER_EMAIL,CONNECTOR_TYPE,FILENAME,MIMETYPE,FILESIZE,SELECTED_EMBEDDING_MODEL,OPENAI_API_KEY,ANTHROPIC_API_KEY,WATSONX_API_KEY,WATSONX_ENDPOINT,WATSONX_PROJECT_ID,OLLAMA_BASE_URL
       - LANGFLOW_LOG_LEVEL=DEBUG
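
[Editor's illustration] With `${SELECTED_EMBEDDING_MODEL:-}` in the compose files, an unset host variable reaches the container as an empty string rather than the previously hardcoded `text-embedding-3-small`. A hedged sketch of how code inside the container might treat that; the fallback shown is simply the old default from this diff, not necessarily what the flows actually do:

```python
# Sketch: treat an empty SELECTED_EMBEDDING_MODEL as "not configured".
import os

selected = os.environ.get("SELECTED_EMBEDDING_MODEL", "").strip()
if not selected:
    # The compose files used to hardcode this value; real components may
    # resolve their default differently.
    selected = "text-embedding-3-small"
print(f"Using embedding model: {selected}")
```
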

From d6466db5a1b15c421cf30b5713a5490aee7213ef Mon Sep 17 00:00:00 2001
From: Mike Fortman
Date: Wed, 26 Nov 2025 12:59:40 -0600
Subject: [PATCH 39/43] change documents directory

---
 docker-compose-cpu.yml                       |   2 +-
 docker-compose.yml                           |   2 +-
 docs/README.md                               |   4 ++--
 docs/docs/core-components/knowledge.mdx      |   2 +-
 docs/docs/get-started/docker.mdx             |   2 +-
 docs/docs/get-started/quickstart.mdx         |   2 +-
 docs/docs/reference/configuration.mdx        |   2 +-
 docs/package.json                            |   2 +-
 docs/scraper.config.json                     |   2 +-
 {documents => openrag-documents}/docling.pdf | Bin
 .../ibm_anthropic.pdf                        | Bin
 .../openrag-documentation.pdf                | Bin
 .../warmup_ocr.pdf                           | Bin
 src/main.py                                  |  19 +++++++++++++++++-
 src/tui/main.py                              |   2 +-
 src/tui/managers/env_manager.py              |   6 +++---
 src/tui/screens/config.py                    |   2 +-
 tests/integration/test_startup_ingest.py    |   2 +-
 18 files changed, 34 insertions(+), 17 deletions(-)
 rename {documents => openrag-documents}/docling.pdf (100%)
 rename {documents => openrag-documents}/ibm_anthropic.pdf (100%)
 rename {documents => openrag-documents}/openrag-documentation.pdf (100%)
 rename {documents => openrag-documents}/warmup_ocr.pdf (100%)

diff --git a/docker-compose-cpu.yml b/docker-compose-cpu.yml
index c5cd15f3..be882429 100644
--- a/docker-compose-cpu.yml
+++ b/docker-compose-cpu.yml
@@ -81,7 +81,7 @@ services:
       - AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}
       - AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
     volumes:
-      - ./documents:/app/documents:Z
+      - ./openrag-documents:/app/documents:Z
       - ./keys:/app/keys:Z
       - ./flows:/app/flows:U,z
diff --git a/docker-compose.yml b/docker-compose.yml
index a74d3c12..e3fded53 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -80,7 +80,7 @@ services:
       - AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}
       - AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
     volumes:
-      - ./documents:/app/documents:Z
+      - ./openrag-documents:/app/documents:Z
       - ./keys:/app/keys:Z
       - ./flows:/app/flows:U,z
diff --git a/docs/README.md b/docs/README.md
index 436d7830..bad5759c 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -42,7 +42,7 @@ If you are using GitHub pages for hosting, this command is a convenient way to b
 
 ## Update the OpenRAG documentation PDF
 
-The documentation PDF at `openrag/documents/openrag-documentation.pdf` is used by the OpenRAG application, so keep it up to date.
+The documentation PDF at `openrag/openrag-documents/openrag-documentation.pdf` is used by the OpenRAG application, so keep it up to date.
 
 To update the PDF, do the following:
 
@@ -68,7 +68,7 @@ To remove these items, give the following prompt or something similar to your ID
 
 2. Check your `.mdx` files to confirm these elements are removed. Don't commit the changes.
 
-3. From `openrag/docs`, run this command to build the site with the changes, and create a PDF at `openrag/documents`.
+3. From `openrag/docs`, run this command to build the site with the changes, and create a PDF at `openrag/openrag-documents`.
 
    ```
    npm run build:pdf
diff --git a/docs/docs/core-components/knowledge.mdx b/docs/docs/core-components/knowledge.mdx
index 80a997c2..63edbaa3 100644
--- a/docs/docs/core-components/knowledge.mdx
+++ b/docs/docs/core-components/knowledge.mdx
@@ -29,7 +29,7 @@ To configure the knowledge ingestion pipeline parameters, see [Docling Ingestion
 
 The **Knowledge Ingest** flow uses Langflow's [**File** component](https://docs.langflow.org/components-data#file) to split and embed files loaded from your local machine into the OpenSearch database.
 
-The default path to your local folder is mounted from the `./documents` folder in your OpenRAG project directory to the `/app/documents/` directory inside the Docker container. Files added to the host or the container will be visible in both locations. To configure this location, modify the **Documents Paths** variable in either the TUI's [Advanced Setup](/install#setup) menu or in the `.env` used by Docker Compose.
+The default path to your local folder is mounted from the `./openrag-documents` folder in your OpenRAG project directory to the `/app/documents/` directory inside the Docker container. Files added to the host or the container will be visible in both locations. To configure this location, modify the **Documents Paths** variable in either the TUI's [Advanced Setup](/install#setup) menu or in the `.env` used by Docker Compose.
 
 To load and process a single file from the mapped location, click **Add Knowledge**, and then click