openrag/flows/openrag_ingest_docling.json
Lucas Oliveira e0015f35db
fix: update onboarding design, initialize the OpenSearch index after onboarding, and make flow reset switch the models to the chosen provider (#100)
* changed tooltip style

* added start on label wrapper

* changed switch to checkbox on OpenAI onboarding and updated the copy

* made the border red when the API key is invalid

* Added embedding configuration after onboarding

* changed openrag ingest docling to use the same embedding model component as other flows

* changed flows service to get flow by id, not by path

* modified reset_langflow to also set the right embedding model

* added endpoint and project id to provider config

* made resetting replace the model with the provider's model

* Moved consts to settings.py

* raise an error when flow_id is not found
2025-09-26 12:04:17 -03:00

{
"data": {
"edges": [
{
"animated": false,
"className": "",
"data": {
"sourceHandle": {
"dataType": "SplitText",
"id": "SplitText-3ZI5B",
"name": "dataframe",
"output_types": [
"DataFrame"
]
},
"targetHandle": {
"fieldName": "ingest_data",
"id": "OpenSearchHybrid-XtKoA",
"inputTypes": [
"Data",
"DataFrame"
],
"type": "other"
}
},
"id": "reactflow__edge-SplitText-3ZI5B{œdataTypeœ:œSplitTextœ,œidœ:œSplitText-3ZI5Bœ,œnameœ:œdataframeœ,œoutput_typesœ:[œDataFrameœ]}-OpenSearchHybrid-XtKoA{œfieldNameœ:œingest_dataœ,œidœ:œOpenSearchHybrid-XtKoAœ,œinputTypesœ:[œDataœ,œDataFrameœ],œtypeœ:œotherœ}",
"selected": false,
"source": "SplitText-3ZI5B",
"sourceHandle": "{œdataTypeœ:œSplitTextœ,œidœ:œSplitText-3ZI5Bœ,œnameœ:œdataframeœ,œoutput_typesœ:[œDataFrameœ]}",
"target": "OpenSearchHybrid-XtKoA",
"targetHandle": "{œfieldNameœ:œingest_dataœ,œidœ:œOpenSearchHybrid-XtKoAœ,œinputTypesœ:[œDataœ,œDataFrameœ],œtypeœ:œotherœ}"
},
{
"animated": false,
"className": "",
"data": {
"sourceHandle": {
"dataType": "DoclingRemote",
"id": "DoclingRemote-78KoX",
"name": "dataframe",
"output_types": [
"DataFrame"
]
},
"targetHandle": {
"fieldName": "data_inputs",
"id": "ExportDoclingDocument-xFoCI",
"inputTypes": [
"Data",
"DataFrame"
],
"type": "other"
}
},
"id": "xy-edge__DoclingRemote-78KoX{œdataTypeœ:œDoclingRemoteœ,œidœ:œDoclingRemote-78KoXœ,œnameœ:œdataframeœ,œoutput_typesœ:[œDataFrameœ]}-ExportDoclingDocument-xFoCI{œfieldNameœ:œdata_inputsœ,œidœ:œExportDoclingDocument-xFoCIœ,œinputTypesœ:[œDataœ,œDataFrameœ],œtypeœ:œotherœ}",
"selected": false,
"source": "DoclingRemote-78KoX",
"sourceHandle": "{œdataTypeœ:œDoclingRemoteœ,œidœ:œDoclingRemote-78KoXœ,œnameœ:œdataframeœ,œoutput_typesœ:[œDataFrameœ]}",
"target": "ExportDoclingDocument-xFoCI",
"targetHandle": "{œfieldNameœ:œdata_inputsœ,œidœ:œExportDoclingDocument-xFoCIœ,œinputTypesœ:[œDataœ,œDataFrameœ],œtypeœ:œotherœ}"
},
{
"animated": false,
"className": "",
"data": {
"sourceHandle": {
"dataType": "ExportDoclingDocument",
"id": "ExportDoclingDocument-xFoCI",
"name": "data",
"output_types": [
"Data"
]
},
"targetHandle": {
"fieldName": "data_inputs",
"id": "SplitText-3ZI5B",
"inputTypes": [
"Data",
"DataFrame",
"Message"
],
"type": "other"
}
},
"id": "xy-edge__ExportDoclingDocument-xFoCI{œdataTypeœ:œExportDoclingDocumentœ,œidœ:œExportDoclingDocument-xFoCIœ,œnameœ:œdataœ,œoutput_typesœ:[œDataœ]}-SplitText-3ZI5B{œfieldNameœ:œdata_inputsœ,œidœ:œSplitText-3ZI5Bœ,œinputTypesœ:[œDataœ,œDataFrameœ,œMessageœ],œtypeœ:œotherœ}",
"selected": false,
"source": "ExportDoclingDocument-xFoCI",
"sourceHandle": "{œdataTypeœ:œExportDoclingDocumentœ,œidœ:œExportDoclingDocument-xFoCIœ,œnameœ:œdataœ,œoutput_typesœ:[œDataœ]}",
"target": "SplitText-3ZI5B",
"targetHandle": "{œfieldNameœ:œdata_inputsœ,œidœ:œSplitText-3ZI5Bœ,œinputTypesœ:[œDataœ,œDataFrameœ,œMessageœ],œtypeœ:œotherœ}"
},
{
"animated": false,
"className": "",
"data": {
"sourceHandle": {
"dataType": "EmbeddingModel",
"id": "EmbeddingModel-eZ6bT",
"name": "embeddings",
"output_types": [
"Embeddings"
]
},
"targetHandle": {
"fieldName": "embedding",
"id": "OpenSearchHybrid-XtKoA",
"inputTypes": [
"Embeddings"
],
"type": "other"
}
},
"id": "xy-edge__EmbeddingModel-eZ6bT{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-eZ6bTœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}-OpenSearchHybrid-XtKoA{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchHybrid-XtKoAœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}",
"selected": false,
"source": "EmbeddingModel-eZ6bT",
"sourceHandle": "{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-eZ6bTœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}",
"target": "OpenSearchHybrid-XtKoA",
"targetHandle": "{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchHybrid-XtKoAœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}"
}
],
"nodes": [
{
"data": {
"description": "Split text into chunks based on specified criteria.",
"display_name": "Split Text",
"id": "SplitText-3ZI5B",
"node": {
"base_classes": [
"DataFrame"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
"description": "Split text into chunks based on specified criteria.",
"display_name": "Split Text",
"documentation": "https://docs.langflow.org/components-processing#split-text",
"edited": true,
"field_order": [
"data_inputs",
"chunk_overlap",
"chunk_size",
"separator",
"text_key",
"keep_separator"
],
"frozen": false,
"icon": "scissors-line-dashed",
"legacy": false,
"lf_version": "1.6.0",
"metadata": {
"code_hash": "65a90e1f4fe6",
"dependencies": {
"dependencies": [
{
"name": "langchain_text_splitters",
"version": "0.3.9"
},
{
"name": "langflow",
"version": "1.5.0.post2"
}
],
"total_dependencies": 2
},
"module": "custom_components.split_text"
},
"minimized": false,
"output_types": [],
"outputs": [
{
"allows_loop": false,
"cache": true,
"display_name": "Chunks",
"group_outputs": false,
"hidden": null,
"method": "split_text",
"name": "dataframe",
"options": null,
"required_inputs": null,
"selected": "DataFrame",
"tool_mode": true,
"types": [
"DataFrame"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"template": {
"_type": "Component",
"chunk_overlap": {
"_input_type": "IntInput",
"advanced": false,
"display_name": "Chunk Overlap",
"dynamic": false,
"info": "Number of characters to overlap between chunks.",
"list": false,
"list_add_label": "Add More",
"name": "chunk_overlap",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "int",
"value": 200
},
"chunk_size": {
"_input_type": "IntInput",
"advanced": false,
"display_name": "Chunk Size",
"dynamic": false,
"info": "The maximum length of each chunk. Text is first split by separator, then chunks are merged up to this size. Individual splits larger than this won't be further divided.",
"list": false,
"list_add_label": "Add More",
"name": "chunk_size",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "int",
"value": 1000
},
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "from langchain_text_splitters import CharacterTextSplitter\n\nfrom langflow.custom.custom_component.component import Component\nfrom langflow.io import DropdownInput, HandleInput, IntInput, MessageTextInput, Output\nfrom langflow.schema.data import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\nfrom langflow.utils.util import unescape_string\n\n\nclass SplitTextComponent(Component):\n display_name: str = \"Split Text\"\n description: str = \"Split text into chunks based on specified criteria.\"\n documentation: str = \"https://docs.langflow.org/components-processing#split-text\"\n icon = \"scissors-line-dashed\"\n name = \"SplitText\"\n\n inputs = [\n HandleInput(\n name=\"data_inputs\",\n display_name=\"Input\",\n info=\"The data with texts to split in chunks.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n IntInput(\n name=\"chunk_overlap\",\n display_name=\"Chunk Overlap\",\n info=\"Number of characters to overlap between chunks.\",\n value=200,\n ),\n IntInput(\n name=\"chunk_size\",\n display_name=\"Chunk Size\",\n info=(\n \"The maximum length of each chunk. Text is first split by separator, \"\n \"then chunks are merged up to this size. \"\n \"Individual splits larger than this won't be further divided.\"\n ),\n value=1000,\n ),\n MessageTextInput(\n name=\"separator\",\n display_name=\"Separator\",\n info=(\n \"The character to split on. Use \\\\n for newline. \"\n \"Examples: \\\\n\\\\n for paragraphs, \\\\n for lines, . for sentences\"\n ),\n value=\"\\n\",\n ),\n MessageTextInput(\n name=\"text_key\",\n display_name=\"Text Key\",\n info=\"The key to use for the text column.\",\n value=\"text\",\n advanced=True,\n ),\n DropdownInput(\n name=\"keep_separator\",\n display_name=\"Keep Separator\",\n info=\"Whether to keep the separator in the output chunks and where to place it.\",\n options=[\"False\", \"True\", \"Start\", \"End\"],\n value=\"False\",\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Chunks\", name=\"dataframe\", method=\"split_text\"),\n ]\n\n def _docs_to_data(self, docs) -> list[Data]:\n data_list = [Data(text=doc.page_content, data=doc.metadata) for doc in docs]\n return data_list\n\n def _fix_separator(self, separator: str) -> str:\n \"\"\"Fix common separator issues and convert to proper format.\"\"\"\n if separator == \"/n\":\n return \"\\n\"\n if separator == \"/t\":\n return \"\\t\"\n return separator\n\n def split_text_base(self):\n separator = self._fix_separator(self.separator)\n separator = unescape_string(separator)\n\n if isinstance(self.data_inputs, DataFrame):\n if not len(self.data_inputs):\n msg = \"DataFrame is empty\"\n raise TypeError(msg)\n\n self.data_inputs.text_key = self.text_key\n try:\n documents = self.data_inputs.to_lc_documents()\n except Exception as e:\n msg = f\"Error converting DataFrame to documents: {e}\"\n raise TypeError(msg) from e\n elif isinstance(self.data_inputs, Message):\n self.data_inputs = [self.data_inputs.to_data()]\n return self.split_text_base()\n else:\n if not self.data_inputs:\n msg = \"No data inputs provided\"\n raise TypeError(msg)\n\n documents = []\n if isinstance(self.data_inputs, Data):\n self.data_inputs.text_key = self.text_key\n documents = [self.data_inputs.to_lc_document()]\n else:\n try:\n documents = [input_.to_lc_document() for input_ in self.data_inputs if isinstance(input_, Data)]\n if not documents:\n msg = f\"No valid Data inputs found in {type(self.data_inputs)}\"\n raise 
TypeError(msg)\n except AttributeError as e:\n msg = f\"Invalid input type in collection: {e}\"\n raise TypeError(msg) from e\n try:\n # Convert string 'False'/'True' to boolean\n keep_sep = self.keep_separator\n if isinstance(keep_sep, str):\n if keep_sep.lower() == \"false\":\n keep_sep = False\n elif keep_sep.lower() == \"true\":\n keep_sep = True\n # 'start' and 'end' are kept as strings\n self.log(documents)\n splitter = CharacterTextSplitter(\n chunk_overlap=self.chunk_overlap,\n chunk_size=self.chunk_size,\n separator=separator,\n keep_separator=keep_sep,\n )\n return splitter.split_documents(documents)\n except Exception as e:\n msg = f\"Error splitting text: {e}\"\n raise TypeError(msg) from e\n\n def split_text(self) -> DataFrame:\n return DataFrame(self._docs_to_data(self.split_text_base()))\n"
},
"data_inputs": {
"_input_type": "HandleInput",
"advanced": false,
"display_name": "Input",
"dynamic": false,
"info": "The data with texts to split in chunks.",
"input_types": [
"Data",
"DataFrame",
"Message"
],
"list": false,
"list_add_label": "Add More",
"name": "data_inputs",
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "other",
"value": ""
},
"keep_separator": {
"_input_type": "DropdownInput",
"advanced": true,
"combobox": false,
"dialog_inputs": {},
"display_name": "Keep Separator",
"dynamic": false,
"info": "Whether to keep the separator in the output chunks and where to place it.",
"name": "keep_separator",
"options": [
"False",
"True",
"Start",
"End"
],
"options_metadata": [],
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"toggle": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "False"
},
"separator": {
"_input_type": "MessageTextInput",
"advanced": false,
"display_name": "Separator",
"dynamic": false,
"info": "The character to split on. Use \\n for newline. Examples: \\n\\n for paragraphs, \\n for lines, . for sentences",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "separator",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "\n"
},
"text_key": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Text Key",
"dynamic": false,
"info": "The key to use for the text column.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "text_key",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "text"
}
},
"tool_mode": false
},
"selected_output": "chunks",
"type": "SplitText"
},
"dragging": false,
"height": 475,
"id": "SplitText-3ZI5B",
"measured": {
"height": 475,
"width": 320
},
"position": {
"x": 1729.1788373023007,
"y": 1330.8003441546418
},
"positionAbsolute": {
"x": 1683.4543896546102,
"y": 1350.7871623588553
},
"selected": false,
"type": "genericNode",
"width": 320
},
{
"data": {
"id": "OpenSearchHybrid-XtKoA",
"node": {
"base_classes": [
"Data",
"DataFrame",
"VectorStore"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
"description": "Hybrid search: KNN + keyword, with optional filters, min_score, and aggregations.",
"display_name": "OpenSearch (Hybrid)",
"documentation": "",
"edited": true,
"field_order": [
"docs_metadata",
"opensearch_url",
"index_name",
"engine",
"space_type",
"ef_construction",
"m",
"ingest_data",
"search_query",
"should_cache_vector_store",
"embedding",
"vector_field",
"number_of_results",
"filter_expression",
"auth_mode",
"username",
"password",
"jwt_token",
"jwt_header",
"bearer_prefix",
"use_ssl",
"verify_certs"
],
"frozen": false,
"icon": "OpenSearch",
"legacy": false,
"metadata": {
"code_hash": "deee3f04cb47",
"dependencies": {
"dependencies": [
{
"name": "langflow",
"version": "1.5.0.post2"
},
{
"name": "opensearchpy",
"version": "2.8.0"
}
],
"total_dependencies": 2
},
"module": "custom_components.opensearch_hybrid"
},
"minimized": false,
"output_types": [],
"outputs": [
{
"allows_loop": false,
"cache": true,
"display_name": "Search Results",
"group_outputs": false,
"hidden": null,
"method": "search_documents",
"name": "search_results",
"options": null,
"required_inputs": null,
"selected": "Data",
"tool_mode": true,
"types": [
"Data"
],
"value": "__UNDEFINED__"
},
{
"allows_loop": false,
"cache": true,
"display_name": "DataFrame",
"group_outputs": false,
"hidden": null,
"method": "as_dataframe",
"name": "dataframe",
"options": null,
"required_inputs": null,
"selected": "DataFrame",
"tool_mode": true,
"types": [
"DataFrame"
],
"value": "__UNDEFINED__"
},
{
"allows_loop": false,
"cache": true,
"display_name": "Vector Store Connection",
"group_outputs": false,
"hidden": true,
"method": "as_vector_store",
"name": "vectorstoreconnection",
"options": null,
"required_inputs": null,
"selected": "VectorStore",
"tool_mode": true,
"types": [
"VectorStore"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"template": {
"_type": "Component",
"auth_mode": {
"_input_type": "DropdownInput",
"advanced": false,
"combobox": false,
"dialog_inputs": {},
"display_name": "Auth Mode",
"dynamic": false,
"info": "Choose Basic (username/password) or JWT (Bearer token).",
"load_from_db": false,
"name": "auth_mode",
"options": [
"basic",
"jwt"
],
"options_metadata": [],
"placeholder": "",
"real_time_refresh": true,
"required": false,
"show": true,
"title_case": false,
"toggle": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "jwt"
},
"bearer_prefix": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Prefix 'Bearer '",
"dynamic": false,
"info": "",
"list": false,
"list_add_label": "Add More",
"name": "bearer_prefix",
"placeholder": "",
"required": false,
"show": false,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
},
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "from __future__ import annotations\n\nimport json\nimport uuid\nfrom typing import Any, Dict, List, Optional\n\nfrom langflow.base.vectorstores.model import (\n LCVectorStoreComponent,\n check_cached_vector_store,\n)\nfrom langflow.base.vectorstores.vector_store_connection_decorator import (\n vector_store_connection,\n)\nfrom langflow.io import (\n BoolInput,\n DropdownInput,\n HandleInput,\n IntInput,\n MultilineInput,\n SecretStrInput,\n StrInput,\n TableInput,\n)\nfrom langflow.logging import logger\nfrom langflow.schema.data import Data\nfrom opensearchpy import OpenSearch, helpers\n\n\n@vector_store_connection\nclass OpenSearchHybridComponent(LCVectorStoreComponent):\n \"\"\"OpenSearch hybrid search: KNN (k=10, boost=0.7) + multi_match (boost=0.3) with optional filters & min_score.\"\"\"\n\n display_name: str = \"OpenSearch (Hybrid)\"\n name: str = \"OpenSearchHybrid\"\n icon: str = \"OpenSearch\"\n description: str = \"Hybrid search: KNN + keyword, with optional filters, min_score, and aggregations.\"\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[\n i.name for i in LCVectorStoreComponent.inputs\n ], # search_query, add_documents, etc.\n \"embedding\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Ingestion Metadata\",\n info=\"Key value pairs to be inserted into each ingested document.\",\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n advanced=True,\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=\"URL for your OpenSearch cluster.\",\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=\"The index to search.\",\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=\"Vector search engine to use.\",\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Space Type\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=\"Distance metric for vector similarity.\",\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=\"Size of the dynamic list used during k-NN graph creation.\",\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=\"Number of bidirectional links created for each new element.\",\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(\n name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"]\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Vector Field\",\n value=\"chunk_embedding\",\n advanced=True,\n info=\"Vector field used for KNN.\",\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Size (limit)\",\n value=10,\n advanced=True,\n info=\"Default number 
of hits when no limit provided in filter_expression.\",\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Filter Expression (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON to control filters/limit/score threshold.\\n\"\n \"Accepted shapes:\\n\"\n '1) {\"filter\": [ {\"term\": {\"filename\":\"foo\"}}, {\"terms\":{\"owner\":[\"u1\",\"u2\"]}} ], \"limit\": 10, \"score_threshold\": 1.6 }\\n'\n '2) Context-style maps: {\"data_sources\":[\"fileA\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"123\"]}\\n'\n \"Placeholders with __IMPOSSIBLE_VALUE__ are ignored.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Auth Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=\"Choose Basic (username/password) or JWT (Bearer token).\",\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=False,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"Password\",\n value=\"admin\",\n show=False,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=True,\n show=True,\n info=\"Paste a valid JWT (sent as a header).\",\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(name=\"use_ssl\", display_name=\"Use SSL\", value=True, advanced=True),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify Certificates\",\n value=False,\n advanced=True,\n ),\n ]\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> Dict[str, Any]:\n \"\"\"For Approximate k-NN Search, this is the default mapping to create index.\"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n }\n }\n },\n }\n\n def _validate_aoss_with_engines(self, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate AOSS with the engine.\"\"\"\n if is_aoss and engine != \"nmslib\" and engine != \"faiss\":\n raise ValueError(\n \"Amazon OpenSearch Service Serverless only \"\n \"supports `nmslib` or `faiss` engines\"\n )\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Check if the service is http_auth is set as `aoss`.\"\"\"\n if (\n http_auth is not None\n and hasattr(http_auth, \"service\")\n and http_auth.service == \"aoss\"\n ):\n return True\n return False\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: List[List[float]],\n texts: List[str],\n metadatas: Optional[List[dict]] = None,\n ids: Optional[List[str]] = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n mapping: Optional[Dict] = None,\n max_chunk_bytes: Optional[int] = 1 * 1024 * 1024,\n is_aoss: bool = False,\n ) -> List[str]:\n \"\"\"Bulk Ingest Embeddings into given 
index.\"\"\"\n if not mapping:\n mapping = dict()\n\n requests = []\n return_ids = []\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n self.log(metadatas[i])\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> Dict[str, Any]:\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n raise ValueError(\"Auth Mode is 'jwt' but no jwt_token was provided.\")\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n raise ValueError(\"Auth Mode is 'basic' but username/password are missing.\")\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our “vector store.”\n self.log(self.ingest_data)\n client = self.build_client()\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n docs = self.ingest_data or []\n if not docs:\n self.log(\"No documents to ingest.\")\n return\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n if not self.embedding:\n raise ValueError(\"Embedding handle is required to embed documents.\")\n\n # Generate embeddings\n vectors = self.embedding.embed_documents(texts)\n\n if not vectors:\n self.log(\"No vectors generated from documents.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss, engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n 
ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=self.vector_field,\n )\n\n self.log(\n f\"Indexing {len(texts)} documents into '{self.index_name}' with proper KNN mapping...\"\n )\n\n # Use the LangChain-style bulk ingestion\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=self.vector_field,\n text_field=\"text\",\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n self.log(f\"Successfully indexed {len(return_ids)} documents.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> List[dict]:\n \"\"\"\n Accepts either:\n A) {\"filter\":[ ...term/terms objects... ], \"limit\":..., \"score_threshold\":...}\n B) Context-style: {\"data_sources\":[...], \"document_types\":[...], \"owners\":[...]}\n Returns a list of OS filter clauses (term/terms), skipping placeholders and empty terms.\n \"\"\"\n\n if not filter_obj:\n return []\n\n # If its a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except Exception:\n # Not valid JSON → treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n clauses: List[dict] = []\n for f in raw or []:\n if (\n \"term\" in f\n and isinstance(f[\"term\"], dict)\n and not self._is_placeholder_term(f[\"term\"])\n ):\n clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n clauses.append(f)\n return clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n clauses: List[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n clauses.append({\"term\": {field: values[0]}})\n else:\n clauses.append({\"terms\": {field: values}})\n return clauses\n\n # ---------- search (single hybrid path matching your tool) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression (can be either A or B shape; see _coerce_filter_clauses)\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n raise ValueError(f\"Invalid filter_expression JSON: {e}\") from e\n\n if not self.embedding:\n raise ValueError(\n \"Embedding is required to run hybrid search (KNN + keyword).\"\n )\n\n # Embed the query\n vec = 
self.embedding.embed_query(q)\n\n # Build filter clauses (accept both shapes)\n clauses = self._coerce_filter_clauses(filter_obj)\n\n # Respect the tool's limit/threshold defaults\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build the same hybrid body as your SearchService\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"knn\": {\n self.vector_field: {\n \"vector\": vec,\n \"k\": 10, # fixed to match the tool\n \"boost\": 0.7,\n }\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3,\n }\n },\n ],\n \"minimum_should_match\": 1,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n if clauses:\n body[\"query\"][\"bool\"][\"filter\"] = clauses\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n # top-level min_score (matches your tool)\n body[\"min_score\"] = score_threshold\n\n resp = client.search(index=self.index_name, body=body)\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n try:\n raw = self.search(self.search_query or \"\")\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n self.log(self.ingest_data)\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(\n self, build_config: dict, field_value: str, field_name: str | None = None\n ) -> dict:\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n if is_basic:\n build_config[\"jwt_token\"][\"value\"] = \"\"\n\n return build_config\n\n return build_config\n\n except Exception as e:\n self.log(f\"update_build_config error: {e}\")\n return build_config\n"
},
"docs_metadata": {
"_input_type": "TableInput",
"advanced": true,
"display_name": "Ingestion Metadata",
"dynamic": false,
"info": "Key value pairs to be inserted into each ingested document.",
"is_list": true,
"list_add_label": "Add More",
"name": "docs_metadata",
"placeholder": "",
"required": false,
"show": true,
"table_icon": "Table",
"table_schema": {
"columns": [
{
"default": "None",
"description": "Key name",
"disable_edit": false,
"display_name": "Key",
"edit_mode": "popover",
"filterable": true,
"formatter": "text",
"hidden": false,
"name": "key",
"sortable": true,
"type": "str"
},
{
"default": "None",
"description": "Value of the metadata",
"disable_edit": false,
"display_name": "Value",
"edit_mode": "popover",
"filterable": true,
"formatter": "text",
"hidden": false,
"name": "value",
"sortable": true,
"type": "str"
}
]
},
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"trigger_icon": "Table",
"trigger_text": "Open table",
"type": "table",
"value": []
},
"ef_construction": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "EF Construction",
"dynamic": false,
"info": "Size of the dynamic list used during k-NN graph creation.",
"list": false,
"list_add_label": "Add More",
"name": "ef_construction",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "int",
"value": 512
},
"embedding": {
"_input_type": "HandleInput",
"advanced": false,
"display_name": "Embedding",
"dynamic": false,
"info": "",
"input_types": [
"Embeddings"
],
"list": false,
"list_add_label": "Add More",
"name": "embedding",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "other",
"value": ""
},
"engine": {
"_input_type": "DropdownInput",
"advanced": true,
"combobox": false,
"dialog_inputs": {},
"display_name": "Engine",
"dynamic": false,
"info": "Vector search engine to use.",
"load_from_db": false,
"name": "engine",
"options": [
"jvector",
"nmslib",
"faiss",
"lucene"
],
"options_metadata": [],
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"toggle": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "nmslib"
},
"filter_expression": {
"_input_type": "MultilineInput",
"advanced": false,
"copy_field": false,
"display_name": "Filter Expression (JSON)",
"dynamic": false,
"info": "Optional JSON to control filters/limit/score threshold.\nAccepted shapes:\n1) {\"filter\": [ {\"term\": {\"filename\":\"foo\"}}, {\"terms\":{\"owner\":[\"u1\",\"u2\"]}} ], \"limit\": 10, \"score_threshold\": 1.6 }\n2) Context-style maps: {\"data_sources\":[\"fileA\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"123\"]}\nPlaceholders with __IMPOSSIBLE_VALUE__ are ignored.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"multiline": true,
"name": "filter_expression",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"index_name": {
"_input_type": "StrInput",
"advanced": false,
"display_name": "Index Name",
"dynamic": false,
"info": "The index to search.",
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "index_name",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "documents"
},
"ingest_data": {
"_input_type": "HandleInput",
"advanced": false,
"display_name": "Ingest Data",
"dynamic": false,
"info": "",
"input_types": [
"Data",
"DataFrame"
],
"list": true,
"list_add_label": "Add More",
"name": "ingest_data",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "other",
"value": ""
},
"jwt_header": {
"_input_type": "StrInput",
"advanced": true,
"display_name": "JWT Header Name",
"dynamic": false,
"info": "",
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "jwt_header",
"placeholder": "",
"required": false,
"show": false,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "Authorization"
},
"jwt_token": {
"_input_type": "SecretStrInput",
"advanced": false,
"display_name": "JWT Token",
"dynamic": false,
"info": "Paste a valid JWT (sent as a header).",
"input_types": [],
"load_from_db": true,
"name": "jwt_token",
"password": true,
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"type": "str",
"value": ""
},
"m": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "M Parameter",
"dynamic": false,
"info": "Number of bidirectional links created for each new element.",
"list": false,
"list_add_label": "Add More",
"name": "m",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "int",
"value": 16
},
"number_of_results": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Default Size (limit)",
"dynamic": false,
"info": "Default number of hits when no limit provided in filter_expression.",
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "number_of_results",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "int",
"value": 15
},
"opensearch_url": {
"_input_type": "StrInput",
"advanced": false,
"display_name": "OpenSearch URL",
"dynamic": false,
"info": "URL for your OpenSearch cluster.",
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "opensearch_url",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "https://opensearch:9200"
},
"password": {
"_input_type": "SecretStrInput",
"advanced": false,
"display_name": "Password",
"dynamic": false,
"info": "",
"input_types": [],
"load_from_db": false,
"name": "password",
"password": true,
"placeholder": "",
"required": false,
"show": false,
"title_case": false,
"type": "str",
"value": ""
},
"search_query": {
"_input_type": "QueryInput",
"advanced": false,
"display_name": "Search Query",
"dynamic": false,
"info": "Enter a query to run a similarity search.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "search_query",
"placeholder": "Enter a query...",
"required": false,
"show": true,
"title_case": false,
"tool_mode": true,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "query",
"value": ""
},
"should_cache_vector_store": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Cache Vector Store",
"dynamic": false,
"info": "If True, the vector store will be cached for the current build of the component. This is useful for components that have multiple output methods and want to share the same vector store.",
"list": false,
"list_add_label": "Add More",
"name": "should_cache_vector_store",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
},
"space_type": {
"_input_type": "DropdownInput",
"advanced": true,
"combobox": false,
"dialog_inputs": {},
"display_name": "Space Type",
"dynamic": false,
"info": "Distance metric for vector similarity.",
"name": "space_type",
"options": [
"l2",
"l1",
"cosinesimil",
"linf",
"innerproduct"
],
"options_metadata": [],
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"toggle": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "l2"
},
"use_ssl": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Use SSL",
"dynamic": false,
"info": "",
"list": false,
"list_add_label": "Add More",
"name": "use_ssl",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
},
"username": {
"_input_type": "StrInput",
"advanced": false,
"display_name": "Username",
"dynamic": false,
"info": "",
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "username",
"placeholder": "",
"required": false,
"show": false,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "admin"
},
"vector_field": {
"_input_type": "StrInput",
"advanced": true,
"display_name": "Vector Field",
"dynamic": false,
"info": "Vector field used for KNN.",
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "vector_field",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "chunk_embedding"
},
"verify_certs": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Verify Certificates",
"dynamic": false,
"info": "",
"list": false,
"list_add_label": "Add More",
"name": "verify_certs",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "bool",
"value": false
}
},
"tool_mode": false
},
"selected_output": "search_results",
"showNode": true,
"type": "OpenSearchHybrid"
},
"dragging": false,
"id": "OpenSearchHybrid-XtKoA",
"measured": {
"height": 760,
"width": 320
},
"position": {
"x": 2218.9287723423276,
"y": 1332.2598463956504
},
"selected": false,
"type": "genericNode"
},
{
"data": {
"description": "Uses Docling to process input documents connecting to your instance of Docling Serve.",
"display_name": "Docling Serve",
"id": "DoclingRemote-78KoX",
"node": {
"base_classes": [
"DataFrame"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
"description": "Uses Docling to process input documents connecting to your instance of Docling Serve.",
"display_name": "Docling Serve",
"documentation": "https://docling-project.github.io/docling/",
"edited": false,
"field_order": [
"path",
"file_path",
"separator",
"silent_errors",
"delete_server_file_after_processing",
"ignore_unsupported_extensions",
"ignore_unspecified_files",
"api_url",
"max_concurrency",
"max_poll_timeout",
"api_headers",
"docling_serve_opts"
],
"frozen": false,
"icon": "Docling",
"legacy": false,
"metadata": {
"code_hash": "880538860431",
"dependencies": {
"dependencies": [
{
"name": "httpx",
"version": "0.28.1"
},
{
"name": "docling_core",
"version": "2.45.0"
},
{
"name": "pydantic",
"version": "2.10.6"
},
{
"name": "langflow",
"version": null
}
],
"total_dependencies": 4
},
"module": "custom_components.docling_serve"
},
"minimized": false,
"output_types": [],
"outputs": [
{
"allows_loop": false,
"cache": true,
"display_name": "Files",
"group_outputs": false,
"method": "load_files",
"name": "dataframe",
"options": null,
"required_inputs": null,
"selected": "DataFrame",
"tool_mode": true,
"types": [
"DataFrame"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"template": {
"_type": "Component",
"api_headers": {
"_input_type": "NestedDictInput",
"advanced": true,
"display_name": "HTTP headers",
"dynamic": false,
"info": "Optional dictionary of additional headers required for connecting to Docling Serve.",
"list": false,
"list_add_label": "Add More",
"name": "api_headers",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "NestedDict",
"value": {}
},
"api_url": {
"_input_type": "StrInput",
"advanced": false,
"display_name": "Server address",
"dynamic": false,
"info": "URL of the Docling Serve instance.",
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "api_url",
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "http://localhost:5001"
},
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "import base64\nimport time\nfrom concurrent.futures import Future, ThreadPoolExecutor\nfrom pathlib import Path\nfrom typing import Any\n\nimport httpx\nfrom docling_core.types.doc import DoclingDocument\nfrom pydantic import ValidationError\n\nfrom langflow.base.data import BaseFileComponent\nfrom langflow.inputs import IntInput, NestedDictInput, StrInput\nfrom langflow.inputs.inputs import FloatInput\nfrom langflow.schema import Data\n\n\nclass DoclingRemoteComponent(BaseFileComponent):\n display_name = \"Docling Serve\"\n description = \"Uses Docling to process input documents connecting to your instance of Docling Serve.\"\n documentation = \"https://docling-project.github.io/docling/\"\n trace_type = \"tool\"\n icon = \"Docling\"\n name = \"DoclingRemote\"\n\n MAX_500_RETRIES = 5\n\n # https://docling-project.github.io/docling/usage/supported_formats/\n VALID_EXTENSIONS = [\n \"adoc\",\n \"asciidoc\",\n \"asc\",\n \"bmp\",\n \"csv\",\n \"dotx\",\n \"dotm\",\n \"docm\",\n \"docx\",\n \"htm\",\n \"html\",\n \"jpeg\",\n \"json\",\n \"md\",\n \"pdf\",\n \"png\",\n \"potx\",\n \"ppsx\",\n \"pptm\",\n \"potm\",\n \"ppsm\",\n \"pptx\",\n \"tiff\",\n \"txt\",\n \"xls\",\n \"xlsx\",\n \"xhtml\",\n \"xml\",\n \"webp\",\n ]\n\n inputs = [\n *BaseFileComponent._base_inputs,\n StrInput(\n name=\"api_url\",\n display_name=\"Server address\",\n info=\"URL of the Docling Serve instance.\",\n required=True,\n ),\n IntInput(\n name=\"max_concurrency\",\n display_name=\"Concurrency\",\n info=\"Maximum number of concurrent requests for the server.\",\n advanced=True,\n value=2,\n ),\n FloatInput(\n name=\"max_poll_timeout\",\n display_name=\"Maximum poll time\",\n info=\"Maximum waiting time for the document conversion to complete.\",\n advanced=True,\n value=3600,\n ),\n NestedDictInput(\n name=\"api_headers\",\n display_name=\"HTTP headers\",\n advanced=True,\n required=False,\n info=(\"Optional dictionary of additional headers required for connecting to Docling Serve.\"),\n ),\n NestedDictInput(\n name=\"docling_serve_opts\",\n display_name=\"Docling options\",\n advanced=True,\n required=False,\n info=(\n \"Optional dictionary of additional options. 
\"\n \"See https://github.com/docling-project/docling-serve/blob/main/docs/usage.md for more information.\"\n ),\n ),\n ]\n\n outputs = [\n *BaseFileComponent._base_outputs,\n ]\n\n def process_files(self, file_list: list[BaseFileComponent.BaseFile]) -> list[BaseFileComponent.BaseFile]:\n base_url = f\"{self.api_url}/v1alpha\"\n\n def _convert_document(client: httpx.Client, file_path: Path, options: dict[str, Any]) -> Data | None:\n encoded_doc = base64.b64encode(file_path.read_bytes()).decode()\n payload = {\n \"options\": options,\n \"file_sources\": [{\"base64_string\": encoded_doc, \"filename\": file_path.name}],\n }\n\n response = client.post(f\"{base_url}/convert/source/async\", json=payload)\n response.raise_for_status()\n task = response.json()\n\n http_failures = 0\n retry_status_start = 500\n retry_status_end = 600\n start_wait_time = time.monotonic()\n while task[\"task_status\"] not in (\"success\", \"failure\"):\n # Check if processing exceeds the maximum poll timeout\n processing_time = time.monotonic() - start_wait_time\n if processing_time >= self.max_poll_timeout:\n msg = (\n f\"Processing time {processing_time=} exceeds the maximum poll timeout {self.max_poll_timeout=}.\"\n \"Please increase the max_poll_timeout parameter or review why the processing \"\n \"takes long on the server.\"\n )\n self.log(msg)\n raise RuntimeError(msg)\n\n # Call for a new status update\n time.sleep(2)\n response = client.get(f\"{base_url}/status/poll/{task['task_id']}\")\n\n # Check if the status call gets into 5xx errors and retry\n if retry_status_start <= response.status_code < retry_status_end:\n http_failures += 1\n if http_failures > self.MAX_500_RETRIES:\n self.log(f\"The status requests got a http response {response.status_code} too many times.\")\n return None\n continue\n\n # Update task status\n task = response.json()\n\n result_resp = client.get(f\"{base_url}/result/{task['task_id']}\")\n result_resp.raise_for_status()\n result = result_resp.json()\n\n if \"json_content\" not in result[\"document\"] or result[\"document\"][\"json_content\"] is None:\n self.log(\"No JSON DoclingDocument found in the result.\")\n return None\n\n try:\n doc = DoclingDocument.model_validate(result[\"document\"][\"json_content\"])\n return Data(data={\"doc\": doc, \"file_path\": str(file_path)})\n except ValidationError as e:\n self.log(f\"Error validating the document. {e}\")\n return None\n\n docling_options = {\n \"to_formats\": [\"json\"],\n \"image_export_mode\": \"placeholder\",\n \"return_as_file\": False,\n **(self.docling_serve_opts or {}),\n }\n\n processed_data: list[Data | None] = []\n with (\n httpx.Client(headers=self.api_headers) as client,\n ThreadPoolExecutor(max_workers=self.max_concurrency) as executor,\n ):\n futures: list[tuple[int, Future]] = []\n for i, file in enumerate(file_list):\n if file.path is None:\n processed_data.append(None)\n continue\n\n futures.append((i, executor.submit(_convert_document, client, file.path, docling_options)))\n\n for _index, future in futures:\n try:\n result_data = future.result()\n processed_data.append(result_data)\n except (httpx.HTTPStatusError, httpx.RequestError, KeyError, ValueError) as exc:\n self.log(f\"Docling remote processing failed: {exc}\")\n raise\n\n return self.rollup_data(file_list, processed_data)\n"
},
"delete_server_file_after_processing": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Delete Server File After Processing",
"dynamic": false,
"info": "If true, the Server File Path will be deleted after processing.",
"list": false,
"list_add_label": "Add More",
"name": "delete_server_file_after_processing",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
},
"docling_serve_opts": {
"_input_type": "NestedDictInput",
"advanced": false,
"display_name": "Docling options",
"dynamic": false,
"info": "Optional dictionary of additional options. See https://github.com/docling-project/docling-serve/blob/main/docs/usage.md for more information.",
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "docling_serve_opts",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "NestedDict",
"value": {
"do_ocr": false
}
},
"file_path": {
"_input_type": "HandleInput",
"advanced": true,
"display_name": "Server File Path",
"dynamic": false,
"info": "Data object with a 'file_path' property pointing to server file or a Message object with a path to the file. Supercedes 'Path' but supports same file types.",
"input_types": [
"Data",
"Message"
],
"list": true,
"list_add_label": "Add More",
"name": "file_path",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "other",
"value": ""
},
"ignore_unspecified_files": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Ignore Unspecified Files",
"dynamic": false,
"info": "If true, Data with no 'file_path' property will be ignored.",
"list": false,
"list_add_label": "Add More",
"name": "ignore_unspecified_files",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "bool",
"value": false
},
"ignore_unsupported_extensions": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Ignore Unsupported Extensions",
"dynamic": false,
"info": "If true, files with unsupported extensions will not be processed.",
"list": false,
"list_add_label": "Add More",
"name": "ignore_unsupported_extensions",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
},
"max_concurrency": {
"_input_type": "IntInput",
"advanced": false,
"display_name": "Concurrency",
"dynamic": false,
"info": "Maximum number of concurrent requests for the server.",
"list": false,
"list_add_label": "Add More",
"name": "max_concurrency",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "int",
"value": 2
},
"max_poll_timeout": {
"_input_type": "FloatInput",
"advanced": true,
"display_name": "Maximum poll time",
"dynamic": false,
"info": "Maximum waiting time for the document conversion to complete.",
"list": false,
"list_add_label": "Add More",
"name": "max_poll_timeout",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "float",
"value": 3600
},
"path": {
"_input_type": "FileInput",
"advanced": false,
"display_name": "Files",
"dynamic": false,
"fileTypes": [
"adoc",
"asciidoc",
"asc",
"bmp",
"csv",
"dotx",
"dotm",
"docm",
"docx",
"htm",
"html",
"jpeg",
"json",
"md",
"pdf",
"png",
"potx",
"ppsx",
"pptm",
"potm",
"ppsm",
"pptx",
"tiff",
"txt",
"xls",
"xlsx",
"xhtml",
"xml",
"webp",
"zip",
"tar",
"tgz",
"bz2",
"gz"
],
"file_path": [],
"info": "Supported file extensions: adoc, asciidoc, asc, bmp, csv, dotx, dotm, docm, docx, htm, html, jpeg, json, md, pdf, png, potx, ppsx, pptm, potm, ppsm, pptx, tiff, txt, xls, xlsx, xhtml, xml, webp; optionally bundled in file extensions: zip, tar, tgz, bz2, gz",
"list": true,
"list_add_label": "Add More",
"name": "path",
"placeholder": "",
"required": false,
"show": true,
"temp_file": false,
"title_case": false,
"trace_as_metadata": true,
"type": "file",
"value": ""
},
"separator": {
"_input_type": "StrInput",
"advanced": true,
"display_name": "Separator",
"dynamic": false,
"info": "Specify the separator to use between multiple outputs in Message format.",
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "separator",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "\n\n"
},
"silent_errors": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Silent Errors",
"dynamic": false,
"info": "If true, errors will not raise an exception.",
"list": false,
"list_add_label": "Add More",
"name": "silent_errors",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "bool",
"value": false
}
},
"tool_mode": false
},
"showNode": true,
"type": "DoclingRemote"
},
"dragging": false,
"id": "DoclingRemote-78KoX",
"measured": {
"height": 472,
"width": 320
},
"position": {
"x": 974.2998232996713,
"y": 1337.9345348080217
},
"selected": false,
"type": "genericNode"
},
{
"data": {
"description": "Export DoclingDocument to markdown, html or other formats.",
"display_name": "Export DoclingDocument",
"id": "ExportDoclingDocument-xFoCI",
"node": {
"base_classes": [
"Data",
"DataFrame"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
"description": "Export DoclingDocument to markdown, html or other formats.",
"display_name": "Export DoclingDocument",
"documentation": "https://docling-project.github.io/docling/",
"edited": false,
"field_order": [
"data_inputs",
"export_format",
"image_mode",
"md_image_placeholder",
"md_page_break_placeholder",
"doc_key"
],
"frozen": false,
"icon": "Docling",
"legacy": false,
"metadata": {
"code_hash": "451c9673bd4c",
"dependencies": {
"dependencies": [
{
"name": "docling_core",
"version": "2.45.0"
},
{
"name": "langflow",
"version": null
}
],
"total_dependencies": 2
},
"module": "custom_components.export_doclingdocument"
},
"minimized": false,
"output_types": [],
"outputs": [
{
"allows_loop": false,
"cache": true,
"display_name": "Exported data",
"group_outputs": false,
"method": "export_document",
"name": "data",
"options": null,
"required_inputs": null,
"selected": "Data",
"tool_mode": true,
"types": [
"Data"
],
"value": "__UNDEFINED__"
},
{
"allows_loop": false,
"cache": true,
"display_name": "DataFrame",
"group_outputs": false,
"method": "as_dataframe",
"name": "dataframe",
"options": null,
"required_inputs": null,
"selected": "DataFrame",
"tool_mode": true,
"types": [
"DataFrame"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"template": {
"_type": "Component",
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "from typing import Any\n\nfrom docling_core.types.doc import ImageRefMode\n\nfrom langflow.base.data.docling_utils import extract_docling_documents\nfrom langflow.custom import Component\nfrom langflow.io import DropdownInput, HandleInput, MessageTextInput, Output, StrInput\nfrom langflow.schema import Data, DataFrame\n\n\nclass ExportDoclingDocumentComponent(Component):\n display_name: str = \"Export DoclingDocument\"\n description: str = \"Export DoclingDocument to markdown, html or other formats.\"\n documentation = \"https://docling-project.github.io/docling/\"\n icon = \"Docling\"\n name = \"ExportDoclingDocument\"\n\n inputs = [\n HandleInput(\n name=\"data_inputs\",\n display_name=\"Data or DataFrame\",\n info=\"The data with documents to export.\",\n input_types=[\"Data\", \"DataFrame\"],\n required=True,\n ),\n DropdownInput(\n name=\"export_format\",\n display_name=\"Export format\",\n options=[\"Markdown\", \"HTML\", \"Plaintext\", \"DocTags\"],\n info=\"Select the export format to convert the input.\",\n value=\"Markdown\",\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"image_mode\",\n display_name=\"Image export mode\",\n options=[\"placeholder\", \"embedded\"],\n info=(\n \"Specify how images are exported in the output. Placeholder will replace the images with a string, \"\n \"whereas Embedded will include them as base64 encoded images.\"\n ),\n value=\"placeholder\",\n ),\n StrInput(\n name=\"md_image_placeholder\",\n display_name=\"Image placeholder\",\n info=\"Specify the image placeholder for markdown exports.\",\n value=\"<!-- image -->\",\n advanced=True,\n ),\n StrInput(\n name=\"md_page_break_placeholder\",\n display_name=\"Page break placeholder\",\n info=\"Add this placeholder betweek pages in the markdown output.\",\n value=\"\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"doc_key\",\n display_name=\"Doc Key\",\n info=\"The key to use for the DoclingDocument column.\",\n value=\"doc\",\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Exported data\", name=\"data\", method=\"export_document\"),\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"as_dataframe\"),\n ]\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict:\n if field_name == \"export_format\" and field_value == \"Markdown\":\n build_config[\"md_image_placeholder\"][\"show\"] = True\n build_config[\"md_page_break_placeholder\"][\"show\"] = True\n build_config[\"image_mode\"][\"show\"] = True\n elif field_name == \"export_format\" and field_value == \"HTML\":\n build_config[\"md_image_placeholder\"][\"show\"] = False\n build_config[\"md_page_break_placeholder\"][\"show\"] = False\n build_config[\"image_mode\"][\"show\"] = True\n elif field_name == \"export_format\" and field_value in {\"Plaintext\", \"DocTags\"}:\n build_config[\"md_image_placeholder\"][\"show\"] = False\n build_config[\"md_page_break_placeholder\"][\"show\"] = False\n build_config[\"image_mode\"][\"show\"] = False\n\n return build_config\n\n def export_document(self) -> list[Data]:\n documents = extract_docling_documents(self.data_inputs, self.doc_key)\n\n results: list[Data] = []\n try:\n image_mode = ImageRefMode(self.image_mode)\n for doc in documents:\n content = \"\"\n if self.export_format == \"Markdown\":\n content = doc.export_to_markdown(\n image_mode=image_mode,\n image_placeholder=self.md_image_placeholder,\n page_break_placeholder=self.md_page_break_placeholder,\n )\n elif self.export_format == \"HTML\":\n 
content = doc.export_to_html(image_mode=image_mode)\n elif self.export_format == \"Plaintext\":\n content = doc.export_to_text()\n elif self.export_format == \"DocTags\":\n content = doc.export_to_doctags()\n\n results.append(Data(text=content))\n except Exception as e:\n msg = f\"Error splitting text: {e}\"\n raise TypeError(msg) from e\n\n return results\n\n def as_dataframe(self) -> DataFrame:\n return DataFrame(self.export_document())\n"
},
"data_inputs": {
"_input_type": "HandleInput",
"advanced": false,
"display_name": "Data or DataFrame",
"dynamic": false,
"info": "The data with documents to export.",
"input_types": [
"Data",
"DataFrame"
],
"list": false,
"list_add_label": "Add More",
"name": "data_inputs",
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "other",
"value": ""
},
"doc_key": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Doc Key",
"dynamic": false,
"info": "The key to use for the DoclingDocument column.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "doc_key",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "doc"
},
"export_format": {
"_input_type": "DropdownInput",
"advanced": false,
"combobox": false,
"dialog_inputs": {},
"display_name": "Export format",
"dynamic": false,
"info": "Select the export format to convert the input.",
"name": "export_format",
"options": [
"Markdown",
"HTML",
"Plaintext",
"DocTags"
],
"options_metadata": [],
"placeholder": "",
"real_time_refresh": true,
"required": false,
"show": true,
"title_case": false,
"toggle": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "Markdown"
},
"image_mode": {
"_input_type": "DropdownInput",
"advanced": false,
"combobox": false,
"dialog_inputs": {},
"display_name": "Image export mode",
"dynamic": false,
"info": "Specify how images are exported in the output. Placeholder will replace the images with a string, whereas Embedded will include them as base64 encoded images.",
"name": "image_mode",
"options": [
"placeholder",
"embedded"
],
"options_metadata": [],
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"toggle": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "placeholder"
},
"md_image_placeholder": {
"_input_type": "StrInput",
"advanced": true,
"display_name": "Image placeholder",
"dynamic": false,
"info": "Specify the image placeholder for markdown exports.",
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "md_image_placeholder",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "<!-- image -->"
},
"md_page_break_placeholder": {
"_input_type": "StrInput",
"advanced": true,
"display_name": "Page break placeholder",
"dynamic": false,
"info": "Add this placeholder betweek pages in the markdown output.",
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "md_page_break_placeholder",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": ""
}
},
"tool_mode": false
},
"selected_output": "data",
"showNode": true,
"type": "ExportDoclingDocument"
},
"dragging": false,
"id": "ExportDoclingDocument-xFoCI",
"measured": {
"height": 344,
"width": 320
},
"position": {
"x": 1354.7013688969873,
"y": 1365.2986945152204
},
"selected": false,
"type": "genericNode"
},
{
"data": {
"id": "EmbeddingModel-eZ6bT",
"node": {
"base_classes": [
"Embeddings"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
"description": "Generate embeddings using a specified provider.",
"display_name": "Embedding Model",
"documentation": "https://docs.langflow.org/components-embedding-models",
"edited": false,
"field_order": [
"provider",
"model",
"api_key",
"api_base",
"dimensions",
"chunk_size",
"request_timeout",
"max_retries",
"show_progress_bar",
"model_kwargs"
],
"frozen": false,
"icon": "binary",
"last_updated": "2025-09-22T15:54:52.885Z",
"legacy": false,
"metadata": {
"code_hash": "93faf11517da",
"dependencies": {
"dependencies": [
{
"name": "langchain_openai",
"version": "0.3.23"
},
{
"name": "langflow",
"version": null
}
],
"total_dependencies": 2
},
"module": "langflow.components.models.embedding_model.EmbeddingModelComponent"
},
"minimized": false,
"output_types": [],
"outputs": [
{
"allows_loop": false,
"cache": true,
"display_name": "Embedding Model",
"group_outputs": false,
"method": "build_embeddings",
"name": "embeddings",
"options": null,
"required_inputs": null,
"selected": "Embeddings",
"tool_mode": true,
"types": [
"Embeddings"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"template": {
"_type": "Component",
"api_base": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "API Base URL",
"dynamic": false,
"info": "Base URL for the API. Leave empty for default.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "api_base",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"api_key": {
"_input_type": "SecretStrInput",
"advanced": false,
"display_name": "OpenAI API Key",
"dynamic": false,
"info": "Model Provider API key",
"input_types": [],
"load_from_db": true,
"name": "api_key",
"password": true,
"placeholder": "",
"real_time_refresh": true,
"required": true,
"show": true,
"title_case": false,
"type": "str",
"value": "OPENAI_API_KEY"
},
"chunk_size": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Chunk Size",
"dynamic": false,
"info": "",
"list": false,
"list_add_label": "Add More",
"name": "chunk_size",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "int",
"value": 1000
},
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "from typing import Any\n\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom langflow.base.embeddings.model import LCEmbeddingsModel\nfrom langflow.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom langflow.field_typing import Embeddings\nfrom langflow.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom langflow.schema.dotdict import dotdict\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}],\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n ]\n\n def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n return OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None) -> dotdict:\n if field_name == \"provider\" and field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n 
build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n return build_config\n"
},
"dimensions": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Dimensions",
"dynamic": false,
"info": "The number of dimensions the resulting output embeddings should have. Only supported by certain models.",
"list": false,
"list_add_label": "Add More",
"name": "dimensions",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "int",
"value": ""
},
"max_retries": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Max Retries",
"dynamic": false,
"info": "",
"list": false,
"list_add_label": "Add More",
"name": "max_retries",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "int",
"value": 3
},
"model": {
"_input_type": "DropdownInput",
"advanced": false,
"combobox": false,
"dialog_inputs": {},
"display_name": "Model Name",
"dynamic": false,
"info": "Select the embedding model to use",
"name": "model",
"options": [
"text-embedding-3-small",
"text-embedding-3-large",
"text-embedding-ada-002"
],
"options_metadata": [],
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"toggle": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "text-embedding-3-small"
},
"model_kwargs": {
"_input_type": "DictInput",
"advanced": true,
"display_name": "Model Kwargs",
"dynamic": false,
"info": "Additional keyword arguments to pass to the model.",
"list": false,
"list_add_label": "Add More",
"name": "model_kwargs",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"type": "dict",
"value": {}
},
"provider": {
"_input_type": "DropdownInput",
"advanced": false,
"combobox": false,
"dialog_inputs": {},
"display_name": "Model Provider",
"dynamic": false,
"info": "Select the embedding model provider",
"name": "provider",
"options": [
"OpenAI"
],
"options_metadata": [
{
"icon": "OpenAI"
}
],
"placeholder": "",
"real_time_refresh": true,
"required": false,
"show": true,
"title_case": false,
"toggle": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "OpenAI"
},
"request_timeout": {
"_input_type": "FloatInput",
"advanced": true,
"display_name": "Request Timeout",
"dynamic": false,
"info": "",
"list": false,
"list_add_label": "Add More",
"name": "request_timeout",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "float",
"value": ""
},
"show_progress_bar": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Show Progress Bar",
"dynamic": false,
"info": "",
"list": false,
"list_add_label": "Add More",
"name": "show_progress_bar",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "bool",
"value": false
}
},
"tool_mode": false
},
"showNode": true,
"type": "EmbeddingModel"
},
"dragging": false,
"id": "EmbeddingModel-eZ6bT",
"measured": {
"height": 369,
"width": 320
},
"position": {
"x": 1726.6943524438122,
"y": 1800.5330404375484
},
"selected": true,
"type": "genericNode"
}
],
"viewport": {
"x": -767.6929603556041,
"y": -1196.6455082358875,
"zoom": 0.9277466102702023
}
},
"description": "Load your data for chat context with Retrieval Augmented Generation.",
"endpoint_name": null,
"id": "1402618b-e6d1-4ff2-9a11-d6ce71186915",
"is_component": false,
"last_tested_version": "1.5.0.post2",
"name": "OpenSearch Ingestion Flow Docling Serve",
"tags": [
"openai",
"astradb",
"rag",
"q-a"
]
}