{
|
|
"data": {
|
|
"edges": [
|
|
{
|
|
"animated": false,
|
|
"className": "",
|
|
"data": {
|
|
"sourceHandle": {
|
|
"dataType": "SplitText",
|
|
"id": "SplitText-QIKhg",
|
|
"name": "dataframe",
|
|
"output_types": [
|
|
"DataFrame"
|
|
]
|
|
},
|
|
"targetHandle": {
|
|
"fieldName": "ingest_data",
|
|
"id": "OpenSearchHybrid-Ve6bS",
|
|
"inputTypes": [
|
|
"Data",
|
|
"DataFrame"
|
|
],
|
|
"type": "other"
|
|
}
|
|
},
|
|
"id": "xy-edge__SplitText-QIKhg{œdataTypeœ:œSplitTextœ,œidœ:œSplitText-QIKhgœ,œnameœ:œdataframeœ,œoutput_typesœ:[œDataFrameœ]}-OpenSearchHybrid-Ve6bS{œfieldNameœ:œingest_dataœ,œidœ:œOpenSearchHybrid-Ve6bSœ,œinputTypesœ:[œDataœ,œDataFrameœ],œtypeœ:œotherœ}",
|
|
"selected": false,
|
|
"source": "SplitText-QIKhg",
|
|
"sourceHandle": "{œdataTypeœ:œSplitTextœ,œidœ:œSplitText-QIKhgœ,œnameœ:œdataframeœ,œoutput_typesœ:[œDataFrameœ]}",
|
|
"target": "OpenSearchHybrid-Ve6bS",
|
|
"targetHandle": "{œfieldNameœ:œingest_dataœ,œidœ:œOpenSearchHybrid-Ve6bSœ,œinputTypesœ:[œDataœ,œDataFrameœ],œtypeœ:œotherœ}"
|
|
},
|
|
{
|
|
"animated": false,
|
|
"className": "",
|
|
"data": {
|
|
"sourceHandle": {
|
|
"dataType": "File",
|
|
"id": "File-PSU37",
|
|
"name": "message",
|
|
"output_types": [
|
|
"Message"
|
|
]
|
|
},
|
|
"targetHandle": {
|
|
"fieldName": "data_inputs",
|
|
"id": "SplitText-QIKhg",
|
|
"inputTypes": [
|
|
"Data",
|
|
"DataFrame",
|
|
"Message"
|
|
],
|
|
"type": "other"
|
|
}
|
|
},
|
|
"id": "xy-edge__File-PSU37{œdataTypeœ:œFileœ,œidœ:œFile-PSU37œ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}-SplitText-QIKhg{œfieldNameœ:œdata_inputsœ,œidœ:œSplitText-QIKhgœ,œinputTypesœ:[œDataœ,œDataFrameœ,œMessageœ],œtypeœ:œotherœ}",
|
|
"selected": false,
|
|
"source": "File-PSU37",
|
|
"sourceHandle": "{œdataTypeœ:œFileœ,œidœ:œFile-PSU37œ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}",
|
|
"target": "SplitText-QIKhg",
|
|
"targetHandle": "{œfieldNameœ:œdata_inputsœ,œidœ:œSplitText-QIKhgœ,œinputTypesœ:[œDataœ,œDataFrameœ,œMessageœ],œtypeœ:œotherœ}"
|
|
},
|
|
{
|
|
"animated": false,
|
|
"className": "",
|
|
"data": {
|
|
"sourceHandle": {
|
|
"dataType": "EmbeddingModel",
|
|
"id": "EmbeddingModel-eZ6bT",
|
|
"name": "embeddings",
|
|
"output_types": [
|
|
"Embeddings"
|
|
]
|
|
},
|
|
"targetHandle": {
|
|
"fieldName": "embedding",
|
|
"id": "OpenSearchHybrid-Ve6bS",
|
|
"inputTypes": [
|
|
"Embeddings"
|
|
],
|
|
"type": "other"
|
|
}
|
|
},
|
|
"id": "xy-edge__EmbeddingModel-eZ6bT{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-eZ6bTœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}-OpenSearchHybrid-Ve6bS{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchHybrid-Ve6bSœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}",
|
|
"selected": false,
|
|
"source": "EmbeddingModel-eZ6bT",
|
|
"sourceHandle": "{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-eZ6bTœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}",
|
|
"target": "OpenSearchHybrid-Ve6bS",
|
|
"targetHandle": "{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchHybrid-Ve6bSœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}"
|
|
},
|
|
{
|
|
"animated": false,
|
|
"className": "",
|
|
"data": {
|
|
"sourceHandle": {
|
|
"dataType": "SecretInput",
|
|
"id": "SecretInput-F34VJ",
|
|
"name": "text",
|
|
"output_types": [
|
|
"Message"
|
|
]
|
|
},
|
|
"targetHandle": {
|
|
"fieldName": "dynamic_connector_type",
|
|
"id": "AdvancedDynamicFormBuilder-81Exw",
|
|
"inputTypes": [
|
|
"Text",
|
|
"Message"
|
|
],
|
|
"type": "str"
|
|
}
|
|
},
|
|
"id": "xy-edge__SecretInput-F34VJ{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-F34VJœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-81Exw{œfieldNameœ:œdynamic_connector_typeœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}",
|
|
"selected": false,
|
|
"source": "SecretInput-F34VJ",
|
|
"sourceHandle": "{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-F34VJœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}",
|
|
"target": "AdvancedDynamicFormBuilder-81Exw",
|
|
"targetHandle": "{œfieldNameœ:œdynamic_connector_typeœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}"
|
|
},
|
|
{
|
|
"animated": false,
|
|
"className": "",
|
|
"data": {
|
|
"sourceHandle": {
|
|
"dataType": "SecretInput",
|
|
"id": "SecretInput-b2cab",
|
|
"name": "text",
|
|
"output_types": [
|
|
"Message"
|
|
]
|
|
},
|
|
"targetHandle": {
|
|
"fieldName": "dynamic_owner",
|
|
"id": "AdvancedDynamicFormBuilder-81Exw",
|
|
"inputTypes": [
|
|
"Text",
|
|
"Message"
|
|
],
|
|
"type": "str"
|
|
}
|
|
},
|
|
"id": "xy-edge__SecretInput-b2cab{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-b2cabœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-81Exw{œfieldNameœ:œdynamic_ownerœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}",
|
|
"selected": false,
|
|
"source": "SecretInput-b2cab",
|
|
"sourceHandle": "{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-b2cabœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}",
|
|
"target": "AdvancedDynamicFormBuilder-81Exw",
|
|
"targetHandle": "{œfieldNameœ:œdynamic_ownerœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}"
|
|
},
|
|
{
|
|
"animated": false,
|
|
"className": "",
|
|
"data": {
|
|
"sourceHandle": {
|
|
"dataType": "SecretInput",
|
|
"id": "SecretInput-ZVfuS",
|
|
"name": "text",
|
|
"output_types": [
|
|
"Message"
|
|
]
|
|
},
|
|
"targetHandle": {
|
|
"fieldName": "dynamic_owner_email",
|
|
"id": "AdvancedDynamicFormBuilder-81Exw",
|
|
"inputTypes": [
|
|
"Text",
|
|
"Message"
|
|
],
|
|
"type": "str"
|
|
}
|
|
},
|
|
"id": "xy-edge__SecretInput-ZVfuS{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-ZVfuSœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-81Exw{œfieldNameœ:œdynamic_owner_emailœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}",
|
|
"selected": false,
|
|
"source": "SecretInput-ZVfuS",
|
|
"sourceHandle": "{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-ZVfuSœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}",
|
|
"target": "AdvancedDynamicFormBuilder-81Exw",
|
|
"targetHandle": "{œfieldNameœ:œdynamic_owner_emailœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}"
|
|
},
|
|
{
|
|
"animated": false,
|
|
"className": "",
|
|
"data": {
|
|
"sourceHandle": {
|
|
"dataType": "SecretInput",
|
|
"id": "SecretInput-Iqtxd",
|
|
"name": "text",
|
|
"output_types": [
|
|
"Message"
|
|
]
|
|
},
|
|
"targetHandle": {
|
|
"fieldName": "dynamic_owner_name",
|
|
"id": "AdvancedDynamicFormBuilder-81Exw",
|
|
"inputTypes": [
|
|
"Text",
|
|
"Message"
|
|
],
|
|
"type": "str"
|
|
}
|
|
},
|
|
"id": "xy-edge__SecretInput-Iqtxd{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-Iqtxdœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-AdvancedDynamicFormBuilder-81Exw{œfieldNameœ:œdynamic_owner_nameœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}",
|
|
"selected": false,
|
|
"source": "SecretInput-Iqtxd",
|
|
"sourceHandle": "{œdataTypeœ:œSecretInputœ,œidœ:œSecretInput-Iqtxdœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}",
|
|
"target": "AdvancedDynamicFormBuilder-81Exw",
|
|
"targetHandle": "{œfieldNameœ:œdynamic_owner_nameœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œinputTypesœ:[œTextœ,œMessageœ],œtypeœ:œstrœ}"
|
|
},
|
|
{
|
|
"animated": false,
|
|
"className": "not-running",
|
|
"data": {
|
|
"sourceHandle": {
|
|
"dataType": "AdvancedDynamicFormBuilder",
|
|
"id": "AdvancedDynamicFormBuilder-81Exw",
|
|
"name": "form_data",
|
|
"output_types": [
|
|
"Data"
|
|
]
|
|
},
|
|
"targetHandle": {
|
|
"fieldName": "docs_metadata",
|
|
"id": "OpenSearchHybrid-Ve6bS",
|
|
"inputTypes": [
|
|
"Data"
|
|
],
|
|
"type": "table"
|
|
}
|
|
},
|
|
"id": "xy-edge__AdvancedDynamicFormBuilder-81Exw{œdataTypeœ:œAdvancedDynamicFormBuilderœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œnameœ:œform_dataœ,œoutput_typesœ:[œDataœ]}-OpenSearchHybrid-Ve6bS{œfieldNameœ:œdocs_metadataœ,œidœ:œOpenSearchHybrid-Ve6bSœ,œinputTypesœ:[œDataœ],œtypeœ:œtableœ}",
|
|
"selected": false,
|
|
"source": "AdvancedDynamicFormBuilder-81Exw",
|
|
"sourceHandle": "{œdataTypeœ:œAdvancedDynamicFormBuilderœ,œidœ:œAdvancedDynamicFormBuilder-81Exwœ,œnameœ:œform_dataœ,œoutput_typesœ:[œDataœ]}",
|
|
"target": "OpenSearchHybrid-Ve6bS",
|
|
"targetHandle": "{œfieldNameœ:œdocs_metadataœ,œidœ:œOpenSearchHybrid-Ve6bSœ,œinputTypesœ:[œDataœ],œtypeœ:œtableœ}"
|
|
}
|
|
],
|
|
"nodes": [
|
|
{
|
|
"data": {
|
|
"description": "Split text into chunks based on specified criteria.",
|
|
"display_name": "Split Text",
|
|
"id": "SplitText-QIKhg",
|
|
"node": {
|
|
"base_classes": [
|
|
"DataFrame"
|
|
],
|
|
"beta": false,
|
|
"conditional_paths": [],
|
|
"custom_fields": {},
|
|
"description": "Split text into chunks based on specified criteria.",
|
|
"display_name": "Split Text",
|
|
"documentation": "https://docs.langflow.org/components-processing#split-text",
|
|
"edited": true,
|
|
"field_order": [
|
|
"data_inputs",
|
|
"chunk_overlap",
|
|
"chunk_size",
|
|
"separator",
|
|
"text_key",
|
|
"keep_separator"
|
|
],
|
|
"frozen": false,
|
|
"icon": "scissors-line-dashed",
|
|
"legacy": false,
|
|
"lf_version": "1.6.0",
|
|
"metadata": {
|
|
"code_hash": "f2867efda61f",
|
|
"dependencies": {
|
|
"dependencies": [
|
|
{
|
|
"name": "langchain_text_splitters",
|
|
"version": "0.3.9"
|
|
},
|
|
{
|
|
"name": "lfx",
|
|
"version": null
|
|
}
|
|
],
|
|
"total_dependencies": 2
|
|
},
|
|
"module": "custom_components.split_text"
|
|
},
|
|
"minimized": false,
|
|
"output_types": [],
|
|
"outputs": [
|
|
{
|
|
"allows_loop": false,
|
|
"cache": true,
|
|
"display_name": "Chunks",
|
|
"group_outputs": false,
|
|
"hidden": null,
|
|
"method": "split_text",
|
|
"name": "dataframe",
|
|
"options": null,
|
|
"required_inputs": null,
|
|
"selected": "DataFrame",
|
|
"tool_mode": true,
|
|
"types": [
|
|
"DataFrame"
|
|
],
|
|
"value": "__UNDEFINED__"
|
|
}
|
|
],
|
|
"pinned": false,
|
|
"template": {
|
|
"_type": "Component",
|
|
"chunk_overlap": {
|
|
"_input_type": "IntInput",
|
|
"advanced": false,
|
|
"display_name": "Chunk Overlap",
|
|
"dynamic": false,
|
|
"info": "Number of characters to overlap between chunks.",
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"name": "chunk_overlap",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "int",
|
|
"value": 200
|
|
},
|
|
"chunk_size": {
|
|
"_input_type": "IntInput",
|
|
"advanced": false,
|
|
"display_name": "Chunk Size",
|
|
"dynamic": false,
|
|
"info": "The maximum length of each chunk. Text is first split by separator, then chunks are merged up to this size. Individual splits larger than this won't be further divided.",
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"name": "chunk_size",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "int",
|
|
"value": 1000
|
|
},
|
|
"code": {
|
|
"advanced": true,
|
|
"dynamic": true,
|
|
"fileTypes": [],
|
|
"file_path": "",
|
|
"info": "",
|
|
"list": false,
|
|
"load_from_db": false,
|
|
"multiline": true,
|
|
"name": "code",
|
|
"password": false,
|
|
"placeholder": "",
|
|
"required": true,
|
|
"show": true,
|
|
"title_case": false,
|
|
"type": "code",
|
|
"value": "from langchain_text_splitters import CharacterTextSplitter\n\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.io import DropdownInput, HandleInput, IntInput, MessageTextInput, Output\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.utils.util import unescape_string\n\n\nclass SplitTextComponent(Component):\n display_name: str = \"Split Text\"\n description: str = \"Split text into chunks based on specified criteria.\"\n documentation: str = \"https://docs.langflow.org/components-processing#split-text\"\n icon = \"scissors-line-dashed\"\n name = \"SplitText\"\n\n inputs = [\n HandleInput(\n name=\"data_inputs\",\n display_name=\"Input\",\n info=\"The data with texts to split in chunks.\",\n input_types=[\"Data\", \"DataFrame\", \"Message\"],\n required=True,\n ),\n IntInput(\n name=\"chunk_overlap\",\n display_name=\"Chunk Overlap\",\n info=\"Number of characters to overlap between chunks.\",\n value=200,\n ),\n IntInput(\n name=\"chunk_size\",\n display_name=\"Chunk Size\",\n info=(\n \"The maximum length of each chunk. Text is first split by separator, \"\n \"then chunks are merged up to this size. \"\n \"Individual splits larger than this won't be further divided.\"\n ),\n value=1000,\n ),\n MessageTextInput(\n name=\"separator\",\n display_name=\"Separator\",\n info=(\n \"The character to split on. Use \\\\n for newline. \"\n \"Examples: \\\\n\\\\n for paragraphs, \\\\n for lines, . for sentences\"\n ),\n value=\"\\n\",\n ),\n MessageTextInput(\n name=\"text_key\",\n display_name=\"Text Key\",\n info=\"The key to use for the text column.\",\n value=\"text\",\n advanced=True,\n ),\n DropdownInput(\n name=\"keep_separator\",\n display_name=\"Keep Separator\",\n info=\"Whether to keep the separator in the output chunks and where to place it.\",\n options=[\"False\", \"True\", \"Start\", \"End\"],\n value=\"False\",\n advanced=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Chunks\", name=\"dataframe\", method=\"split_text\"),\n ]\n\n def _docs_to_data(self, docs) -> list[Data]:\n return [Data(text=doc.page_content, data=doc.metadata) for doc in docs]\n\n def _fix_separator(self, separator: str) -> str:\n \"\"\"Fix common separator issues and convert to proper format.\"\"\"\n if separator == \"/n\":\n return \"\\n\"\n if separator == \"/t\":\n return \"\\t\"\n return separator\n\n def split_text_base(self):\n separator = self._fix_separator(self.separator)\n separator = unescape_string(separator)\n\n if isinstance(self.data_inputs, DataFrame):\n if not len(self.data_inputs):\n msg = \"DataFrame is empty\"\n raise TypeError(msg)\n\n self.data_inputs.text_key = self.text_key\n try:\n documents = self.data_inputs.to_lc_documents()\n except Exception as e:\n msg = f\"Error converting DataFrame to documents: {e}\"\n raise TypeError(msg) from e\n elif isinstance(self.data_inputs, Message):\n self.data_inputs = [self.data_inputs.to_data()]\n return self.split_text_base()\n else:\n if not self.data_inputs:\n msg = \"No data inputs provided\"\n raise TypeError(msg)\n\n documents = []\n if isinstance(self.data_inputs, Data):\n self.data_inputs.text_key = self.text_key\n documents = [self.data_inputs.to_lc_document()]\n else:\n try:\n documents = [input_.to_lc_document() for input_ in self.data_inputs if isinstance(input_, Data)]\n if not documents:\n msg = f\"No valid Data inputs found in {type(self.data_inputs)}\"\n raise TypeError(msg)\n except AttributeError as e:\n msg = 
f\"Invalid input type in collection: {e}\"\n raise TypeError(msg) from e\n try:\n # Convert string 'False'/'True' to boolean\n keep_sep = self.keep_separator\n if isinstance(keep_sep, str):\n if keep_sep.lower() == \"false\":\n keep_sep = False\n elif keep_sep.lower() == \"true\":\n keep_sep = True\n # 'start' and 'end' are kept as strings\n\n splitter = CharacterTextSplitter(\n chunk_overlap=self.chunk_overlap,\n chunk_size=self.chunk_size,\n separator=separator,\n keep_separator=keep_sep,\n )\n return splitter.split_documents(documents)\n except Exception as e:\n msg = f\"Error splitting text: {e}\"\n raise TypeError(msg) from e\n\n def split_text(self) -> DataFrame:\n return DataFrame(self._docs_to_data(self.split_text_base()))\n"
|
|
},
|
|
"data_inputs": {
|
|
"_input_type": "HandleInput",
|
|
"advanced": false,
|
|
"display_name": "Input",
|
|
"dynamic": false,
|
|
"info": "The data with texts to split in chunks.",
|
|
"input_types": [
|
|
"Data",
|
|
"DataFrame",
|
|
"Message"
|
|
],
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"name": "data_inputs",
|
|
"placeholder": "",
|
|
"required": true,
|
|
"show": true,
|
|
"title_case": false,
|
|
"trace_as_metadata": true,
|
|
"type": "other",
|
|
"value": ""
|
|
},
|
|
"keep_separator": {
|
|
"_input_type": "DropdownInput",
|
|
"advanced": true,
|
|
"combobox": false,
|
|
"dialog_inputs": {},
|
|
"display_name": "Keep Separator",
|
|
"dynamic": false,
|
|
"external_options": {},
|
|
"info": "Whether to keep the separator in the output chunks and where to place it.",
|
|
"name": "keep_separator",
|
|
"options": [
|
|
"False",
|
|
"True",
|
|
"Start",
|
|
"End"
|
|
],
|
|
"options_metadata": [],
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"toggle": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "str",
|
|
"value": "False"
|
|
},
|
|
"separator": {
|
|
"_input_type": "MessageTextInput",
|
|
"advanced": false,
|
|
"display_name": "Separator",
|
|
"dynamic": false,
|
|
"info": "The character to split on. Use \\n for newline. Examples: \\n\\n for paragraphs, \\n for lines, . for sentences",
|
|
"input_types": [
|
|
"Message"
|
|
],
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"load_from_db": false,
|
|
"name": "separator",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_input": true,
|
|
"trace_as_metadata": true,
|
|
"type": "str",
|
|
"value": "\n"
|
|
},
|
|
"text_key": {
|
|
"_input_type": "MessageTextInput",
|
|
"advanced": true,
|
|
"display_name": "Text Key",
|
|
"dynamic": false,
|
|
"info": "The key to use for the text column.",
|
|
"input_types": [
|
|
"Message"
|
|
],
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"load_from_db": false,
|
|
"name": "text_key",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_input": true,
|
|
"trace_as_metadata": true,
|
|
"type": "str",
|
|
"value": "text"
|
|
}
|
|
},
|
|
"tool_mode": false
|
|
},
|
|
"selected_output": "chunks",
|
|
"type": "SplitText"
|
|
},
|
|
"dragging": false,
|
|
"height": 475,
|
|
"id": "SplitText-QIKhg",
|
|
"measured": {
|
|
"height": 475,
|
|
"width": 320
|
|
},
|
|
"position": {
|
|
"x": 1729.1788373023007,
|
|
"y": 1330.8003441546418
|
|
},
|
|
"positionAbsolute": {
|
|
"x": 1683.4543896546102,
|
|
"y": 1350.7871623588553
|
|
},
|
|
"selected": false,
|
|
"type": "genericNode",
|
|
"width": 320
|
|
},
|
|
{
|
|
"data": {
|
|
"id": "File-PSU37",
|
|
"node": {
|
|
"base_classes": [
|
|
"Message"
|
|
],
|
|
"beta": false,
|
|
"conditional_paths": [],
|
|
"custom_fields": {},
|
|
"description": "Loads content from one or more files.",
|
|
"display_name": "File",
|
|
"documentation": "https://docs.langflow.org/components-data#file",
|
|
"edited": true,
|
|
"field_order": [
|
|
"path",
|
|
"file_path",
|
|
"separator",
|
|
"silent_errors",
|
|
"delete_server_file_after_processing",
|
|
"ignore_unsupported_extensions",
|
|
"ignore_unspecified_files",
|
|
"advanced_mode",
|
|
"pipeline",
|
|
"ocr_engine",
|
|
"md_image_placeholder",
|
|
"md_page_break_placeholder",
|
|
"doc_key",
|
|
"use_multithreading",
|
|
"concurrency_multithreading",
|
|
"markdown"
|
|
],
|
|
"frozen": false,
|
|
"icon": "file-text",
|
|
"last_updated": "2025-09-26T14:37:42.811Z",
|
|
"legacy": false,
|
|
"lf_version": "1.6.0",
|
|
"metadata": {
|
|
"code_hash": "9a1d497f4f91",
|
|
"dependencies": {
|
|
"dependencies": [
|
|
{
|
|
"name": "lfx",
|
|
"version": null
|
|
}
|
|
],
|
|
"total_dependencies": 1
|
|
},
|
|
"module": "custom_components.file"
|
|
},
|
|
"minimized": false,
|
|
"output_types": [],
|
|
"outputs": [
|
|
{
|
|
"allows_loop": false,
|
|
"cache": true,
|
|
"display_name": "Raw Content",
|
|
"group_outputs": false,
|
|
"hidden": null,
|
|
"method": "load_files_message",
|
|
"name": "message",
|
|
"options": null,
|
|
"required_inputs": null,
|
|
"selected": "Message",
|
|
"tool_mode": true,
|
|
"types": [
|
|
"Message"
|
|
],
|
|
"value": "__UNDEFINED__"
|
|
}
|
|
],
|
|
"pinned": false,
|
|
"template": {
|
|
"_type": "Component",
|
|
"advanced_mode": {
|
|
"_input_type": "BoolInput",
|
|
"advanced": false,
|
|
"display_name": "Advanced Parser",
|
|
"dynamic": false,
|
|
"info": "Enable advanced document processing and export with Docling for PDFs, images, and office documents. Available only for single file processing.Note that advanced document processing can consume significant resources.",
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"name": "advanced_mode",
|
|
"placeholder": "",
|
|
"real_time_refresh": true,
|
|
"required": false,
|
|
"show": false,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "bool",
|
|
"value": false
|
|
},
|
|
"code": {
|
|
"advanced": true,
|
|
"dynamic": true,
|
|
"fileTypes": [],
|
|
"file_path": "",
|
|
"info": "",
|
|
"list": false,
|
|
"load_from_db": false,
|
|
"multiline": true,
|
|
"name": "code",
|
|
"password": false,
|
|
"placeholder": "",
|
|
"required": true,
|
|
"show": true,
|
|
"title_case": false,
|
|
"type": "code",
|
|
"value": "\"\"\"Enhanced file component with Docling support and process isolation.\n\nNotes:\n-----\n- ALL Docling parsing/export runs in a separate OS process to prevent memory\n growth and native library state from impacting the main Langflow process.\n- Standard text/structured parsing continues to use existing BaseFileComponent\n utilities (and optional threading via `parallel_load_data`).\n\"\"\"\n\nfrom __future__ import annotations\n\nimport json\nimport subprocess\nimport sys\nimport textwrap\nfrom copy import deepcopy\nfrom typing import Any\n\nfrom lfx.base.data.base_file import BaseFileComponent\nfrom lfx.base.data.utils import TEXT_FILE_TYPES, parallel_load_data, parse_text_file_to_data\nfrom lfx.inputs.inputs import DropdownInput, MessageTextInput, StrInput\nfrom lfx.io import BoolInput, FileInput, IntInput, Output\nfrom lfx.schema import DataFrame # noqa: TC001\nfrom lfx.schema.data import Data\nfrom lfx.schema.message import Message\n\n\nclass FileComponent(BaseFileComponent):\n \"\"\"File component with optional Docling processing (isolated in a subprocess).\"\"\"\n\n display_name = \"File\"\n description = \"Loads content from one or more files.\"\n documentation: str = \"https://docs.langflow.org/components-data#file\"\n icon = \"file-text\"\n name = \"File\"\n\n # Docling-supported/compatible extensions; TEXT_FILE_TYPES are supported by the base loader.\n VALID_EXTENSIONS = [\n *TEXT_FILE_TYPES,\n \"adoc\",\n \"asciidoc\",\n \"asc\",\n \"bmp\",\n \"dotx\",\n \"dotm\",\n \"docm\",\n \"jpeg\",\n \"png\",\n \"potx\",\n \"ppsx\",\n \"pptm\",\n \"potm\",\n \"ppsm\",\n \"pptx\",\n \"tiff\",\n \"xls\",\n \"xlsx\",\n \"xhtml\",\n \"webp\",\n ]\n\n # Fixed export settings used when markdown export is requested.\n EXPORT_FORMAT = \"Markdown\"\n IMAGE_MODE = \"placeholder\"\n\n _base_inputs = deepcopy(BaseFileComponent.get_base_inputs())\n\n for input_item in _base_inputs:\n if isinstance(input_item, FileInput) and input_item.name == \"path\":\n input_item.real_time_refresh = True\n break\n\n inputs = [\n *_base_inputs,\n BoolInput(\n name=\"advanced_mode\",\n display_name=\"Advanced Parser\",\n value=False,\n real_time_refresh=True,\n info=(\n \"Enable advanced document processing and export with Docling for PDFs, images, and office documents. \"\n \"Available only for single file processing.\"\n \"Note that advanced document processing can consume significant resources.\"\n ),\n show=False,\n ),\n DropdownInput(\n name=\"pipeline\",\n display_name=\"Pipeline\",\n info=\"Docling pipeline to use\",\n options=[\"standard\", \"vlm\"],\n value=\"standard\",\n advanced=True,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"ocr_engine\",\n display_name=\"OCR Engine\",\n info=\"OCR engine to use. 
Only available when pipeline is set to 'standard'.\",\n options=[\"None\", \"easyocr\"],\n value=\"easyocr\",\n show=False,\n advanced=True,\n ),\n StrInput(\n name=\"md_image_placeholder\",\n display_name=\"Image placeholder\",\n info=\"Specify the image placeholder for markdown exports.\",\n value=\"<!-- image -->\",\n advanced=True,\n show=False,\n ),\n StrInput(\n name=\"md_page_break_placeholder\",\n display_name=\"Page break placeholder\",\n info=\"Add this placeholder between pages in the markdown output.\",\n value=\"\",\n advanced=True,\n show=False,\n ),\n MessageTextInput(\n name=\"doc_key\",\n display_name=\"Doc Key\",\n info=\"The key to use for the DoclingDocument column.\",\n value=\"doc\",\n advanced=True,\n show=False,\n ),\n # Deprecated input retained for backward-compatibility.\n BoolInput(\n name=\"use_multithreading\",\n display_name=\"[Deprecated] Use Multithreading\",\n advanced=True,\n value=True,\n info=\"Set 'Processing Concurrency' greater than 1 to enable multithreading.\",\n ),\n IntInput(\n name=\"concurrency_multithreading\",\n display_name=\"Processing Concurrency\",\n advanced=True,\n info=\"When multiple files are being processed, the number of files to process concurrently.\",\n value=1,\n ),\n BoolInput(\n name=\"markdown\",\n display_name=\"Markdown Export\",\n info=\"Export processed documents to Markdown format. Only available when advanced mode is enabled.\",\n value=False,\n show=False,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Raw Content\", name=\"message\", method=\"load_files_message\"),\n ]\n\n # ------------------------------ UI helpers --------------------------------------\n\n def _path_value(self, template: dict) -> list[str]:\n \"\"\"Return the list of currently selected file paths from the template.\"\"\"\n return template.get(\"path\", {}).get(\"file_path\", [])\n\n def update_build_config(\n self,\n build_config: dict[str, Any],\n field_value: Any,\n field_name: str | None = None,\n ) -> dict[str, Any]:\n \"\"\"Show/hide Advanced Parser and related fields based on selection context.\"\"\"\n if field_name == \"path\":\n paths = self._path_value(build_config)\n file_path = paths[0] if paths else \"\"\n file_count = len(field_value) if field_value else 0\n\n # Advanced mode only for single (non-tabular) file\n allow_advanced = file_count == 1 and not file_path.endswith((\".csv\", \".xlsx\", \".parquet\"))\n build_config[\"advanced_mode\"][\"show\"] = allow_advanced\n if not allow_advanced:\n build_config[\"advanced_mode\"][\"value\"] = False\n for f in (\"pipeline\", \"ocr_engine\", \"doc_key\", \"md_image_placeholder\", \"md_page_break_placeholder\"):\n if f in build_config:\n build_config[f][\"show\"] = False\n\n # Docling Processing\n elif field_name == \"advanced_mode\":\n for f in (\"pipeline\", \"ocr_engine\", \"doc_key\", \"md_image_placeholder\", \"md_page_break_placeholder\"):\n if f in build_config:\n build_config[f][\"show\"] = bool(field_value)\n\n elif field_name == \"pipeline\":\n if field_value == \"standard\":\n build_config[\"ocr_engine\"][\"show\"] = True\n build_config[\"ocr_engine\"][\"value\"] = \"easyocr\"\n else:\n build_config[\"ocr_engine\"][\"show\"] = False\n build_config[\"ocr_engine\"][\"value\"] = \"None\"\n\n return build_config\n\n def update_outputs(self, frontend_node: dict[str, Any], field_name: str, field_value: Any) -> dict[str, Any]: # noqa: ARG002\n \"\"\"Dynamically show outputs based on file count/type and advanced mode.\"\"\"\n if field_name not in [\"path\", \"advanced_mode\", 
\"pipeline\"]:\n return frontend_node\n\n template = frontend_node.get(\"template\", {})\n paths = self._path_value(template)\n if not paths:\n return frontend_node\n\n frontend_node[\"outputs\"] = []\n if len(paths) == 1:\n file_path = paths[0] if field_name == \"path\" else frontend_node[\"template\"][\"path\"][\"file_path\"][0]\n if file_path.endswith((\".csv\", \".xlsx\", \".parquet\")):\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Content\", name=\"dataframe\", method=\"load_files_structured\"),\n )\n elif file_path.endswith(\".json\"):\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Content\", name=\"json\", method=\"load_files_json\"),\n )\n\n advanced_mode = frontend_node.get(\"template\", {}).get(\"advanced_mode\", {}).get(\"value\", False)\n if advanced_mode:\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Structured Output\", name=\"advanced_dataframe\", method=\"load_files_dataframe\"),\n )\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Markdown\", name=\"advanced_markdown\", method=\"load_files_markdown\"),\n )\n frontend_node[\"outputs\"].append(\n Output(display_name=\"File Path\", name=\"path\", method=\"load_files_path\"),\n )\n else:\n frontend_node[\"outputs\"].append(\n Output(display_name=\"Raw Content\", name=\"message\", method=\"load_files_message\"),\n )\n frontend_node[\"outputs\"].append(\n Output(display_name=\"File Path\", name=\"path\", method=\"load_files_path\"),\n )\n else:\n # Multiple files => DataFrame output; advanced parser disabled\n frontend_node[\"outputs\"].append(Output(display_name=\"Files\", name=\"dataframe\", method=\"load_files\"))\n\n return frontend_node\n\n # ------------------------------ Core processing ----------------------------------\n\n def _is_docling_compatible(self, file_path: str) -> bool:\n \"\"\"Lightweight extension gate for Docling-compatible types.\"\"\"\n docling_exts = (\n \".adoc\",\n \".asciidoc\",\n \".asc\",\n \".bmp\",\n \".csv\",\n \".dotx\",\n \".dotm\",\n \".docm\",\n \".docx\",\n \".htm\",\n \".html\",\n \".jpeg\",\n \".json\",\n \".md\",\n \".pdf\",\n \".png\",\n \".potx\",\n \".ppsx\",\n \".pptm\",\n \".potm\",\n \".ppsm\",\n \".pptx\",\n \".tiff\",\n \".txt\",\n \".xls\",\n \".xlsx\",\n \".xhtml\",\n \".xml\",\n \".webp\",\n )\n return file_path.lower().endswith(docling_exts)\n\n def _process_docling_in_subprocess(self, file_path: str) -> Data | None:\n \"\"\"Run Docling in a separate OS process and map the result to a Data object.\n\n We avoid multiprocessing pickling by launching `python -c \"<script>\"` and\n passing JSON config via stdin. 
The child prints a JSON result to stdout.\n \"\"\"\n if not file_path:\n return None\n\n args: dict[str, Any] = {\n \"file_path\": file_path,\n \"markdown\": bool(self.markdown),\n \"image_mode\": str(self.IMAGE_MODE),\n \"md_image_placeholder\": str(self.md_image_placeholder),\n \"md_page_break_placeholder\": str(self.md_page_break_placeholder),\n \"pipeline\": str(self.pipeline),\n \"ocr_engine\": (\n self.ocr_engine if self.ocr_engine and self.ocr_engine != \"None\" and self.pipeline != \"vlm\" else None\n ),\n }\n\n self.log(f\"Starting Docling subprocess for file: {file_path}\")\n self.log(args)\n\n # Child script for isolating the docling processing\n child_script = textwrap.dedent(\n r\"\"\"\n import json, sys\n\n def try_imports():\n # Strategy 1: latest layout\n try:\n from docling.datamodel.base_models import ConversionStatus, InputFormat # type: ignore\n from docling.document_converter import DocumentConverter # type: ignore\n from docling_core.types.doc import ImageRefMode # type: ignore\n return ConversionStatus, InputFormat, DocumentConverter, ImageRefMode, \"latest\"\n except Exception:\n pass\n # Strategy 2: alternative layout\n try:\n from docling.document_converter import DocumentConverter # type: ignore\n try:\n from docling_core.types import ConversionStatus, InputFormat # type: ignore\n except Exception:\n try:\n from docling.datamodel import ConversionStatus, InputFormat # type: ignore\n except Exception:\n class ConversionStatus: SUCCESS = \"success\"\n class InputFormat:\n PDF=\"pdf\"; IMAGE=\"image\"\n try:\n from docling_core.types.doc import ImageRefMode # type: ignore\n except Exception:\n class ImageRefMode:\n PLACEHOLDER=\"placeholder\"; EMBEDDED=\"embedded\"\n return ConversionStatus, InputFormat, DocumentConverter, ImageRefMode, \"alternative\"\n except Exception:\n pass\n # Strategy 3: basic converter only\n try:\n from docling.document_converter import DocumentConverter # type: ignore\n class ConversionStatus: SUCCESS = \"success\"\n class InputFormat:\n PDF=\"pdf\"; IMAGE=\"image\"\n class ImageRefMode:\n PLACEHOLDER=\"placeholder\"; EMBEDDED=\"embedded\"\n return ConversionStatus, InputFormat, DocumentConverter, ImageRefMode, \"basic\"\n except Exception as e:\n raise ImportError(f\"Docling imports failed: {e}\") from e\n\n def create_converter(strategy, input_format, DocumentConverter, pipeline, ocr_engine):\n # --- Standard PDF/IMAGE pipeline (your existing behavior), with optional OCR ---\n if pipeline == \"standard\":\n try:\n from docling.datamodel.pipeline_options import PdfPipelineOptions # type: ignore\n from docling.document_converter import PdfFormatOption # type: ignore\n\n pipe = PdfPipelineOptions()\n pipe.do_ocr = False\n\n if ocr_engine:\n try:\n from docling.models.factories import get_ocr_factory # type: ignore\n pipe.do_ocr = True\n fac = get_ocr_factory(allow_external_plugins=False)\n pipe.ocr_options = fac.create_options(kind=ocr_engine)\n except Exception:\n # If OCR setup fails, disable it\n pipe.do_ocr = False\n\n fmt = {}\n if hasattr(input_format, \"PDF\"):\n fmt[getattr(input_format, \"PDF\")] = PdfFormatOption(pipeline_options=pipe)\n if hasattr(input_format, \"IMAGE\"):\n fmt[getattr(input_format, \"IMAGE\")] = PdfFormatOption(pipeline_options=pipe)\n\n return DocumentConverter(format_options=fmt)\n except Exception:\n return DocumentConverter()\n\n # --- Vision-Language Model (VLM) pipeline ---\n if pipeline == \"vlm\":\n try:\n from docling.pipeline.vlm_pipeline import VlmPipeline\n from docling.document_converter import 
PdfFormatOption # type: ignore\n\n vl_pipe = VlmPipelineOptions()\n\n # VLM paths generally don't need OCR; keep OCR off by default here.\n fmt = {}\n if hasattr(input_format, \"PDF\"):\n fmt[getattr(input_format, \"PDF\")] = PdfFormatOption(pipeline_cls=VlmPipeline)\n if hasattr(input_format, \"IMAGE\"):\n fmt[getattr(input_format, \"IMAGE\")] = PdfFormatOption(pipeline_cls=VlmPipeline)\n\n return DocumentConverter(format_options=fmt)\n except Exception:\n return DocumentConverter()\n\n # --- Fallback: default converter with no special options ---\n return DocumentConverter()\n\n def export_markdown(document, ImageRefMode, image_mode, img_ph, pg_ph):\n try:\n mode = getattr(ImageRefMode, image_mode.upper(), image_mode)\n return document.export_to_markdown(\n image_mode=mode,\n image_placeholder=img_ph,\n page_break_placeholder=pg_ph,\n )\n except Exception:\n try:\n return document.export_to_text()\n except Exception:\n return str(document)\n\n def to_rows(doc_dict):\n rows = []\n for t in doc_dict.get(\"texts\", []):\n prov = t.get(\"prov\") or []\n page_no = None\n if prov and isinstance(prov, list) and isinstance(prov[0], dict):\n page_no = prov[0].get(\"page_no\")\n rows.append({\n \"page_no\": page_no,\n \"label\": t.get(\"label\"),\n \"text\": t.get(\"text\"),\n \"level\": t.get(\"level\"),\n })\n return rows\n\n def main():\n cfg = json.loads(sys.stdin.read())\n file_path = cfg[\"file_path\"]\n markdown = cfg[\"markdown\"]\n image_mode = cfg[\"image_mode\"]\n img_ph = cfg[\"md_image_placeholder\"]\n pg_ph = cfg[\"md_page_break_placeholder\"]\n pipeline = cfg[\"pipeline\"]\n ocr_engine = cfg.get(\"ocr_engine\")\n meta = {\"file_path\": file_path}\n\n try:\n ConversionStatus, InputFormat, DocumentConverter, ImageRefMode, strategy = try_imports()\n converter = create_converter(strategy, InputFormat, DocumentConverter, pipeline, ocr_engine)\n try:\n res = converter.convert(file_path)\n except Exception as e:\n print(json.dumps({\"ok\": False, \"error\": f\"Docling conversion error: {e}\", \"meta\": meta}))\n return\n\n ok = False\n if hasattr(res, \"status\"):\n try:\n ok = (res.status == ConversionStatus.SUCCESS) or (str(res.status).lower() == \"success\")\n except Exception:\n ok = (str(res.status).lower() == \"success\")\n if not ok and hasattr(res, \"document\"):\n ok = getattr(res, \"document\", None) is not None\n if not ok:\n print(json.dumps({\"ok\": False, \"error\": \"Docling conversion failed\", \"meta\": meta}))\n return\n\n doc = getattr(res, \"document\", None)\n if doc is None:\n print(json.dumps({\"ok\": False, \"error\": \"Docling produced no document\", \"meta\": meta}))\n return\n\n if markdown:\n text = export_markdown(doc, ImageRefMode, image_mode, img_ph, pg_ph)\n print(json.dumps({\"ok\": True, \"mode\": \"markdown\", \"text\": text, \"meta\": meta}))\n return\n\n # structured\n try:\n doc_dict = doc.export_to_dict()\n except Exception as e:\n print(json.dumps({\"ok\": False, \"error\": f\"Docling export_to_dict failed: {e}\", \"meta\": meta}))\n return\n\n rows = to_rows(doc_dict)\n print(json.dumps({\"ok\": True, \"mode\": \"structured\", \"doc\": rows, \"meta\": meta}))\n except Exception as e:\n print(\n json.dumps({\n \"ok\": False,\n \"error\": f\"Docling processing error: {e}\",\n \"meta\": {\"file_path\": file_path},\n })\n )\n\n if __name__ == \"__main__\":\n main()\n \"\"\"\n )\n\n # Validate file_path to avoid command injection or unsafe input\n if not isinstance(args[\"file_path\"], str) or any(c in args[\"file_path\"] for c in [\";\", \"|\", \"&\", 
\"$\", \"`\"]):\n return Data(data={\"error\": \"Unsafe file path detected.\", \"file_path\": args[\"file_path\"]})\n\n proc = subprocess.run( # noqa: S603\n [sys.executable, \"-u\", \"-c\", child_script],\n input=json.dumps(args).encode(\"utf-8\"),\n capture_output=True,\n check=False,\n )\n\n if not proc.stdout:\n err_msg = proc.stderr.decode(\"utf-8\", errors=\"replace\") or \"no output from child process\"\n return Data(data={\"error\": f\"Docling subprocess error: {err_msg}\", \"file_path\": file_path})\n\n try:\n result = json.loads(proc.stdout.decode(\"utf-8\"))\n except Exception as e: # noqa: BLE001\n err_msg = proc.stderr.decode(\"utf-8\", errors=\"replace\")\n return Data(\n data={\"error\": f\"Invalid JSON from Docling subprocess: {e}. stderr={err_msg}\", \"file_path\": file_path},\n )\n\n if not result.get(\"ok\"):\n return Data(data={\"error\": result.get(\"error\", \"Unknown Docling error\"), **result.get(\"meta\", {})})\n\n meta = result.get(\"meta\", {})\n if result.get(\"mode\") == \"markdown\":\n exported_content = str(result.get(\"text\", \"\"))\n return Data(\n text=exported_content,\n data={\"exported_content\": exported_content, \"export_format\": self.EXPORT_FORMAT, **meta},\n )\n\n rows = list(result.get(\"doc\", []))\n return Data(data={\"doc\": rows, \"export_format\": self.EXPORT_FORMAT, **meta})\n\n def process_files(\n self,\n file_list: list[BaseFileComponent.BaseFile],\n ) -> list[BaseFileComponent.BaseFile]:\n \"\"\"Process input files.\n\n - Single file + advanced_mode => Docling in a separate process.\n - Otherwise => standard parsing in current process (optionally threaded).\n \"\"\"\n if not file_list:\n msg = \"No files to process.\"\n raise ValueError(msg)\n\n def process_file_standard(file_path: str, *, silent_errors: bool = False) -> Data | None:\n try:\n return parse_text_file_to_data(file_path, silent_errors=silent_errors)\n except FileNotFoundError as e:\n self.log(f\"File not found: {file_path}. 
Error: {e}\")\n if not silent_errors:\n raise\n return None\n except Exception as e:\n self.log(f\"Unexpected error processing {file_path}: {e}\")\n if not silent_errors:\n raise\n return None\n\n # Advanced path: only for a single Docling-compatible file\n if len(file_list) == 1:\n file_path = str(file_list[0].path)\n if self.advanced_mode and self._is_docling_compatible(file_path):\n advanced_data: Data | None = self._process_docling_in_subprocess(file_path)\n\n # --- UNNEST: expand each element in `doc` to its own Data row\n payload = getattr(advanced_data, \"data\", {}) or {}\n doc_rows = payload.get(\"doc\")\n if isinstance(doc_rows, list):\n rows: list[Data | None] = [\n Data(\n data={\n \"file_path\": file_path,\n **(item if isinstance(item, dict) else {\"value\": item}),\n },\n )\n for item in doc_rows\n ]\n return self.rollup_data(file_list, rows)\n\n # If not structured, keep as-is (e.g., markdown export or error dict)\n return self.rollup_data(file_list, [advanced_data])\n\n # Standard multi-file (or single non-advanced) path\n concurrency = 1 if not self.use_multithreading else max(1, self.concurrency_multithreading)\n file_paths = [str(f.path) for f in file_list]\n self.log(f\"Starting parallel processing of {len(file_paths)} files with concurrency: {concurrency}.\")\n my_data = parallel_load_data(\n file_paths,\n silent_errors=self.silent_errors,\n load_function=process_file_standard,\n max_concurrency=concurrency,\n )\n return self.rollup_data(file_list, my_data)\n\n # ------------------------------ Output helpers -----------------------------------\n\n def load_files_helper(self) -> DataFrame:\n result = self.load_files()\n\n # Error condition - raise error if no text and an error is present\n if not hasattr(result, \"text\"):\n if hasattr(result, \"error\"):\n raise ValueError(result.error[0])\n msg = \"No content generated.\"\n raise ValueError(msg)\n\n return result\n\n def load_files_dataframe(self) -> DataFrame:\n \"\"\"Load files using advanced Docling processing and export to DataFrame format.\"\"\"\n self.markdown = False\n return self.load_files_helper()\n\n def load_files_markdown(self) -> Message:\n \"\"\"Load files using advanced Docling processing and export to Markdown format.\"\"\"\n self.markdown = True\n result = self.load_files_helper()\n return Message(text=str(result.text[0]))\n"
|
|
},
|
|
"concurrency_multithreading": {
|
|
"_input_type": "IntInput",
|
|
"advanced": true,
|
|
"display_name": "Processing Concurrency",
|
|
"dynamic": false,
|
|
"info": "When multiple files are being processed, the number of files to process concurrently.",
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"name": "concurrency_multithreading",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "int",
|
|
"value": 1
|
|
},
|
|
"delete_server_file_after_processing": {
|
|
"_input_type": "BoolInput",
|
|
"advanced": true,
|
|
"display_name": "Delete Server File After Processing",
|
|
"dynamic": false,
|
|
"info": "If true, the Server File Path will be deleted after processing.",
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"name": "delete_server_file_after_processing",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "bool",
|
|
"value": true
|
|
},
|
|
"doc_key": {
|
|
"_input_type": "MessageTextInput",
|
|
"advanced": true,
|
|
"display_name": "Doc Key",
|
|
"dynamic": false,
|
|
"info": "The key to use for the DoclingDocument column.",
|
|
"input_types": [
|
|
"Message"
|
|
],
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"load_from_db": false,
|
|
"name": "doc_key",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": false,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_input": true,
|
|
"trace_as_metadata": true,
|
|
"type": "str",
|
|
"value": "doc"
|
|
},
|
|
"file_path": {
|
|
"_input_type": "HandleInput",
|
|
"advanced": true,
|
|
"display_name": "Server File Path",
|
|
"dynamic": false,
|
|
"info": "Data object with a 'file_path' property pointing to server file or a Message object with a path to the file. Supercedes 'Path' but supports same file types.",
|
|
"input_types": [
|
|
"Data",
|
|
"Message"
|
|
],
|
|
"list": true,
|
|
"list_add_label": "Add More",
|
|
"name": "file_path",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"trace_as_metadata": true,
|
|
"type": "other",
|
|
"value": ""
|
|
},
|
|
"ignore_unspecified_files": {
|
|
"_input_type": "BoolInput",
|
|
"advanced": true,
|
|
"display_name": "Ignore Unspecified Files",
|
|
"dynamic": false,
|
|
"info": "If true, Data with no 'file_path' property will be ignored.",
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"name": "ignore_unspecified_files",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "bool",
|
|
"value": false
|
|
},
|
|
"ignore_unsupported_extensions": {
|
|
"_input_type": "BoolInput",
|
|
"advanced": true,
|
|
"display_name": "Ignore Unsupported Extensions",
|
|
"dynamic": false,
|
|
"info": "If true, files with unsupported extensions will not be processed.",
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"name": "ignore_unsupported_extensions",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "bool",
|
|
"value": true
|
|
},
|
|
"markdown": {
|
|
"_input_type": "BoolInput",
|
|
"advanced": false,
|
|
"display_name": "Markdown Export",
|
|
"dynamic": false,
|
|
"info": "Export processed documents to Markdown format. Only available when advanced mode is enabled.",
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"name": "markdown",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": false,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "bool",
|
|
"value": false
|
|
},
|
|
"md_image_placeholder": {
|
|
"_input_type": "StrInput",
|
|
"advanced": true,
|
|
"display_name": "Image placeholder",
|
|
"dynamic": false,
|
|
"info": "Specify the image placeholder for markdown exports.",
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"load_from_db": false,
|
|
"name": "md_image_placeholder",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": false,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "str",
|
|
"value": "<!-- image -->"
|
|
},
|
|
"md_page_break_placeholder": {
|
|
"_input_type": "StrInput",
|
|
"advanced": true,
|
|
"display_name": "Page break placeholder",
|
|
"dynamic": false,
|
|
"info": "Add this placeholder between pages in the markdown output.",
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"load_from_db": false,
|
|
"name": "md_page_break_placeholder",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": false,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "str",
|
|
"value": ""
|
|
},
|
|
"ocr_engine": {
|
|
"_input_type": "DropdownInput",
|
|
"advanced": true,
|
|
"combobox": false,
|
|
"dialog_inputs": {},
|
|
"display_name": "OCR Engine",
|
|
"dynamic": false,
|
|
"external_options": {},
|
|
"info": "OCR engine to use. Only available when pipeline is set to 'standard'.",
|
|
"load_from_db": false,
|
|
"name": "ocr_engine",
|
|
"options": [
|
|
"None",
|
|
"easyocr"
|
|
],
|
|
"options_metadata": [],
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": false,
|
|
"title_case": false,
|
|
"toggle": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "str",
|
|
"value": ""
|
|
},
|
|
"path": {
|
|
"_input_type": "FileInput",
|
|
"advanced": false,
|
|
"display_name": "Files",
|
|
"dynamic": false,
|
|
"fileTypes": [
|
|
"csv",
|
|
"json",
|
|
"pdf",
|
|
"txt",
|
|
"md",
|
|
"mdx",
|
|
"yaml",
|
|
"yml",
|
|
"xml",
|
|
"html",
|
|
"htm",
|
|
"docx",
|
|
"py",
|
|
"sh",
|
|
"sql",
|
|
"js",
|
|
"ts",
|
|
"tsx",
|
|
"adoc",
|
|
"asciidoc",
|
|
"asc",
|
|
"bmp",
|
|
"dotx",
|
|
"dotm",
|
|
"docm",
|
|
"jpeg",
|
|
"png",
|
|
"potx",
|
|
"ppsx",
|
|
"pptm",
|
|
"potm",
|
|
"ppsm",
|
|
"pptx",
|
|
"tiff",
|
|
"xls",
|
|
"xlsx",
|
|
"xhtml",
|
|
"webp",
|
|
"zip",
|
|
"tar",
|
|
"tgz",
|
|
"bz2",
|
|
"gz"
|
|
],
|
|
"file_path": [],
|
|
"info": "Supported file extensions: csv, json, pdf, txt, md, mdx, yaml, yml, xml, html, htm, docx, py, sh, sql, js, ts, tsx, adoc, asciidoc, asc, bmp, dotx, dotm, docm, jpeg, png, potx, ppsx, pptm, potm, ppsm, pptx, tiff, xls, xlsx, xhtml, webp; optionally bundled in file extensions: zip, tar, tgz, bz2, gz",
|
|
"list": true,
|
|
"list_add_label": "Add More",
|
|
"name": "path",
|
|
"placeholder": "",
|
|
"real_time_refresh": true,
|
|
"required": false,
|
|
"show": true,
|
|
"temp_file": false,
|
|
"title_case": false,
|
|
"trace_as_metadata": true,
|
|
"type": "file",
|
|
"value": ""
|
|
},
|
|
"pipeline": {
|
|
"_input_type": "DropdownInput",
|
|
"advanced": true,
|
|
"combobox": false,
|
|
"dialog_inputs": {},
|
|
"display_name": "Pipeline",
|
|
"dynamic": false,
|
|
"external_options": {},
|
|
"info": "Docling pipeline to use",
|
|
"name": "pipeline",
|
|
"options": [
|
|
"standard",
|
|
"vlm"
|
|
],
|
|
"options_metadata": [],
|
|
"placeholder": "",
|
|
"real_time_refresh": true,
|
|
"required": false,
|
|
"show": false,
|
|
"title_case": false,
|
|
"toggle": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "str",
|
|
"value": "standard"
|
|
},
|
|
"separator": {
|
|
"_input_type": "StrInput",
|
|
"advanced": true,
|
|
"display_name": "Separator",
|
|
"dynamic": false,
|
|
"info": "Specify the separator to use between multiple outputs in Message format.",
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"load_from_db": false,
|
|
"name": "separator",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "str",
|
|
"value": "\n\n"
|
|
},
|
|
"silent_errors": {
|
|
"_input_type": "BoolInput",
|
|
"advanced": true,
|
|
"display_name": "Silent Errors",
|
|
"dynamic": false,
|
|
"info": "If true, errors will not raise an exception.",
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"name": "silent_errors",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "bool",
|
|
"value": false
|
|
},
|
|
"use_multithreading": {
|
|
"_input_type": "BoolInput",
|
|
"advanced": true,
|
|
"display_name": "[Deprecated] Use Multithreading",
|
|
"dynamic": false,
|
|
"info": "Set 'Processing Concurrency' greater than 1 to enable multithreading.",
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"name": "use_multithreading",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "bool",
|
|
"value": true
|
|
}
|
|
},
|
|
"tool_mode": false
|
|
},
|
|
"selected_output": "message",
|
|
"showNode": true,
|
|
"type": "File"
|
|
},
|
|
"dragging": false,
|
|
"id": "File-PSU37",
|
|
"measured": {
|
|
"height": 214,
|
|
"width": 320
|
|
},
|
|
"position": {
|
|
"x": 1270.0395728258152,
|
|
"y": 1372.889208749833
|
|
},
|
|
"selected": false,
|
|
"type": "genericNode"
|
|
},
|
|
{
|
|
"data": {
|
|
"id": "OpenSearchHybrid-Ve6bS",
|
|
"node": {
|
|
"base_classes": [
|
|
"Data",
|
|
"DataFrame",
|
|
"VectorStore"
|
|
],
|
|
"beta": false,
|
|
"conditional_paths": [],
|
|
"custom_fields": {},
|
|
"description": "Store and search documents using OpenSearch with hybrid semantic and keyword search capabilities.",
|
|
"display_name": "OpenSearch",
|
|
"documentation": "",
|
|
"edited": true,
|
|
"field_order": [
|
|
"docs_metadata",
|
|
"opensearch_url",
|
|
"index_name",
|
|
"engine",
|
|
"space_type",
|
|
"ef_construction",
|
|
"m",
|
|
"ingest_data",
|
|
"search_query",
|
|
"should_cache_vector_store",
|
|
"embedding",
|
|
"vector_field",
|
|
"number_of_results",
|
|
"filter_expression",
|
|
"auth_mode",
|
|
"username",
|
|
"password",
|
|
"jwt_token",
|
|
"jwt_header",
|
|
"bearer_prefix",
|
|
"use_ssl",
|
|
"verify_certs"
|
|
],
|
|
"frozen": false,
|
|
"icon": "OpenSearch",
|
|
"legacy": false,
|
|
"metadata": {
|
|
"code_hash": "c81b23acb81a",
|
|
"dependencies": {
|
|
"dependencies": [
|
|
{
|
|
"name": "opensearchpy",
|
|
"version": "2.8.0"
|
|
},
|
|
{
|
|
"name": "lfx",
|
|
"version": null
|
|
}
|
|
],
|
|
"total_dependencies": 2
|
|
},
|
|
"module": "custom_components.opensearch"
|
|
},
|
|
"minimized": false,
|
|
"output_types": [],
|
|
"outputs": [
|
|
{
|
|
"allows_loop": false,
|
|
"cache": true,
|
|
"display_name": "Search Results",
|
|
"group_outputs": false,
|
|
"hidden": null,
|
|
"method": "search_documents",
|
|
"name": "search_results",
|
|
"options": null,
|
|
"required_inputs": null,
|
|
"selected": "Data",
|
|
"tool_mode": true,
|
|
"types": [
|
|
"Data"
|
|
],
|
|
"value": "__UNDEFINED__"
|
|
},
|
|
{
|
|
"allows_loop": false,
|
|
"cache": true,
|
|
"display_name": "DataFrame",
|
|
"group_outputs": false,
|
|
"hidden": null,
|
|
"method": "as_dataframe",
|
|
"name": "dataframe",
|
|
"options": null,
|
|
"required_inputs": null,
|
|
"selected": "DataFrame",
|
|
"tool_mode": true,
|
|
"types": [
|
|
"DataFrame"
|
|
],
|
|
"value": "__UNDEFINED__"
|
|
},
|
|
{
|
|
"allows_loop": false,
|
|
"cache": true,
|
|
"display_name": "Vector Store Connection",
|
|
"group_outputs": false,
|
|
"hidden": false,
|
|
"method": "as_vector_store",
|
|
"name": "vectorstoreconnection",
|
|
"options": null,
|
|
"required_inputs": null,
|
|
"selected": "VectorStore",
|
|
"tool_mode": true,
|
|
"types": [
|
|
"VectorStore"
|
|
],
|
|
"value": "__UNDEFINED__"
|
|
}
|
|
],
|
|
"pinned": false,
|
|
"template": {
|
|
"_type": "Component",
|
|
"auth_mode": {
|
|
"_input_type": "DropdownInput",
|
|
"advanced": false,
|
|
"combobox": false,
|
|
"dialog_inputs": {},
|
|
"display_name": "Authentication Mode",
|
|
"dynamic": false,
|
|
"external_options": {},
|
|
"info": "Authentication method: 'basic' for username/password authentication, or 'jwt' for JSON Web Token (Bearer) authentication.",
|
|
"load_from_db": false,
|
|
"name": "auth_mode",
|
|
"options": [
|
|
"basic",
|
|
"jwt"
|
|
],
|
|
"options_metadata": [],
|
|
"placeholder": "",
|
|
"real_time_refresh": true,
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"toggle": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "str",
|
|
"value": "jwt"
|
|
},
|
|
"bearer_prefix": {
|
|
"_input_type": "BoolInput",
|
|
"advanced": true,
|
|
"display_name": "Prefix 'Bearer '",
|
|
"dynamic": false,
|
|
"info": "",
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"name": "bearer_prefix",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": false,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "bool",
|
|
"value": true
|
|
},
|
|
"code": {
|
|
"advanced": true,
|
|
"dynamic": true,
|
|
"fileTypes": [],
|
|
"file_path": "",
|
|
"info": "",
|
|
"list": false,
|
|
"load_from_db": false,
|
|
"multiline": true,
|
|
"name": "code",
|
|
"password": false,
|
|
"placeholder": "",
|
|
"required": true,
|
|
"show": true,
|
|
"title_case": false,
|
|
"type": "code",
|
|
"value": "from __future__ import annotations\n\nimport json\nimport uuid\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponent(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. It supports document\n ingestion, vector embeddings, and advanced filtering with authentication options.\n\n Features:\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Hybrid search combining KNN vector similarity and keyword matching\n - Flexible authentication (Basic auth, JWT tokens)\n - Advanced filtering and aggregations\n - Metadata injection during document ingestion\n \"\"\"\n\n display_name: str = \"OpenSearch\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with hybrid semantic and keyword search capabilities.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n # advanced=True,\n input_types=[\"Data\"]\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
\"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"]),\n StrInput(\n name=\"vector_field\",\n display_name=\"Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=\"Name of the field in OpenSearch documents that stores the vector embeddings for similarity search.\",\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=False,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=False,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=True,\n info=(\n \"Valid JSON Web Token for authentication. 
\"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. \"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n }\n }\n },\n }\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. 
This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or 
\"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our “vector store.”\n self.log(self.ingest_data)\n client = self.build_client()\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings\n - Creates appropriate index mappings\n - Bulk inserts documents with vectors\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n docs = self.ingest_data or []\n if not docs:\n self.log(\"No documents to ingest.\")\n return\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Generate embeddings\n vectors = self.embedding.embed_documents(texts)\n\n if not vectors:\n self.log(\"No vectors generated from documents.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, 
\"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=self.vector_field,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with proper KNN mapping...\")\n\n # Use the LangChain-style bulk ingestion\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=self.vector_field,\n text_field=\"text\",\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n self.log(f\"Successfully indexed {len(return_ids)} documents.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n # ---------- search (single hybrid path matching your tool) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform hybrid search combining vector similarity and keyword matching.\n\n This method executes a 
sophisticated search that combines:\n - K-nearest neighbor (KNN) vector similarity search (70% weight)\n - Multi-field keyword search with fuzzy matching (30% weight)\n - Optional filtering and score thresholds\n - Aggregations for faceted search results\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression (can be either A or B shape; see _coerce_filter_clauses)\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Embed the query\n vec = self.embedding.embed_query(q)\n\n # Build filter clauses (accept both shapes)\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Respect the tool's limit/threshold defaults\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build the same hybrid body as your SearchService\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"knn\": {\n self.vector_field: {\n \"vector\": vec,\n \"k\": 10, # fixed to match the tool\n \"boost\": 0.7,\n }\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3,\n }\n },\n ],\n \"minimum_should_match\": 1,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n if filter_clauses:\n body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n # top-level min_score (matches your tool)\n body[\"min_score\"] = score_threshold\n\n resp = client.search(index=self.index_name, body=body)\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the search using the\n configured search_query and returns results in Langflow's Data format.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n raw = self.search(self.search_query or \"\")\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n self.log(self.ingest_data)\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- 
dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n if is_basic:\n build_config[\"jwt_token\"][\"value\"] = \"\"\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n"
},
"docs_metadata": {
"_input_type": "TableInput",
"advanced": false,
"display_name": "Document Metadata",
"dynamic": false,
"info": "Additional metadata key-value pairs to be added to all ingested documents. Useful for tagging documents with source information, categories, or other custom attributes.",
"input_types": [
"Data"
],
"is_list": true,
"list_add_label": "Add More",
"name": "docs_metadata",
"placeholder": "",
"required": false,
"show": true,
"table_icon": "Table",
"table_schema": [
{
"description": "Key name",
"display_name": "Key",
"formatter": "text",
"name": "key",
"type": "str"
},
{
"description": "Value of the metadata",
"display_name": "Value",
"formatter": "text",
"name": "value",
"type": "str"
}
],
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"trigger_icon": "Table",
"trigger_text": "Open table",
"type": "table",
"value": []
},
"ef_construction": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "EF Construction",
"dynamic": false,
"info": "Size of the dynamic candidate list during index construction. Higher values improve recall but increase indexing time and memory usage.",
"list": false,
"list_add_label": "Add More",
"name": "ef_construction",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "int",
"value": 512
},
"embedding": {
"_input_type": "HandleInput",
"advanced": false,
"display_name": "Embedding",
"dynamic": false,
"info": "",
"input_types": [
"Embeddings"
],
"list": false,
"list_add_label": "Add More",
"name": "embedding",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "other",
"value": ""
},
"engine": {
"_input_type": "DropdownInput",
"advanced": true,
"combobox": false,
"dialog_inputs": {},
"display_name": "Vector Engine",
"dynamic": false,
"external_options": {},
"info": "Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.",
"load_from_db": false,
"name": "engine",
"options": [
"jvector",
"nmslib",
"faiss",
"lucene"
],
"options_metadata": [],
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"toggle": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "nmslib"
},
"filter_expression": {
|
|
"_input_type": "MultilineInput",
|
|
"advanced": false,
|
|
"copy_field": false,
|
|
"display_name": "Search Filters (JSON)",
|
|
"dynamic": false,
|
|
"info": "Optional JSON configuration for search filtering, result limits, and score thresholds.\n\nFormat 1 - Explicit filters:\n{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, {\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\n\nFormat 2 - Context-style mapping:\n{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\n\nUse __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.",
|
|
"input_types": [
|
|
"Message"
|
|
],
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"load_from_db": false,
|
|
"multiline": true,
|
|
"name": "filter_expression",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_input": true,
|
|
"trace_as_metadata": true,
|
|
"type": "str",
|
|
"value": ""
|
|
},
|
|
"index_name": {
|
|
"_input_type": "StrInput",
|
|
"advanced": false,
|
|
"display_name": "Index Name",
|
|
"dynamic": false,
|
|
"info": "The OpenSearch index name where documents will be stored and searched. Will be created automatically if it doesn't exist.",
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"load_from_db": false,
|
|
"name": "index_name",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "str",
|
|
"value": "documents"
|
|
},
|
|
"ingest_data": {
|
|
"_input_type": "HandleInput",
|
|
"advanced": false,
|
|
"display_name": "Ingest Data",
|
|
"dynamic": false,
|
|
"info": "",
|
|
"input_types": [
|
|
"Data",
|
|
"DataFrame"
|
|
],
|
|
"list": true,
|
|
"list_add_label": "Add More",
|
|
"name": "ingest_data",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"trace_as_metadata": true,
|
|
"type": "other",
|
|
"value": ""
|
|
},
|
|
"jwt_header": {
|
|
"_input_type": "StrInput",
|
|
"advanced": true,
|
|
"display_name": "JWT Header Name",
|
|
"dynamic": false,
|
|
"info": "",
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"load_from_db": false,
|
|
"name": "jwt_header",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": false,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "str",
|
|
"value": "Authorization"
|
|
},
|
|
"jwt_token": {
|
|
"_input_type": "SecretStrInput",
|
|
"advanced": false,
|
|
"display_name": "JWT Token",
|
|
"dynamic": false,
|
|
"info": "Valid JSON Web Token for authentication. Will be sent in the Authorization header (with optional 'Bearer ' prefix).",
|
|
"input_types": [],
|
|
"load_from_db": false,
|
|
"name": "jwt_token",
|
|
"password": true,
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"type": "str",
|
|
"value": ""
|
|
},
|
|
"m": {
|
|
"_input_type": "IntInput",
|
|
"advanced": true,
|
|
"display_name": "M Parameter",
|
|
"dynamic": false,
|
|
"info": "Number of bidirectional connections for each vector in the HNSW graph. Higher values improve search quality but increase memory usage and indexing time.",
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"name": "m",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "int",
|
|
"value": 16
|
|
},
|
|
"number_of_results": {
|
|
"_input_type": "IntInput",
|
|
"advanced": true,
|
|
"display_name": "Default Result Limit",
|
|
"dynamic": false,
|
|
"info": "Default maximum number of search results to return when no limit is specified in the filter expression.",
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"load_from_db": false,
|
|
"name": "number_of_results",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "int",
|
|
"value": 15
|
|
},
|
|
"opensearch_url": {
|
|
"_input_type": "StrInput",
|
|
"advanced": false,
|
|
"display_name": "OpenSearch URL",
|
|
"dynamic": false,
|
|
"info": "The connection URL for your OpenSearch cluster (e.g., http://localhost:9200 for local development or your cloud endpoint).",
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"load_from_db": false,
|
|
"name": "opensearch_url",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "str",
|
|
"value": "https://opensearch:9200"
|
|
},
|
|
"password": {
|
|
"_input_type": "SecretStrInput",
|
|
"advanced": false,
|
|
"display_name": "OpenSearch Password",
|
|
"dynamic": false,
|
|
"info": "",
|
|
"input_types": [],
|
|
"load_from_db": false,
|
|
"name": "password",
|
|
"password": true,
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": false,
|
|
"title_case": false,
|
|
"type": "str",
|
|
"value": ""
|
|
},
|
|
"search_query": {
|
|
"_input_type": "QueryInput",
|
|
"advanced": false,
|
|
"display_name": "Search Query",
|
|
"dynamic": false,
|
|
"info": "Enter a query to run a similarity search.",
|
|
"input_types": [
|
|
"Message"
|
|
],
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"load_from_db": false,
|
|
"name": "search_query",
|
|
"placeholder": "Enter a query...",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"tool_mode": true,
|
|
"trace_as_input": true,
|
|
"trace_as_metadata": true,
|
|
"type": "query",
|
|
"value": ""
|
|
},
|
|
"should_cache_vector_store": {
|
|
"_input_type": "BoolInput",
|
|
"advanced": true,
|
|
"display_name": "Cache Vector Store",
|
|
"dynamic": false,
|
|
"info": "If True, the vector store will be cached for the current build of the component. This is useful for components that have multiple output methods and want to share the same vector store.",
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"name": "should_cache_vector_store",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "bool",
|
|
"value": true
|
|
},
|
|
"space_type": {
|
|
"_input_type": "DropdownInput",
|
|
"advanced": true,
|
|
"combobox": false,
|
|
"dialog_inputs": {},
|
|
"display_name": "Distance Metric",
|
|
"dynamic": false,
|
|
"external_options": {},
|
|
"info": "Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, 'cosinesimil' for cosine similarity, 'innerproduct' for dot product.",
|
|
"name": "space_type",
|
|
"options": [
|
|
"l2",
|
|
"l1",
|
|
"cosinesimil",
|
|
"linf",
|
|
"innerproduct"
|
|
],
|
|
"options_metadata": [],
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"toggle": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "str",
|
|
"value": "l2"
|
|
},
|
|
"use_ssl": {
|
|
"_input_type": "BoolInput",
|
|
"advanced": true,
|
|
"display_name": "Use SSL/TLS",
|
|
"dynamic": false,
|
|
"info": "Enable SSL/TLS encryption for secure connections to OpenSearch.",
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"name": "use_ssl",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "bool",
|
|
"value": true
|
|
},
|
|
"username": {
|
|
"_input_type": "StrInput",
|
|
"advanced": false,
|
|
"display_name": "Username",
|
|
"dynamic": false,
|
|
"info": "",
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"load_from_db": false,
|
|
"name": "username",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": false,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "str",
|
|
"value": "admin"
|
|
},
|
|
"vector_field": {
|
|
"_input_type": "StrInput",
|
|
"advanced": true,
|
|
"display_name": "Vector Field Name",
|
|
"dynamic": false,
|
|
"info": "Name of the field in OpenSearch documents that stores the vector embeddings for similarity search.",
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"load_from_db": false,
|
|
"name": "vector_field",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "str",
|
|
"value": "chunk_embedding"
|
|
},
|
|
"verify_certs": {
|
|
"_input_type": "BoolInput",
|
|
"advanced": true,
|
|
"display_name": "Verify SSL Certificates",
|
|
"dynamic": false,
|
|
"info": "Verify SSL certificates when connecting. Disable for self-signed certificates in development environments.",
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"name": "verify_certs",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "bool",
|
|
"value": false
|
|
}
|
|
},
|
|
"tool_mode": false
|
|
},
|
|
"selected_output": "search_results",
|
|
"showNode": true,
|
|
"type": "OpenSearchVectorStoreComponent"
|
|
},
|
|
"dragging": false,
|
|
"id": "OpenSearchHybrid-Ve6bS",
|
|
"measured": {
|
|
"height": 822,
|
|
"width": 320
|
|
},
|
|
"position": {
|
|
"x": 2218.9287723423276,
|
|
"y": 1332.2598463956504
|
|
},
|
|
"selected": false,
|
|
"type": "genericNode"
|
|
},
|
|
{
|
|
"data": {
|
|
"description": "Generate embeddings using a specified provider.",
|
|
"display_name": "Embedding Model",
|
|
"id": "EmbeddingModel-eZ6bT",
|
|
"node": {
|
|
"base_classes": [
|
|
"Embeddings"
|
|
],
|
|
"beta": false,
|
|
"conditional_paths": [],
|
|
"custom_fields": {},
|
|
"description": "Generate embeddings using a specified provider.",
|
|
"display_name": "Embedding Model",
|
|
"documentation": "https://docs.langflow.org/components-embedding-models",
|
|
"edited": false,
|
|
"field_order": [
|
|
"provider",
|
|
"model",
|
|
"api_key",
|
|
"api_base",
|
|
"dimensions",
|
|
"chunk_size",
|
|
"request_timeout",
|
|
"max_retries",
|
|
"show_progress_bar",
|
|
"model_kwargs"
|
|
],
|
|
"frozen": false,
|
|
"icon": "binary",
|
|
"last_updated": "2025-09-26T14:37:42.699Z",
|
|
"legacy": false,
|
|
"lf_version": "1.6.0",
|
|
"metadata": {
|
|
"code_hash": "8607e963fdef",
|
|
"dependencies": {
|
|
"dependencies": [
|
|
{
|
|
"name": "langchain_openai",
|
|
"version": "0.3.23"
|
|
},
|
|
{
|
|
"name": "lfx",
|
|
"version": null
|
|
}
|
|
],
|
|
"total_dependencies": 2
|
|
},
|
|
"module": "custom_components.embedding_model"
|
|
},
|
|
"minimized": false,
|
|
"output_types": [],
|
|
"outputs": [
|
|
{
|
|
"allows_loop": false,
|
|
"cache": true,
|
|
"display_name": "Embedding Model",
|
|
"group_outputs": false,
|
|
"method": "build_embeddings",
|
|
"name": "embeddings",
|
|
"options": null,
|
|
"required_inputs": null,
|
|
"selected": "Embeddings",
|
|
"tool_mode": true,
|
|
"types": [
|
|
"Embeddings"
|
|
],
|
|
"value": "__UNDEFINED__"
|
|
}
|
|
],
|
|
"pinned": false,
|
|
"template": {
|
|
"_type": "Component",
|
|
"api_base": {
|
|
"_input_type": "MessageTextInput",
|
|
"advanced": true,
|
|
"display_name": "API Base URL",
|
|
"dynamic": false,
|
|
"info": "Base URL for the API. Leave empty for default.",
|
|
"input_types": [
|
|
"Message"
|
|
],
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"load_from_db": false,
|
|
"name": "api_base",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_input": true,
|
|
"trace_as_metadata": true,
|
|
"type": "str",
|
|
"value": ""
|
|
},
|
|
"api_key": {
|
|
"_input_type": "SecretStrInput",
|
|
"advanced": false,
|
|
"display_name": "OpenAI API Key",
|
|
"dynamic": false,
|
|
"info": "Model Provider API key",
|
|
"input_types": [],
|
|
"load_from_db": true,
|
|
"name": "api_key",
|
|
"password": true,
|
|
"placeholder": "",
|
|
"real_time_refresh": true,
|
|
"required": true,
|
|
"show": true,
|
|
"title_case": false,
|
|
"type": "str",
|
|
"value": "OPENAI_API_KEY"
|
|
},
|
|
"chunk_size": {
|
|
"_input_type": "IntInput",
|
|
"advanced": true,
|
|
"display_name": "Chunk Size",
|
|
"dynamic": false,
|
|
"info": "",
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"name": "chunk_size",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "int",
|
|
"value": 1000
|
|
},
|
|
"code": {
|
|
"advanced": true,
|
|
"dynamic": true,
|
|
"fileTypes": [],
|
|
"file_path": "",
|
|
"info": "",
|
|
"list": false,
|
|
"load_from_db": false,
|
|
"multiline": true,
|
|
"name": "code",
|
|
"password": false,
|
|
"placeholder": "",
|
|
"required": true,
|
|
"show": true,
|
|
"title_case": false,
|
|
"type": "code",
|
|
"value": "from typing import Any\n\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.schema.dotdict import dotdict\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}],\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. \"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n ]\n\n def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n return OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n def update_build_config(self, build_config: dotdict, field_value: Any, field_name: str | None = None) -> dotdict:\n if field_name == \"provider\" and field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API 
Base URL\"\n return build_config\n"
|
|
},
|
|
"dimensions": {
|
|
"_input_type": "IntInput",
|
|
"advanced": true,
|
|
"display_name": "Dimensions",
|
|
"dynamic": false,
|
|
"info": "The number of dimensions the resulting output embeddings should have. Only supported by certain models.",
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"name": "dimensions",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "int",
|
|
"value": ""
|
|
},
|
|
"max_retries": {
|
|
"_input_type": "IntInput",
|
|
"advanced": true,
|
|
"display_name": "Max Retries",
|
|
"dynamic": false,
|
|
"info": "",
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"name": "max_retries",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "int",
|
|
"value": 3
|
|
},
|
|
"model": {
|
|
"_input_type": "DropdownInput",
|
|
"advanced": false,
|
|
"combobox": false,
|
|
"dialog_inputs": {},
|
|
"display_name": "Model Name",
|
|
"dynamic": false,
|
|
"external_options": {},
|
|
"info": "Select the embedding model to use",
|
|
"name": "model",
|
|
"options": [
|
|
"text-embedding-3-small",
|
|
"text-embedding-3-large",
|
|
"text-embedding-ada-002"
|
|
],
|
|
"options_metadata": [],
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"toggle": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "str",
|
|
"value": "text-embedding-3-small"
|
|
},
|
|
"model_kwargs": {
|
|
"_input_type": "DictInput",
|
|
"advanced": true,
|
|
"display_name": "Model Kwargs",
|
|
"dynamic": false,
|
|
"info": "Additional keyword arguments to pass to the model.",
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"name": "model_kwargs",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_input": true,
|
|
"type": "dict",
|
|
"value": {}
|
|
},
|
|
"provider": {
|
|
"_input_type": "DropdownInput",
|
|
"advanced": false,
|
|
"combobox": false,
|
|
"dialog_inputs": {},
|
|
"display_name": "Model Provider",
|
|
"dynamic": false,
|
|
"external_options": {},
|
|
"info": "Select the embedding model provider",
|
|
"name": "provider",
|
|
"options": [
|
|
"OpenAI"
|
|
],
|
|
"options_metadata": [
|
|
{
|
|
"icon": "OpenAI"
|
|
}
|
|
],
|
|
"placeholder": "",
|
|
"real_time_refresh": true,
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"toggle": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "str",
|
|
"value": "OpenAI"
|
|
},
|
|
"request_timeout": {
|
|
"_input_type": "FloatInput",
|
|
"advanced": true,
|
|
"display_name": "Request Timeout",
|
|
"dynamic": false,
|
|
"info": "",
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"name": "request_timeout",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "float",
|
|
"value": ""
|
|
},
|
|
"show_progress_bar": {
|
|
"_input_type": "BoolInput",
|
|
"advanced": true,
|
|
"display_name": "Show Progress Bar",
|
|
"dynamic": false,
|
|
"info": "",
|
|
"list": false,
|
|
"list_add_label": "Add More",
|
|
"name": "show_progress_bar",
|
|
"placeholder": "",
|
|
"required": false,
|
|
"show": true,
|
|
"title_case": false,
|
|
"tool_mode": false,
|
|
"trace_as_metadata": true,
|
|
"type": "bool",
|
|
"value": false
|
|
}
|
|
},
|
|
"tool_mode": false
|
|
},
|
|
"showNode": true,
|
|
"type": "EmbeddingModel"
|
|
},
|
|
"dragging": false,
|
|
"id": "EmbeddingModel-eZ6bT",
|
|
"measured": {
|
|
"height": 369,
|
|
"width": 320
|
|
},
|
|
"position": {
|
|
"x": 1726.6943524438122,
|
|
"y": 1800.5330404375484
|
|
},
|
|
"selected": false,
|
|
"type": "genericNode"
|
|
},
|
|
{
|
|
"data": {
|
|
"id": "AdvancedDynamicFormBuilder-81Exw",
|
|
"node": {
|
|
"base_classes": [
|
|
"Data",
|
|
"Message"
|
|
],
|
|
"beta": false,
|
|
"conditional_paths": [],
|
|
"custom_fields": {},
|
|
"description": "Creates dynamic input fields that can receive data from other components or manual input.",
|
|
"display_name": "Create Data",
|
|
"documentation": "",
|
|
"edited": true,
|
|
"field_order": [
|
|
"form_fields",
|
|
"include_metadata"
|
|
],
|
|
"frozen": false,
|
|
"icon": "braces",
|
|
"last_updated": "2025-09-26T14:37:42.700Z",
|
|
"legacy": false,
|
|
"lf_version": "1.6.0",
|
|
"metadata": {},
|
|
"minimized": false,
|
|
"output_types": [],
|
|
"outputs": [
|
|
{
|
|
"allows_loop": false,
|
|
"cache": true,
|
|
"display_name": "Data",
|
|
"group_outputs": false,
|
|
"hidden": null,
|
|
"method": "process_form",
|
|
"name": "form_data",
|
|
"options": null,
|
|
"required_inputs": null,
|
|
"selected": "Data",
|
|
"tool_mode": true,
|
|
"types": [
|
|
"Data"
|
|
],
|
|
"value": "__UNDEFINED__"
|
|
},
|
|
{
|
|
"allows_loop": false,
|
|
"cache": true,
|
|
"display_name": "Message",
|
|
"group_outputs": false,
|
|
"hidden": null,
|
|
"method": "get_message",
|
|
"name": "message",
|
|
"options": null,
|
|
"required_inputs": null,
|
|
"selected": "Message",
|
|
"tool_mode": true,
|
|
"types": [
|
|
"Message"
|
|
],
|
|
"value": "__UNDEFINED__"
|
|
}
|
|
],
|
|
"pinned": false,
|
|
"template": {
|
|
"_type": "Component",
|
|
"code": {
|
|
"advanced": true,
|
|
"dynamic": true,
|
|
"fileTypes": [],
|
|
"file_path": "",
|
|
"info": "",
|
|
"list": false,
|
|
"load_from_db": false,
|
|
"multiline": true,
|
|
"name": "code",
|
|
"password": false,
|
|
"placeholder": "",
|
|
"required": true,
|
|
"show": true,
|
|
"title_case": false,
|
|
"type": "code",
|
|
"value": "from typing import Any\r\n\r\nfrom langflow.custom import Component\r\nfrom langflow.io import (\r\n BoolInput,\r\n FloatInput,\r\n HandleInput,\r\n IntInput,\r\n MultilineInput,\r\n Output,\r\n StrInput,\r\n TableInput,\r\n)\r\nfrom langflow.schema.data import Data\r\nfrom langflow.schema.message import Message\r\n\r\n\r\nclass CrateData(Component):\r\n \"\"\"Dynamic Form Component\r\n\r\n This component creates dynamic inputs that can receive data from other components\r\n or be filled manually. It demonstrates advanced dynamic input functionality with\r\n component connectivity.\r\n\r\n ## Features\r\n - **Dynamic Input Generation**: Create inputs based on table configuration\r\n - **Component Connectivity**: Inputs can receive data from other components\r\n - **Multiple Input Types**: Support for text, number, boolean, and handle inputs\r\n - **Flexible Data Sources**: Manual input OR component connections\r\n - **Real-time Updates**: Form fields update immediately when table changes\r\n - **Multiple Output Formats**: Data and formatted Message outputs\r\n - **JSON Output**: Collects all dynamic inputs into a structured JSON response\r\n\r\n ## Use Cases\r\n - Dynamic API parameter collection from multiple sources\r\n - Variable data aggregation from different components\r\n - Flexible pipeline configuration\r\n - Multi-source data processing\r\n\r\n ## Field Types Available\r\n - **text**: Single-line text input (can connect to Text/String outputs)\r\n - **multiline**: Multi-line text input (can connect to Text outputs)\r\n - **number**: Integer input (can connect to Number outputs)\r\n - **float**: Decimal number input (can connect to Number outputs)\r\n - **boolean**: True/false checkbox (can connect to Boolean outputs)\r\n - **handle**: Generic data input (can connect to any component output)\r\n - **data**: Structured data input (can connect to Data outputs)\r\n\r\n ## Input Types for Connections\r\n - **Text**: Text/String data from components\r\n - **Data**: Structured data objects\r\n - **Message**: Message objects with text content\r\n - **Number**: Numeric values\r\n - **Boolean**: True/false values\r\n - **Any**: Accepts any type of connection\r\n - **Combinations**: Text,Message | Data,Text | Text,Data,Message | etc.\r\n \"\"\"\r\n\r\n display_name = \"Create Data\"\r\n description = \"Creates dynamic input fields that can receive data from other components or manual input.\"\r\n icon = \"braces\"\r\n name = \"AdvancedDynamicFormBuilder\"\r\n\r\n def __init__(self, **kwargs):\r\n super().__init__(**kwargs)\r\n self._dynamic_inputs = {}\r\n\r\n inputs = [\r\n TableInput(\r\n name=\"form_fields\",\r\n display_name=\"Input Configuration\",\r\n info=\"Define the dynamic form fields. 
Each row creates a new input field that can connect to other components.\",\r\n table_schema=[\r\n {\r\n \"name\": \"field_name\",\r\n \"display_name\": \"Field Name\",\r\n \"type\": \"str\",\r\n \"description\": \"Name for the field (used as both internal name and display label)\",\r\n },\r\n {\r\n \"name\": \"field_type\",\r\n \"display_name\": \"Field Type\",\r\n \"type\": \"str\",\r\n \"description\": \"Type of input field to create\",\r\n \"options\": [\"Text\", \"Data\", \"Number\", \"Handle\", \"Boolean\"],\r\n \"value\": \"Text\",\r\n },\r\n ],\r\n value=[{\"field_name\": \"field_name\", \"field_type\": \"Text\"}],\r\n real_time_refresh=True,\r\n ),\r\n BoolInput(\r\n name=\"include_metadata\",\r\n display_name=\"Include Metadata\",\r\n info=\"Include form configuration metadata in the output.\",\r\n value=False,\r\n advanced=True,\r\n ),\r\n ]\r\n\r\n outputs = [\r\n Output(display_name=\"Data\", name=\"form_data\", method=\"process_form\"),\r\n Output(display_name=\"Message\", name=\"message\", method=\"get_message\"),\r\n ]\r\n\r\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str = None) -> dict:\r\n \"\"\"Update build configuration to add dynamic inputs that can connect to other components.\"\"\"\r\n if field_name == \"form_fields\":\r\n # Store current values before clearing dynamic inputs\r\n current_values = {}\r\n keys_to_remove = [key for key in build_config if key.startswith(\"dynamic_\")]\r\n for key in keys_to_remove:\r\n # Preserve the current value before deletion\r\n if hasattr(self, key):\r\n current_values[key] = getattr(self, key)\r\n del build_config[key]\r\n\r\n # Add dynamic inputs based on table configuration\r\n # Safety check to ensure field_value is not None and is iterable\r\n if field_value is None:\r\n field_value = []\r\n\r\n for i, field_config in enumerate(field_value):\r\n # Safety check to ensure field_config is not None\r\n if field_config is None:\r\n continue\r\n\r\n field_name = field_config.get(\"field_name\", f\"field_{i}\")\r\n display_name = field_name # Use field_name as display_name\r\n field_type_option = field_config.get(\"field_type\", \"Text\")\r\n default_value = \"\" # All fields have empty default value\r\n required = False # All fields are optional by default\r\n help_text = \"\" # All fields have empty help text\r\n\r\n # Map field type options to actual field types and input types\r\n field_type_mapping = {\r\n \"Text\": {\"field_type\": \"multiline\", \"input_types\": [\"Text\", \"Message\"]},\r\n \"Data\": {\"field_type\": \"data\", \"input_types\": [\"Data\"]},\r\n \"Number\": {\"field_type\": \"number\", \"input_types\": [\"Text\", \"Message\"]},\r\n \"Handle\": {\"field_type\": \"handle\", \"input_types\": [\"Text\", \"Data\", \"Message\"]},\r\n \"Boolean\": {\"field_type\": \"boolean\", \"input_types\": None},\r\n }\r\n\r\n field_config_mapped = field_type_mapping.get(\r\n field_type_option, {\"field_type\": \"text\", \"input_types\": []}\r\n )\r\n field_type = field_config_mapped[\"field_type\"]\r\n input_types_list = field_config_mapped[\"input_types\"]\r\n\r\n # Create the appropriate input type based on field_type\r\n dynamic_input_name = f\"dynamic_{field_name}\"\r\n\r\n if field_type == \"text\":\r\n # Use preserved value if available, otherwise use default\r\n current_value = current_values.get(dynamic_input_name, default_value)\r\n if current_value is None:\r\n current_value = default_value\r\n \r\n if input_types_list:\r\n build_config[dynamic_input_name] = StrInput(\r\n 
name=dynamic_input_name,\r\n display_name=display_name,\r\n info=f\"{help_text} (Can connect to: {', '.join(input_types_list)})\",\r\n value=current_value,\r\n required=required,\r\n input_types=input_types_list,\r\n )\r\n else:\r\n build_config[dynamic_input_name] = StrInput(\r\n name=dynamic_input_name,\r\n display_name=display_name,\r\n info=help_text,\r\n value=current_value,\r\n required=required,\r\n )\r\n\r\n elif field_type == \"multiline\":\r\n # Use preserved value if available, otherwise use default\r\n current_value = current_values.get(dynamic_input_name, default_value)\r\n if current_value is None:\r\n current_value = default_value\r\n \r\n if input_types_list:\r\n build_config[dynamic_input_name] = MultilineInput(\r\n name=dynamic_input_name,\r\n display_name=display_name,\r\n info=f\"{help_text} (Can connect to: {', '.join(input_types_list)})\",\r\n value=current_value,\r\n required=required,\r\n input_types=input_types_list,\r\n )\r\n else:\r\n build_config[dynamic_input_name] = MultilineInput(\r\n name=dynamic_input_name,\r\n display_name=display_name,\r\n info=help_text,\r\n value=current_value,\r\n required=required,\r\n )\r\n\r\n elif field_type == \"number\":\r\n # Use preserved value if available, otherwise use default\r\n current_value = current_values.get(dynamic_input_name, default_value)\r\n if current_value is None:\r\n current_value = default_value\r\n \r\n try:\r\n if current_value:\r\n current_int = int(current_value)\r\n else:\r\n current_int = 0\r\n except (ValueError, TypeError):\r\n try:\r\n current_int = int(default_value) if default_value else 0\r\n except ValueError:\r\n current_int = 0\r\n\r\n if input_types_list:\r\n build_config[dynamic_input_name] = IntInput(\r\n name=dynamic_input_name,\r\n display_name=display_name,\r\n info=f\"{help_text} (Can connect to: {', '.join(input_types_list)})\",\r\n value=current_int,\r\n required=required,\r\n input_types=input_types_list,\r\n )\r\n else:\r\n build_config[dynamic_input_name] = IntInput(\r\n name=dynamic_input_name,\r\n display_name=display_name,\r\n info=help_text,\r\n value=current_int,\r\n required=required,\r\n )\r\n\r\n elif field_type == \"float\":\r\n # Use preserved value if available, otherwise use default\r\n current_value = current_values.get(dynamic_input_name, default_value)\r\n if current_value is None:\r\n current_value = default_value\r\n \r\n try:\r\n if current_value:\r\n current_float = float(current_value)\r\n else:\r\n current_float = 0.0\r\n except (ValueError, TypeError):\r\n try:\r\n current_float = float(default_value) if default_value else 0.0\r\n except ValueError:\r\n current_float = 0.0\r\n\r\n if input_types_list:\r\n build_config[dynamic_input_name] = FloatInput(\r\n name=dynamic_input_name,\r\n display_name=display_name,\r\n info=f\"{help_text} (Can connect to: {', '.join(input_types_list)})\",\r\n value=current_float,\r\n required=required,\r\n input_types=input_types_list,\r\n )\r\n else:\r\n build_config[dynamic_input_name] = FloatInput(\r\n name=dynamic_input_name,\r\n display_name=display_name,\r\n info=help_text,\r\n value=current_float,\r\n required=required,\r\n )\r\n\r\n elif field_type == \"boolean\":\r\n # Use preserved value if available, otherwise use default\r\n current_value = current_values.get(dynamic_input_name, default_value)\r\n if current_value is None:\r\n current_value = default_value\r\n \r\n # Convert current value to boolean\r\n if isinstance(current_value, bool):\r\n current_bool = current_value\r\n else:\r\n current_bool = 
str(current_value).lower() in [\"true\", \"1\", \"yes\"] if current_value else False\r\n\r\n # Boolean fields don't use input_types parameter to avoid errors\r\n build_config[dynamic_input_name] = BoolInput(\r\n name=dynamic_input_name,\r\n display_name=display_name,\r\n info=help_text,\r\n value=current_bool,\r\n input_types=[],\r\n required=required,\r\n )\r\n\r\n elif field_type == \"handle\":\r\n # HandleInput for generic data connections\r\n build_config[dynamic_input_name] = HandleInput(\r\n name=dynamic_input_name,\r\n display_name=display_name,\r\n info=f\"{help_text} (Accepts: {', '.join(input_types_list) if input_types_list else 'Any'})\",\r\n input_types=input_types_list if input_types_list else [\"Data\", \"Text\", \"Message\"],\r\n required=required,\r\n )\r\n\r\n elif field_type == \"data\":\r\n # Specialized for Data type connections\r\n build_config[dynamic_input_name] = HandleInput(\r\n name=dynamic_input_name,\r\n display_name=display_name,\r\n info=f\"{help_text} (Data input)\",\r\n input_types=[\"Data\"] if not input_types_list else input_types_list,\r\n required=required,\r\n )\r\n\r\n else:\r\n # Default to text input for unknown types\r\n # Use preserved value if available, otherwise use default\r\n current_value = current_values.get(dynamic_input_name, default_value)\r\n if current_value is None:\r\n current_value = default_value\r\n \r\n build_config[dynamic_input_name] = StrInput(\r\n name=dynamic_input_name,\r\n display_name=display_name,\r\n info=f\"{help_text} (Unknown type '{field_type}', defaulting to text)\",\r\n value=current_value,\r\n required=required,\r\n )\r\n\r\n return build_config\r\n\r\n def get_dynamic_values(self) -> dict[str, Any]:\r\n \"\"\"Extract simple values from all dynamic inputs, handling both manual and connected inputs.\"\"\"\r\n dynamic_values = {}\r\n connection_info = {}\r\n form_fields = getattr(self, \"form_fields\", [])\r\n\r\n for field_config in form_fields:\r\n # Safety check to ensure field_config is not None\r\n if field_config is None:\r\n continue\r\n\r\n field_name = field_config.get(\"field_name\", \"\")\r\n if field_name:\r\n dynamic_input_name = f\"dynamic_{field_name}\"\r\n value = getattr(self, dynamic_input_name, None)\r\n\r\n # Extract simple values from connections or manual input\r\n if value is not None:\r\n try:\r\n extracted_value = self._extract_simple_value(value)\r\n dynamic_values[field_name] = extracted_value\r\n\r\n # Determine connection type for status\r\n if hasattr(value, \"text\") and hasattr(value, \"timestamp\"):\r\n connection_info[field_name] = \"Connected (Message)\"\r\n elif hasattr(value, \"data\"):\r\n connection_info[field_name] = \"Connected (Data)\"\r\n elif isinstance(value, (str, int, float, bool, list, dict)):\r\n connection_info[field_name] = \"Manual input\"\r\n else:\r\n connection_info[field_name] = \"Connected (Object)\"\r\n\r\n except Exception:\r\n # Fallback to string representation if all else fails\r\n dynamic_values[field_name] = str(value)\r\n connection_info[field_name] = \"Error\"\r\n else:\r\n # Use empty default value if nothing connected\r\n dynamic_values[field_name] = \"\"\r\n connection_info[field_name] = \"Empty default\"\r\n\r\n # Store connection info for status output\r\n self._connection_info = connection_info\r\n return dynamic_values\r\n\r\n def _extract_simple_value(self, value: Any) -> Any:\r\n \"\"\"Extract the simplest, most useful value from any input type.\"\"\"\r\n # Handle None\r\n if value is None:\r\n return None\r\n\r\n # Handle simple types 
directly\r\n if isinstance(value, (str, int, float, bool)):\r\n return value\r\n\r\n # Handle lists and tuples - keep simple\r\n if isinstance(value, (list, tuple)):\r\n return [self._extract_simple_value(item) for item in value]\r\n\r\n # Handle dictionaries - keep simple\r\n if isinstance(value, dict):\r\n return {str(k): self._extract_simple_value(v) for k, v in value.items()}\r\n\r\n # Handle Message objects - extract only the text\r\n if hasattr(value, \"text\"):\r\n return str(value.text) if value.text is not None else \"\"\r\n\r\n # Handle Data objects - extract the data content\r\n if hasattr(value, \"data\") and value.data is not None:\r\n return self._extract_simple_value(value.data)\r\n\r\n # For any other object, convert to string\r\n return str(value)\r\n\r\n def process_form(self) -> Data:\r\n \"\"\"Process all dynamic form inputs and return clean data with just field values.\"\"\"\r\n # Get all dynamic values (just the key:value pairs)\r\n dynamic_values = self.get_dynamic_values()\r\n\r\n # Update status with connection info\r\n connected_fields = len([v for v in getattr(self, \"_connection_info\", {}).values() if \"Connected\" in v])\r\n total_fields = len(dynamic_values)\r\n\r\n self.status = f\"Form processed successfully. {connected_fields}/{total_fields} fields connected to components.\"\r\n\r\n # Return clean Data object with just the field values\r\n return Data(data=dynamic_values)\r\n\r\n def get_message(self) -> Message:\r\n \"\"\"Return form data as a formatted text message.\"\"\"\r\n # Get all dynamic values\r\n dynamic_values = self.get_dynamic_values()\r\n\r\n if not dynamic_values:\r\n return Message(text=\"No form data available\")\r\n\r\n # Format as text message\r\n message_lines = [\"📋 Form Data:\"]\r\n message_lines.append(\"=\" * 40)\r\n\r\n for field_name, value in dynamic_values.items():\r\n # Use field_name as display_name\r\n display_name = field_name\r\n\r\n message_lines.append(f\"• {display_name}: {value}\")\r\n\r\n message_lines.append(\"=\" * 40)\r\n message_lines.append(f\"Total fields: {len(dynamic_values)}\")\r\n\r\n message_text = \"\\n\".join(message_lines)\r\n self.status = f\"Message formatted with {len(dynamic_values)} fields\"\r\n\r\n return Message(text=message_text)"
              },
              "dynamic_connector_type": {
                "_input_type": "MultilineInput",
                "advanced": false,
                "copy_field": false,
                "display_name": "connector_type",
                "dynamic": false,
                "helper_text": null,
                "info": " (Can connect to: Text, Message)",
                "input_types": [
                  "Text",
                  "Message"
                ],
                "list": false,
                "list_add_label": "Add More",
                "load_from_db": false,
                "multiline": true,
                "name": "dynamic_connector_type",
                "placeholder": "",
                "real_time_refresh": null,
                "refresh_button": null,
                "refresh_button_text": null,
                "required": false,
                "show": true,
                "title_case": false,
                "tool_mode": false,
                "trace_as_input": true,
                "trace_as_metadata": true,
                "type": "str",
                "value": ""
              },
              "dynamic_owner": {
                "_input_type": "MultilineInput",
                "advanced": false,
                "copy_field": false,
                "display_name": "owner",
                "dynamic": false,
                "helper_text": null,
                "info": " (Can connect to: Text, Message)",
                "input_types": [
                  "Text",
                  "Message"
                ],
                "list": false,
                "list_add_label": "Add More",
                "load_from_db": false,
                "multiline": true,
                "name": "dynamic_owner",
                "placeholder": "",
                "real_time_refresh": null,
                "refresh_button": null,
                "refresh_button_text": null,
                "required": false,
                "show": true,
                "title_case": false,
                "tool_mode": false,
                "trace_as_input": true,
                "trace_as_metadata": true,
                "type": "str",
                "value": ""
              },
              "dynamic_owner_email": {
                "_input_type": "MultilineInput",
                "advanced": false,
                "copy_field": false,
                "display_name": "owner_email",
                "dynamic": false,
                "helper_text": null,
                "info": " (Can connect to: Text, Message)",
                "input_types": [
                  "Text",
                  "Message"
                ],
                "list": false,
                "list_add_label": "Add More",
                "load_from_db": false,
                "multiline": true,
                "name": "dynamic_owner_email",
                "placeholder": "",
                "real_time_refresh": null,
                "refresh_button": null,
                "refresh_button_text": null,
                "required": false,
                "show": true,
                "title_case": false,
                "tool_mode": false,
                "trace_as_input": true,
                "trace_as_metadata": true,
                "type": "str",
                "value": ""
              },
              "dynamic_owner_name": {
                "_input_type": "MultilineInput",
                "advanced": false,
                "copy_field": false,
                "display_name": "owner_name",
                "dynamic": false,
                "helper_text": null,
                "info": " (Can connect to: Text, Message)",
                "input_types": [
                  "Text",
                  "Message"
                ],
                "list": false,
                "list_add_label": "Add More",
                "load_from_db": false,
                "multiline": true,
                "name": "dynamic_owner_name",
                "placeholder": "",
                "real_time_refresh": null,
                "refresh_button": null,
                "refresh_button_text": null,
                "required": false,
                "show": true,
                "title_case": false,
                "tool_mode": false,
                "trace_as_input": true,
                "trace_as_metadata": true,
                "type": "str",
                "value": ""
              },
              "form_fields": {
                "_input_type": "TableInput",
                "advanced": false,
                "display_name": "Input Configuration",
                "dynamic": false,
                "info": "Define the dynamic form fields. Each row creates a new input field that can connect to other components.",
                "is_list": true,
                "list_add_label": "Add More",
                "load_from_db": false,
                "name": "form_fields",
                "placeholder": "",
                "real_time_refresh": true,
                "required": false,
                "show": true,
                "table_icon": "Table",
                "table_schema": {
                  "columns": [
                    {
                      "default": "None",
                      "description": "Name for the field (used as both internal name and display label)",
                      "disable_edit": false,
                      "display_name": "Field Name",
                      "edit_mode": "popover",
                      "filterable": true,
                      "formatter": "text",
                      "hidden": false,
                      "name": "field_name",
                      "sortable": true,
                      "type": "str"
                    },
                    {
                      "default": "None",
                      "description": "Type of input field to create",
                      "disable_edit": false,
                      "display_name": "Field Type",
                      "edit_mode": "popover",
                      "filterable": true,
                      "formatter": "text",
                      "hidden": false,
                      "name": "field_type",
                      "options": [
                        "Text",
                        "Data",
                        "Number",
                        "Handle",
                        "Boolean"
                      ],
                      "sortable": true,
                      "type": "str"
                    }
                  ]
                },
                "title_case": false,
                "tool_mode": false,
                "trace_as_metadata": true,
                "trigger_icon": "Table",
                "trigger_text": "Open table",
                "type": "table",
                "value": [
                  {
                    "field_name": "owner",
                    "field_type": "Text"
                  },
                  {
                    "field_name": "owner_name",
                    "field_type": "Text"
                  },
                  {
                    "field_name": "owner_email",
                    "field_type": "Text"
                  },
                  {
                    "field_name": "connector_type",
                    "field_type": "Text"
                  }
                ]
              },
              "include_metadata": {
                "_input_type": "BoolInput",
                "advanced": true,
                "display_name": "Include Metadata",
                "dynamic": false,
                "info": "Include form configuration metadata in the output.",
                "list": false,
                "list_add_label": "Add More",
                "name": "include_metadata",
                "placeholder": "",
                "required": false,
                "show": true,
                "title_case": false,
                "tool_mode": false,
                "trace_as_metadata": true,
                "type": "bool",
                "value": false
              }
            },
            "tool_mode": false
          },
          "selected_output": "form_data",
          "showNode": true,
          "type": "AdvancedDynamicFormBuilder"
        },
        "dragging": false,
        "id": "AdvancedDynamicFormBuilder-81Exw",
        "measured": {
          "height": 552,
          "width": 320
        },
        "position": {
          "x": 1363.7188885586695,
          "y": 1810.433145275832
        },
        "selected": false,
        "type": "genericNode"
      },
      {
        "data": {
          "id": "SecretInput-F34VJ",
          "node": {
            "base_classes": [
              "Message"
            ],
            "beta": false,
            "conditional_paths": [],
            "custom_fields": {},
            "description": "Allows the selection of a secret to be generated as output..",
            "display_name": "Secret Input",
            "documentation": "https://docs.langflow.org/components-io#text-input",
            "edited": true,
            "field_order": [
              "input_value"
            ],
            "frozen": false,
            "icon": "type",
            "legacy": false,
            "lf_version": "1.6.0",
            "metadata": {},
            "minimized": false,
            "output_types": [],
            "outputs": [
              {
                "allows_loop": false,
                "cache": true,
                "display_name": "Output Text",
                "group_outputs": false,
                "hidden": null,
                "method": "text_response",
                "name": "text",
                "options": null,
                "required_inputs": null,
                "selected": "Message",
                "tool_mode": true,
                "types": [
                  "Message"
                ],
                "value": "__UNDEFINED__"
              }
            ],
            "pinned": false,
            "template": {
              "_type": "Component",
              "code": {
                "advanced": true,
                "dynamic": true,
                "fileTypes": [],
                "file_path": "",
                "info": "",
                "list": false,
                "load_from_db": false,
                "multiline": true,
                "name": "code",
                "password": false,
                "placeholder": "",
                "required": true,
                "show": true,
                "title_case": false,
                "type": "code",
"value": "from langflow.base.io.text import TextComponent\r\nfrom langflow.io import MultilineInput, Output, SecretStrInput\r\nfrom langflow.schema.message import Message\r\n\r\n\r\nclass SecretInputComponent(TextComponent):\r\n display_name = \"Secret Input\"\r\n description = \"Allows the selection of a secret to be generated as output..\"\r\n documentation: str = \"https://docs.langflow.org/components-io#text-input\"\r\n icon = \"type\"\r\n name = \"SecretInput\"\r\n\r\n inputs = [\r\n SecretStrInput(\r\n name=\"input_value\",\r\n display_name=\"Secret\",\r\n info=\"Secret to be passed as input.\",\r\n ),\r\n ]\r\n outputs = [\r\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\r\n ]\r\n\r\n def text_response(self) -> Message:\r\n return Message(\r\n text=self.input_value,\r\n )\r\n"
              },
              "input_value": {
                "_input_type": "SecretStrInput",
                "advanced": false,
                "display_name": "Secret",
                "dynamic": false,
                "info": "Secret to be passed as input.",
                "input_types": [],
                "load_from_db": true,
                "name": "input_value",
                "password": true,
                "placeholder": "",
                "required": false,
                "show": true,
                "title_case": false,
                "type": "str",
                "value": "CONNECTOR_TYPE"
              }
            },
            "tool_mode": false
          },
          "showNode": true,
          "type": "SecretInput"
        },
        "dragging": false,
        "id": "SecretInput-F34VJ",
        "measured": {
          "height": 220,
          "width": 320
        },
        "position": {
          "x": 714.0622664079099,
          "y": 1763.5050239191407
        },
        "selected": false,
        "type": "genericNode"
      },
      {
        "data": {
          "id": "SecretInput-b2cab",
          "node": {
            "base_classes": [
              "Message"
            ],
            "beta": false,
            "conditional_paths": [],
            "custom_fields": {},
            "description": "Allows the selection of a secret to be generated as output..",
            "display_name": "Secret Input",
            "documentation": "https://docs.langflow.org/components-io#text-input",
            "edited": true,
            "field_order": [
              "input_value"
            ],
            "frozen": false,
            "icon": "type",
            "legacy": false,
            "lf_version": "1.6.0",
            "metadata": {},
            "minimized": false,
            "output_types": [],
            "outputs": [
              {
                "allows_loop": false,
                "cache": true,
                "display_name": "Output Text",
                "group_outputs": false,
                "hidden": null,
                "method": "text_response",
                "name": "text",
                "options": null,
                "required_inputs": null,
                "selected": "Message",
                "tool_mode": true,
                "types": [
                  "Message"
                ],
                "value": "__UNDEFINED__"
              }
            ],
            "pinned": false,
            "template": {
              "_type": "Component",
              "code": {
                "advanced": true,
                "dynamic": true,
                "fileTypes": [],
                "file_path": "",
                "info": "",
                "list": false,
                "load_from_db": false,
                "multiline": true,
                "name": "code",
                "password": false,
                "placeholder": "",
                "required": true,
                "show": true,
                "title_case": false,
                "type": "code",
"value": "from langflow.base.io.text import TextComponent\r\nfrom langflow.io import MultilineInput, Output, SecretStrInput\r\nfrom langflow.schema.message import Message\r\n\r\n\r\nclass SecretInputComponent(TextComponent):\r\n display_name = \"Secret Input\"\r\n description = \"Allows the selection of a secret to be generated as output..\"\r\n documentation: str = \"https://docs.langflow.org/components-io#text-input\"\r\n icon = \"type\"\r\n name = \"SecretInput\"\r\n\r\n inputs = [\r\n SecretStrInput(\r\n name=\"input_value\",\r\n display_name=\"Secret\",\r\n info=\"Secret to be passed as input.\",\r\n ),\r\n ]\r\n outputs = [\r\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\r\n ]\r\n\r\n def text_response(self) -> Message:\r\n return Message(\r\n text=self.input_value,\r\n )\r\n"
              },
              "input_value": {
                "_input_type": "SecretStrInput",
                "advanced": false,
                "display_name": "Secret",
                "dynamic": false,
                "info": "Secret to be passed as input.",
                "input_types": [],
                "load_from_db": true,
                "name": "input_value",
                "password": true,
                "placeholder": "",
                "required": false,
                "show": true,
                "title_case": false,
                "type": "str",
                "value": "OWNER"
              }
            },
            "tool_mode": false
          },
          "showNode": true,
          "type": "SecretInput"
        },
        "dragging": false,
        "id": "SecretInput-b2cab",
        "measured": {
          "height": 220,
          "width": 320
        },
        "position": {
          "x": 714.4587372144712,
          "y": 2004.1002386954729
        },
        "selected": true,
        "type": "genericNode"
      },
      {
        "data": {
          "id": "SecretInput-ZVfuS",
          "node": {
            "base_classes": [
              "Message"
            ],
            "beta": false,
            "conditional_paths": [],
            "custom_fields": {},
            "description": "Allows the selection of a secret to be generated as output..",
            "display_name": "Secret Input",
            "documentation": "https://docs.langflow.org/components-io#text-input",
            "edited": true,
            "field_order": [
              "input_value"
            ],
            "frozen": false,
            "icon": "type",
            "legacy": false,
            "lf_version": "1.6.0",
            "metadata": {},
            "minimized": false,
            "output_types": [],
            "outputs": [
              {
                "allows_loop": false,
                "cache": true,
                "display_name": "Output Text",
                "group_outputs": false,
                "hidden": null,
                "method": "text_response",
                "name": "text",
                "options": null,
                "required_inputs": null,
                "selected": "Message",
                "tool_mode": true,
                "types": [
                  "Message"
                ],
                "value": "__UNDEFINED__"
              }
            ],
            "pinned": false,
            "template": {
              "_type": "Component",
              "code": {
                "advanced": true,
                "dynamic": true,
                "fileTypes": [],
                "file_path": "",
                "info": "",
                "list": false,
                "load_from_db": false,
                "multiline": true,
                "name": "code",
                "password": false,
                "placeholder": "",
                "required": true,
                "show": true,
                "title_case": false,
                "type": "code",
"value": "from langflow.base.io.text import TextComponent\r\nfrom langflow.io import MultilineInput, Output, SecretStrInput\r\nfrom langflow.schema.message import Message\r\n\r\n\r\nclass SecretInputComponent(TextComponent):\r\n display_name = \"Secret Input\"\r\n description = \"Allows the selection of a secret to be generated as output..\"\r\n documentation: str = \"https://docs.langflow.org/components-io#text-input\"\r\n icon = \"type\"\r\n name = \"SecretInput\"\r\n\r\n inputs = [\r\n SecretStrInput(\r\n name=\"input_value\",\r\n display_name=\"Secret\",\r\n info=\"Secret to be passed as input.\",\r\n ),\r\n ]\r\n outputs = [\r\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\r\n ]\r\n\r\n def text_response(self) -> Message:\r\n return Message(\r\n text=self.input_value,\r\n )\r\n"
              },
              "input_value": {
                "_input_type": "SecretStrInput",
                "advanced": false,
                "display_name": "Secret",
                "dynamic": false,
                "info": "Secret to be passed as input.",
                "input_types": [],
                "load_from_db": true,
                "name": "input_value",
                "password": true,
                "placeholder": "",
                "required": false,
                "show": true,
                "title_case": false,
                "type": "str",
                "value": "OWNER_EMAIL"
              }
            },
            "tool_mode": false
          },
          "showNode": true,
          "type": "SecretInput"
        },
        "dragging": false,
        "id": "SecretInput-ZVfuS",
        "measured": {
          "height": 220,
          "width": 320
        },
        "position": {
          "x": 709.8548852366719,
          "y": 2250.972699431992
        },
        "selected": false,
        "type": "genericNode"
      },
      {
        "data": {
          "id": "SecretInput-Iqtxd",
          "node": {
            "base_classes": [
              "Message"
            ],
            "beta": false,
            "conditional_paths": [],
            "custom_fields": {},
            "description": "Allows the selection of a secret to be generated as output..",
            "display_name": "Secret Input",
            "documentation": "https://docs.langflow.org/components-io#text-input",
            "edited": true,
            "field_order": [
              "input_value"
            ],
            "frozen": false,
            "icon": "type",
            "legacy": false,
            "lf_version": "1.6.0",
            "metadata": {},
            "minimized": false,
            "output_types": [],
            "outputs": [
              {
                "allows_loop": false,
                "cache": true,
                "display_name": "Output Text",
                "group_outputs": false,
                "hidden": null,
                "method": "text_response",
                "name": "text",
                "options": null,
                "required_inputs": null,
                "selected": "Message",
                "tool_mode": true,
                "types": [
                  "Message"
                ],
                "value": "__UNDEFINED__"
              }
            ],
            "pinned": false,
            "template": {
              "_type": "Component",
              "code": {
                "advanced": true,
                "dynamic": true,
                "fileTypes": [],
                "file_path": "",
                "info": "",
                "list": false,
                "load_from_db": false,
                "multiline": true,
                "name": "code",
                "password": false,
                "placeholder": "",
                "required": true,
                "show": true,
                "title_case": false,
                "type": "code",
"value": "from langflow.base.io.text import TextComponent\r\nfrom langflow.io import MultilineInput, Output, SecretStrInput\r\nfrom langflow.schema.message import Message\r\n\r\n\r\nclass SecretInputComponent(TextComponent):\r\n display_name = \"Secret Input\"\r\n description = \"Allows the selection of a secret to be generated as output..\"\r\n documentation: str = \"https://docs.langflow.org/components-io#text-input\"\r\n icon = \"type\"\r\n name = \"SecretInput\"\r\n\r\n inputs = [\r\n SecretStrInput(\r\n name=\"input_value\",\r\n display_name=\"Secret\",\r\n info=\"Secret to be passed as input.\",\r\n ),\r\n ]\r\n outputs = [\r\n Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\r\n ]\r\n\r\n def text_response(self) -> Message:\r\n return Message(\r\n text=self.input_value,\r\n )\r\n"
              },
              "input_value": {
                "_input_type": "SecretStrInput",
                "advanced": false,
                "display_name": "Secret",
                "dynamic": false,
                "info": "Secret to be passed as input.",
                "input_types": [],
                "load_from_db": true,
                "name": "input_value",
                "password": true,
                "placeholder": "",
                "required": false,
                "show": true,
                "title_case": false,
                "type": "str",
                "value": "OWNER_NAME"
              }
            },
            "tool_mode": false
          },
          "showNode": true,
          "type": "SecretInput"
        },
        "dragging": false,
        "id": "SecretInput-Iqtxd",
        "measured": {
          "height": 220,
          "width": 320
        },
        "position": {
          "x": 712.1292482141275,
          "y": 2505.6122587806585
        },
        "selected": false,
        "type": "genericNode"
      }
    ],
    "viewport": {
      "x": -311.42857930212404,
      "y": -532.9060284457172,
      "zoom": 0.5361317364942912
    }
  },
  "description": "Load your data for chat context with Retrieval Augmented Generation.",
  "endpoint_name": null,
  "id": "5488df7c-b93f-4f87-a446-b67028bc0813",
  "is_component": false,
  "last_tested_version": "1.6.0",
  "name": "OpenSearch Ingestion Flow",
  "tags": [
    "openai",
    "astradb",
    "rag",
    "q-a"
  ]
}