* Added flows with new components
* Commented out model provider assignment
* Added agent component display name
* Commented out provider assignment; assign provider on the generic component and assign custom values
* Fixed Ollama not showing loading steps; fixed loading steps never being removed
* Made embedding and LLM models optional on the onboarding call
* Added isEmbedding handling in useModelSelection
* Added isEmbedding to the onboarding card, separating the embedding card from the non-embedding card
* Added one additional step to configure embeddings
* Added embedding provider config
* Changed settings.py to return early if not embedding
* Added editing fields to onboarding
* Updated onboarding and flows_service to change embedding and LLM separately
* Updated templates that need to be changed with provider values
* Updated flows with new components
* Changed config manager to not have default models
* Changed flows_service settings
* Complete steps if not embedding
* Added more onboarding steps
* Removed one step from the LLM steps
* Added Anthropic to the language model options on the frontend
* Added Anthropic models
* Added Anthropic support on the backend
* Fixed provider health and validation
* Formatted settings
* Changed the Anthropic logo
* Changed the button so it doesn't jump
* Changed the flows service to make Anthropic work
* Fixed some things
* Added embedding-specific global variables
* Updated flows
* Fixed the ingestion flow
* Implemented Anthropic on the settings page
* Added the embedding provider logo
* Updated the backend to work with multiple provider configs
* Updated useUpdateSettings with the new settings type
* Updated the provider health banner to check health via the new API
* Changed queries and mutations to use the new API
* Changed the embedding model input to work with the new API
* Implemented provider-based config on the frontend
* Updated the existing design
* Fixed settings configured
* Fixed the provider health query to include health checks for both providers
* Changed model-providers to correctly show the configured providers
* Updated prompt
* Updated the openrag agent
* Fixed settings to allow editing providers and changing LLM and embedding models
* Updated settings
* Changed lf version
* Bumped openrag version
* Added more steps
* Updated settings to create the global variables
* Updated steps
* Updated the default prompt

---------

Co-authored-by: Sebastián Estévez <estevezsebastian@gmail.com>
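Several of the items above (embedding-specific global variables, changing the embedding and LLM models separately) revolve around the per-model embedding fields used by the OpenSearch component embedded in the flow export below. For context, here is a minimal, self-contained sketch of that component's field-naming helpers, lightly condensed from the `code` value in the flow (the real module is `custom_components.opensearch`):

```python
def normalize_model_name(model_name: str) -> str:
    """Normalize an embedding model name into a valid OpenSearch field suffix."""
    normalized = model_name.lower()
    # Replace common separators with underscores
    for sep in ("-", ":", "/", "."):
        normalized = normalized.replace(sep, "_")
    # Keep only alphanumerics and underscores
    normalized = "".join(c if c.isalnum() or c == "_" else "_" for c in normalized)
    # Collapse duplicate underscores
    while "__" in normalized:
        normalized = normalized.replace("__", "_")
    return normalized.strip("_")


def get_embedding_field_name(model_name: str) -> str:
    """Dynamic vector field that stores embeddings produced by one model."""
    return f"chunk_embedding_{normalize_model_name(model_name)}"


# e.g. "text-embedding-3-small" -> "chunk_embedding_text_embedding_3_small"
print(get_embedding_field_name("text-embedding-3-small"))
```

This naming scheme is what lets one index hold documents embedded by several models at once: each model gets its own `knn_vector` field, and search dispatches a KNN query per detected model.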
Flow export: JSON, 3,100 lines, 249 KiB, no trailing newline.
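A note on the `œ` characters in the edge `id`, `sourceHandle`, and `targetHandle` strings below: they are not mojibake. Langflow serializes the handle object into the edge id with the `"` characters replaced by `œ`. A rough illustration of the apparent encoding, inferred from the data itself rather than from Langflow's actual implementation:

```python
import json

# The sourceHandle object of the first edge below.
source_handle = {
    "dataType": "TextInput",
    "id": "TextInput-aHsQb",
    "name": "text",
    "output_types": ["Message"],
}

# Compact-serialize, then swap quote characters for 'œ'.
encoded = json.dumps(source_handle, separators=(",", ":")).replace('"', "œ")
print(encoded)
# {œdataTypeœ:œTextInputœ,œidœ:œTextInput-aHsQbœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}
```

These strings should therefore be left exactly as-is when editing the flow by hand.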
{
"data": {
"edges": [
{
"animated": false,
"className": "",
"data": {
"sourceHandle": {
"dataType": "TextInput",
"id": "TextInput-aHsQb",
"name": "text",
"output_types": [
"Message"
]
},
"targetHandle": {
"fieldName": "filter_expression",
"id": "OpenSearch-iYfjf",
"inputTypes": [
"Message"
],
"type": "str"
}
},
"id": "xy-edge__TextInput-aHsQb{œdataTypeœ:œTextInputœ,œidœ:œTextInput-aHsQbœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-OpenSearch-iYfjf{œfieldNameœ:œfilter_expressionœ,œidœ:œOpenSearch-iYfjfœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}",
"selected": false,
"source": "TextInput-aHsQb",
"sourceHandle": "{œdataTypeœ:œTextInputœ,œidœ:œTextInput-aHsQbœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}",
"target": "OpenSearch-iYfjf",
"targetHandle": "{œfieldNameœ:œfilter_expressionœ,œidœ:œOpenSearch-iYfjfœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}"
},
{
"animated": false,
"className": "",
"data": {
"sourceHandle": {
"dataType": "EmbeddingModel",
"id": "EmbeddingModel-oPi95",
"name": "embeddings",
"output_types": [
"Embeddings"
]
},
"targetHandle": {
"fieldName": "embedding",
"id": "OpenSearch-iYfjf",
"inputTypes": [
"Embeddings"
],
"type": "other"
}
},
"id": "xy-edge__EmbeddingModel-oPi95{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-oPi95œ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}-OpenSearch-iYfjf{œfieldNameœ:œembeddingœ,œidœ:œOpenSearch-iYfjfœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}",
"selected": false,
"source": "EmbeddingModel-oPi95",
"sourceHandle": "{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-oPi95œ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}",
"target": "OpenSearch-iYfjf",
"targetHandle": "{œfieldNameœ:œembeddingœ,œidœ:œOpenSearch-iYfjfœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}"
},
{
"animated": false,
"className": "",
"data": {
"sourceHandle": {
"dataType": "OpenSearchVectorStoreComponent",
"id": "OpenSearch-iYfjf",
"name": "component_as_tool",
"output_types": [
"Tool"
]
},
"targetHandle": {
"fieldName": "tools",
"id": "Agent-Nfw7u",
"inputTypes": [
"Tool"
],
"type": "other"
}
},
"id": "xy-edge__OpenSearch-iYfjf{œdataTypeœ:œOpenSearchVectorStoreComponentœ,œidœ:œOpenSearch-iYfjfœ,œnameœ:œcomponent_as_toolœ,œoutput_typesœ:[œToolœ]}-Agent-Nfw7u{œfieldNameœ:œtoolsœ,œidœ:œAgent-Nfw7uœ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}",
"selected": false,
"source": "OpenSearch-iYfjf",
"sourceHandle": "{œdataTypeœ:œOpenSearchVectorStoreComponentœ,œidœ:œOpenSearch-iYfjfœ,œnameœ:œcomponent_as_toolœ,œoutput_typesœ:[œToolœ]}",
"target": "Agent-Nfw7u",
"targetHandle": "{œfieldNameœ:œtoolsœ,œidœ:œAgent-Nfw7uœ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}"
},
{
"animated": false,
"className": "",
"data": {
"sourceHandle": {
"dataType": "MCP",
"id": "MCP-7EY21",
"name": "component_as_tool",
"output_types": [
"Tool"
]
},
"targetHandle": {
"fieldName": "tools",
"id": "Agent-Nfw7u",
"inputTypes": [
"Tool"
],
"type": "other"
}
},
"id": "xy-edge__MCP-7EY21{œdataTypeœ:œMCPœ,œidœ:œMCP-7EY21œ,œnameœ:œcomponent_as_toolœ,œoutput_typesœ:[œToolœ]}-Agent-Nfw7u{œfieldNameœ:œtoolsœ,œidœ:œAgent-Nfw7uœ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}",
"selected": false,
"source": "MCP-7EY21",
"sourceHandle": "{œdataTypeœ:œMCPœ,œidœ:œMCP-7EY21œ,œnameœ:œcomponent_as_toolœ,œoutput_typesœ:[œToolœ]}",
"target": "Agent-Nfw7u",
"targetHandle": "{œfieldNameœ:œtoolsœ,œidœ:œAgent-Nfw7uœ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}"
},
{
"animated": false,
"className": "",
"data": {
"sourceHandle": {
"dataType": "ChatInput",
"id": "ChatInput-ci8VE",
"name": "message",
"output_types": [
"Message"
]
},
"targetHandle": {
"fieldName": "input_value",
"id": "Agent-Nfw7u",
"inputTypes": [
"Message"
],
"type": "str"
}
},
"id": "xy-edge__ChatInput-ci8VE{œdataTypeœ:œChatInputœ,œidœ:œChatInput-ci8VEœ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}-Agent-Nfw7u{œfieldNameœ:œinput_valueœ,œidœ:œAgent-Nfw7uœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}",
"selected": false,
"source": "ChatInput-ci8VE",
"sourceHandle": "{œdataTypeœ:œChatInputœ,œidœ:œChatInput-ci8VEœ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}",
"target": "Agent-Nfw7u",
"targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œAgent-Nfw7uœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}"
},
{
"animated": false,
"className": "",
"data": {
"sourceHandle": {
"dataType": "Agent",
"id": "Agent-Nfw7u",
"name": "response",
"output_types": [
"Message"
]
},
"targetHandle": {
"fieldName": "input_value",
"id": "ChatOutput-gWl8E",
"inputTypes": [
"Data",
"DataFrame",
"Message"
],
"type": "other"
}
},
"id": "xy-edge__Agent-Nfw7u{œdataTypeœ:œAgentœ,œidœ:œAgent-Nfw7uœ,œnameœ:œresponseœ,œoutput_typesœ:[œMessageœ]}-ChatOutput-gWl8E{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-gWl8Eœ,œinputTypesœ:[œDataœ,œDataFrameœ,œMessageœ],œtypeœ:œotherœ}",
"selected": false,
"source": "Agent-Nfw7u",
"sourceHandle": "{œdataTypeœ:œAgentœ,œidœ:œAgent-Nfw7uœ,œnameœ:œresponseœ,œoutput_typesœ:[œMessageœ]}",
"target": "ChatOutput-gWl8E",
"targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-gWl8Eœ,œinputTypesœ:[œDataœ,œDataFrameœ,œMessageœ],œtypeœ:œotherœ}"
},
{
"animated": false,
"data": {
"sourceHandle": {
"dataType": "CalculatorComponent",
"id": "CalculatorComponent-KrlMH",
"name": "component_as_tool",
"output_types": [
"Tool"
]
},
"targetHandle": {
"fieldName": "tools",
"id": "Agent-Nfw7u",
"inputTypes": [
"Tool"
],
"type": "other"
}
},
"id": "xy-edge__CalculatorComponent-KrlMH{œdataTypeœ:œCalculatorComponentœ,œidœ:œCalculatorComponent-KrlMHœ,œnameœ:œcomponent_as_toolœ,œoutput_typesœ:[œToolœ]}-Agent-Nfw7u{œfieldNameœ:œtoolsœ,œidœ:œAgent-Nfw7uœ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}",
"selected": false,
"source": "CalculatorComponent-KrlMH",
"sourceHandle": "{œdataTypeœ:œCalculatorComponentœ,œidœ:œCalculatorComponent-KrlMHœ,œnameœ:œcomponent_as_toolœ,œoutput_typesœ:[œToolœ]}",
"target": "Agent-Nfw7u",
"targetHandle": "{œfieldNameœ:œtoolsœ,œidœ:œAgent-Nfw7uœ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}"
}
],
"nodes": [
|
||
{
|
||
"data": {
|
||
"id": "OpenSearch-iYfjf",
|
||
"node": {
|
||
"base_classes": [
|
||
"Data",
|
||
"DataFrame",
|
||
"VectorStore"
|
||
],
|
||
"beta": false,
|
||
"conditional_paths": [],
|
||
"custom_fields": {},
|
||
"description": "Store and search documents using OpenSearch with hybrid semantic and keyword search capabilities.",
|
||
"display_name": "OpenSearch",
|
||
"documentation": "",
|
||
"edited": true,
|
||
"field_order": [
|
||
"docs_metadata",
|
||
"opensearch_url",
|
||
"index_name",
|
||
"engine",
|
||
"space_type",
|
||
"ef_construction",
|
||
"m",
|
||
"ingest_data",
|
||
"search_query",
|
||
"should_cache_vector_store",
|
||
"embedding",
|
||
"vector_field",
|
||
"number_of_results",
|
||
"filter_expression",
|
||
"auth_mode",
|
||
"username",
|
||
"password",
|
||
"jwt_token",
|
||
"jwt_header",
|
||
"bearer_prefix",
|
||
"use_ssl",
|
||
"verify_certs"
|
||
],
|
||
"frozen": false,
|
||
"icon": "OpenSearch",
|
||
"last_updated": "2025-11-11T21:40:35.507Z",
|
||
"legacy": false,
|
||
"lf_version": "1.6.0",
|
||
"metadata": {
|
||
"code_hash": "07eef12db820",
|
||
"dependencies": {
|
||
"dependencies": [
|
||
{
|
||
"name": "opensearchpy",
|
||
"version": "2.8.0"
|
||
},
|
||
{
|
||
"name": "lfx",
|
||
"version": null
|
||
}
|
||
],
|
||
"total_dependencies": 2
|
||
},
|
||
"module": "custom_components.opensearch"
|
||
},
|
||
"minimized": false,
|
||
"output_types": [],
|
||
"outputs": [
|
||
{
|
||
"allows_loop": false,
|
||
"cache": true,
|
||
"display_name": "Toolset",
|
||
"group_outputs": false,
|
||
"hidden": null,
|
||
"method": "to_toolkit",
|
||
"name": "component_as_tool",
|
||
"options": null,
|
||
"required_inputs": null,
|
||
"selected": "Tool",
|
||
"tool_mode": true,
|
||
"types": [
|
||
"Tool"
|
||
],
|
||
"value": "__UNDEFINED__"
|
||
}
|
||
],
|
||
"pinned": false,
|
||
"template": {
|
||
"_type": "Component",
|
||
"auth_mode": {
|
||
"_input_type": "DropdownInput",
|
||
"advanced": false,
|
||
"combobox": false,
|
||
"dialog_inputs": {},
|
||
"display_name": "Authentication Mode",
|
||
"dynamic": false,
|
||
"external_options": {},
|
||
"info": "Authentication method: 'basic' for username/password authentication, or 'jwt' for JSON Web Token (Bearer) authentication.",
|
||
"load_from_db": false,
|
||
"name": "auth_mode",
|
||
"options": [
|
||
"basic",
|
||
"jwt"
|
||
],
|
||
"options_metadata": [],
|
||
"placeholder": "",
|
||
"real_time_refresh": true,
|
||
"required": false,
|
||
"show": true,
|
||
"title_case": false,
|
||
"toggle": false,
|
||
"tool_mode": false,
|
||
"trace_as_metadata": true,
|
||
"type": "str",
|
||
"value": "jwt"
|
||
},
|
||
"bearer_prefix": {
|
||
"_input_type": "BoolInput",
|
||
"advanced": true,
|
||
"display_name": "Prefix 'Bearer '",
|
||
"dynamic": false,
|
||
"info": "",
|
||
"list": false,
|
||
"list_add_label": "Add More",
|
||
"name": "bearer_prefix",
|
||
"placeholder": "",
|
||
"required": false,
|
||
"show": false,
|
||
"title_case": false,
|
||
"tool_mode": false,
|
||
"trace_as_metadata": true,
|
||
"type": "bool",
|
||
"value": true
|
||
},
|
||
"code": {
|
||
"advanced": true,
|
||
"dynamic": true,
|
||
"fileTypes": [],
|
||
"file_path": "",
|
||
"info": "",
|
||
"list": false,
|
||
"load_from_db": false,
|
||
"multiline": true,
|
||
"name": "code",
|
||
"password": false,
|
||
"placeholder": "",
|
||
"required": true,
|
||
"show": true,
|
||
"title_case": false,
|
||
"type": "code",
|
||
"value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom typing import Any, List, Optional\n\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponent(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. 
It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"]\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. \"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. 
\"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"]),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model being used (e.g., 'text-embedding-3-small'). \"\n \"Used to create dynamic vector field names and track which model embedded each document. \"\n \"Auto-detected from embedding component if not specified.\"\n ),\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=False,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=False,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=True,\n info=(\n \"Valid JSON Web Token for authentication. 
\"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. \"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from embedding component\n if hasattr(self, \"embedding\") and self.embedding:\n if hasattr(self.embedding, \"model\"):\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_name\"):\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. \"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'model' or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a 
dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\n \"type\": \"keyword\"\n },\n \"embedding_dimensions\": {\n \"type\": \"integer\"\n }\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n raise ValueError(\n f\"Field '{field_name}' is not mapped as knn_vector. Current mapping: {properties.get(field_name)}\"\n )\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. 
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n self.log(self.ingest_data)\n client = self.build_client()\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n 
\"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n docs = self.ingest_data or []\n if not docs:\n self.log(\"No documents to ingest.\")\n return\n\n # Get embedding model name\n embedding_model = self._get_embedding_model_name()\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n self.log(f\"Using embedding model: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Generate embeddings (threaded for concurrency) with retries\n def embed_chunk(chunk_text: str) -> list[float]:\n return self.embedding.embed_documents([chunk_text])[0]\n\n vectors: Optional[List[List[float]]] = None\n last_exception: Optional[Exception] = None\n delay = 1.0\n attempts = 0\n\n while attempts < 3:\n attempts += 1\n try:\n max_workers = min(max(len(texts), 1), 8)\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n vectors = [None] * len(texts)\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n break\n except Exception as exc:\n last_exception = exc\n if attempts >= 3:\n logger.error(\n \"Embedding generation failed after retries\",\n error=str(exc),\n )\n raise\n logger.warning(\n \"Threaded embedding generation failed (attempt %s/%s), retrying in %.1fs\",\n attempts,\n 3,\n delay,\n )\n time.sleep(delay)\n delay = min(delay * 2, 8.0)\n\n if vectors is None:\n raise RuntimeError(\n f\"Embedding generation failed: {last_exception}\" if last_exception else \"Embedding generation failed\"\n )\n\n if not vectors:\n self.log(\"No vectors generated from documents.\")\n return\n\n # Get vector dimension for 
mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(\n f\"Failed to create index '{self.index_name}': {creation_error}\"\n )\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f 
and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\n \"size\": 0,\n \"aggs\": {\n \"embedding_models\": {\n \"terms\": {\n \"field\": \"embedding_model\",\n \"size\": 10\n }\n }\n }\n }\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\n \"bool\": {\n \"filter\": filter_clauses\n }\n }\n\n result = client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n return models\n except Exception as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n return [self._get_embedding_model_name()]\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except Exception as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. 
Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(\n f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\"\n )\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return True\n\n return False\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models in parallel\n query_embeddings = {}\n\n # Note: Langflow is synchronous, so we can't use true async here\n # But we log the intent for parallel processing\n logger.info(f\"Generating embeddings for {len(available_models)} models\")\n\n original_model_attr = getattr(self.embedding, \"model\", None)\n original_deployment_attr = getattr(self.embedding, \"deployment\", None)\n original_dimensions_attr = getattr(self.embedding, \"dimensions\", None)\n\n for model_name in available_models:\n try:\n # In a real async environment, these would run in parallel\n # For now, they run 
sequentially\n if hasattr(self.embedding, \"model\"):\n setattr(self.embedding, \"model\", model_name)\n if hasattr(self.embedding, \"deployment\"):\n setattr(self.embedding, \"deployment\", model_name)\n if hasattr(self.embedding, \"dimensions\"):\n setattr(self.embedding, \"dimensions\", None)\n vec = self.embedding.embed_query(q)\n query_embeddings[model_name] = vec\n logger.info(f\"Generated embedding for model: {model_name}\")\n except Exception as e:\n logger.error(f\"Failed to generate embedding for {model_name}: {e}\")\n\n if hasattr(self.embedding, \"model\"):\n setattr(self.embedding, \"model\", original_model_attr)\n if hasattr(self.embedding, \"deployment\"):\n setattr(self.embedding, \"deployment\", original_deployment_attr)\n if hasattr(self.embedding, \"dimensions\"):\n setattr(self.embedding, \"dimensions\", original_dimensions_attr)\n\n if not query_embeddings:\n msg = \"Failed to generate embeddings for any model\"\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n continue\n\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. 
\"\n \"Returning empty search results.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\n \"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)],\n \"minimum_should_match\": 1\n }\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(\n f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models\"\n )\n\n try:\n resp = client.search(\n index=self.index_name, body=body, params={\"terminate_after\": 0}\n )\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n 
logger.info(f\"Found {len(hits)} results\")\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n raw = self.search(self.search_query or \"\")\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n self.log(self.ingest_data)\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n if is_basic:\n build_config[\"jwt_token\"][\"value\"] = \"\"\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n"
},
"docs_metadata": {
"_input_type": "TableInput",
"advanced": false,
"display_name": "Document Metadata",
"dynamic": false,
"info": "Additional metadata key-value pairs to be added to all ingested documents. Useful for tagging documents with source information, categories, or other custom attributes.",
"input_types": [
"Data"
],
"is_list": true,
"list_add_label": "Add More",
"name": "docs_metadata",
"placeholder": "",
"required": false,
"show": true,
"table_icon": "Table",
"table_schema": [
{
"description": "Key name",
"display_name": "Key",
"formatter": "text",
"name": "key",
"type": "str"
},
{
"description": "Value of the metadata",
"display_name": "Value",
"formatter": "text",
"name": "value",
"type": "str"
}
],
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"trigger_icon": "Table",
"trigger_text": "Open table",
"type": "table",
"value": []
},
"ef_construction": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "EF Construction",
"dynamic": false,
"info": "Size of the dynamic candidate list during index construction. Higher values improve recall but increase indexing time and memory usage.",
"list": false,
"list_add_label": "Add More",
"name": "ef_construction",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "int",
"value": 512
},
"embedding": {
"_input_type": "HandleInput",
"advanced": false,
"display_name": "Embedding",
"dynamic": false,
"info": "",
"input_types": [
"Embeddings"
],
"list": false,
"list_add_label": "Add More",
"name": "embedding",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "other",
"value": ""
},
"engine": {
"_input_type": "DropdownInput",
"advanced": true,
"combobox": false,
"dialog_inputs": {},
"display_name": "Vector Engine",
"dynamic": false,
"external_options": {},
"info": "Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.",
"name": "engine",
"options": [
"jvector",
"nmslib",
"faiss",
"lucene"
],
"options_metadata": [],
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"toggle": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "jvector"
},
"filter_expression": {
"_input_type": "MultilineInput",
"advanced": false,
"copy_field": false,
"display_name": "Search Filters (JSON)",
"dynamic": false,
"info": "Optional JSON configuration for search filtering, result limits, and score thresholds.\n\nFormat 1 - Explicit filters:\n{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, {\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\n\nFormat 2 - Context-style mapping:\n{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\n\nUse __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"multiline": true,
"name": "filter_expression",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"index_name": {
"_input_type": "StrInput",
"advanced": false,
"display_name": "Index Name",
"dynamic": false,
"info": "The OpenSearch index name where documents will be stored and searched. Will be created automatically if it doesn't exist.",
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "index_name",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "documents"
},
"ingest_data": {
"_input_type": "HandleInput",
"advanced": false,
"display_name": "Ingest Data",
"dynamic": false,
"info": "",
"input_types": [
"Data",
"DataFrame"
],
"list": true,
"list_add_label": "Add More",
"name": "ingest_data",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "other",
"value": ""
},
"jwt_header": {
"_input_type": "StrInput",
"advanced": true,
"display_name": "JWT Header Name",
"dynamic": false,
"info": "",
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "jwt_header",
"placeholder": "",
"required": false,
"show": false,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "Authorization"
},
"jwt_token": {
"_input_type": "SecretStrInput",
"advanced": false,
"display_name": "JWT Token",
"dynamic": false,
"info": "Valid JSON Web Token for authentication. Will be sent in the Authorization header (with optional 'Bearer ' prefix).",
"input_types": [],
"load_from_db": true,
"name": "jwt_token",
"password": true,
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"type": "str",
"value": "JWT"
},
"m": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "M Parameter",
"dynamic": false,
"info": "Number of bidirectional connections for each vector in the HNSW graph. Higher values improve search quality but increase memory usage and indexing time.",
"list": false,
"list_add_label": "Add More",
"name": "m",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "int",
"value": 16
},
"number_of_results": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Default Result Limit",
"dynamic": false,
"info": "Default maximum number of search results to return when no limit is specified in the filter expression.",
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "number_of_results",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "int",
"value": 4
},
"opensearch_url": {
"_input_type": "StrInput",
"advanced": false,
"display_name": "OpenSearch URL",
"dynamic": false,
"info": "The connection URL for your OpenSearch cluster (e.g., http://localhost:9200 for local development or your cloud endpoint).",
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "opensearch_url",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "https://opensearch:9200"
},
"password": {
"_input_type": "SecretStrInput",
"advanced": false,
"display_name": "OpenSearch Password",
"dynamic": false,
"info": "",
"input_types": [],
"load_from_db": false,
"name": "password",
"password": true,
"placeholder": "",
"required": false,
"show": false,
"title_case": false,
"type": "str",
"value": ""
},
"search_query": {
"_input_type": "QueryInput",
"advanced": false,
"display_name": "Search Query",
"dynamic": false,
"info": "Enter a query to run a similarity search.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "search_query",
"placeholder": "Enter a query...",
"required": false,
"show": true,
"title_case": false,
"tool_mode": true,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "query",
"value": ""
},
"should_cache_vector_store": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Cache Vector Store",
"dynamic": false,
"info": "If True, the vector store will be cached for the current build of the component. This is useful for components that have multiple output methods and want to share the same vector store.",
"list": false,
"list_add_label": "Add More",
"name": "should_cache_vector_store",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
},
"space_type": {
"_input_type": "DropdownInput",
"advanced": true,
"combobox": false,
"dialog_inputs": {},
"display_name": "Distance Metric",
"dynamic": false,
"external_options": {},
"info": "Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, 'cosinesimil' for cosine similarity, 'innerproduct' for dot product.",
"name": "space_type",
"options": [
"l2",
"l1",
"cosinesimil",
"linf",
"innerproduct"
],
"options_metadata": [],
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"toggle": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "l2"
},
"tools_metadata": {
"_input_type": "ToolsInput",
"advanced": false,
"display_name": "Actions",
"dynamic": false,
"info": "Modify tool names and descriptions to help agents understand when to use each tool.",
"is_list": true,
"list_add_label": "Add More",
"name": "tools_metadata",
"placeholder": "",
"real_time_refresh": true,
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "tools",
"value": [
{
"args": {
"search_query": {
"default": "",
"description": "Enter a query to run a similarity search.",
"title": "Search Query",
"type": "string"
}
},
"description": "Store and search documents using OpenSearch with hybrid semantic and keyword search capabilities.",
"display_description": "Store and search documents using OpenSearch with hybrid semantic and keyword search capabilities.",
"display_name": "search_documents",
"name": "search_documents",
"readonly": false,
"status": true,
"tags": [
"search_documents"
]
},
{
"args": {
"search_query": {
"default": "",
"description": "Enter a query to run a similarity search.",
"title": "Search Query",
"type": "string"
}
},
"description": "Store and search documents using OpenSearch with hybrid semantic and keyword search capabilities.",
"display_description": "Store and search documents using OpenSearch with hybrid semantic and keyword search capabilities.",
"display_name": "as_dataframe",
"name": "as_dataframe",
"readonly": false,
"status": false,
"tags": [
"as_dataframe"
]
},
{
"args": {
"search_query": {
"default": "",
"description": "Enter a query to run a similarity search.",
"title": "Search Query",
"type": "string"
}
},
"description": "Store and search documents using OpenSearch with hybrid semantic and keyword search capabilities.",
"display_description": "Store and search documents using OpenSearch with hybrid semantic and keyword search capabilities.",
"display_name": "as_vector_store",
"name": "as_vector_store",
"readonly": false,
"status": false,
"tags": [
"as_vector_store"
]
}
]
},
"use_ssl": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Use SSL/TLS",
"dynamic": false,
"info": "Enable SSL/TLS encryption for secure connections to OpenSearch.",
"list": false,
"list_add_label": "Add More",
"name": "use_ssl",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
},
"username": {
"_input_type": "StrInput",
"advanced": false,
"display_name": "Username",
"dynamic": false,
"info": "",
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "username",
"placeholder": "",
"required": false,
"show": false,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "admin"
},
"vector_field": {
"_input_type": "StrInput",
"advanced": true,
"display_name": "Vector Field Name",
"dynamic": false,
"info": "Name of the field in OpenSearch documents that stores the vector embeddings for similarity search.",
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "vector_field",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "chunk_embedding"
},
"verify_certs": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Verify SSL Certificates",
"dynamic": false,
"info": "Verify SSL certificates when connecting. Disable for self-signed certificates in development environments.",
"list": false,
"list_add_label": "Add More",
"name": "verify_certs",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "bool",
"value": false
}
},
"tool_mode": true
},
"selected_output": "search_results",
"showNode": true,
"type": "OpenSearchVectorStoreComponent"
},
"dragging": false,
"id": "OpenSearch-iYfjf",
"measured": {
"height": 820,
"width": 320
},
"position": {
"x": 1183.2560374129,
"y": 320.1264495479339
},
"selected": false,
"type": "genericNode"
},
{
"data": {
"id": "TextInput-aHsQb",
"node": {
"base_classes": [
"Message"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
"description": "Get user text inputs.",
"display_name": "Text Input",
"documentation": "https://docs.langflow.org/components-io#text-input",
"edited": true,
"field_order": [
"input_value"
],
"frozen": false,
"icon": "type",
"legacy": false,
"lf_version": "1.6.0",
"metadata": {},
"minimized": false,
"output_types": [],
"outputs": [
{
"allows_loop": false,
"cache": true,
"display_name": "Output Text",
"group_outputs": false,
"hidden": null,
"method": "text_response",
"name": "text",
"options": null,
"required_inputs": null,
"selected": "Message",
"tool_mode": true,
"types": [
"Message"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"template": {
"_type": "Component",
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "from langflow.base.io.text import TextComponent\nfrom langflow.io import SecretStrInput, Output\nfrom langflow.schema.message import Message\n\n\nclass TextInputComponent(TextComponent):\n    display_name = \"Text Input\"\n    description = \"Get user text inputs.\"\n    documentation: str = \"https://docs.langflow.org/components-io#text-input\"\n    icon = \"type\"\n    name = \"TextInput\"\n\n    inputs = [\n        SecretStrInput(\n            name=\"input_value\",\n            display_name=\"Text\",\n            info=\"Text to be passed as input.\",\n        ),\n    ]\n    outputs = [\n        Output(display_name=\"Output Text\", name=\"text\", method=\"text_response\"),\n    ]\n\n    def text_response(self) -> Message:\n        return Message(\n            text=self.input_value,\n        )\n"
},
"input_value": {
"_input_type": "SecretStrInput",
"advanced": false,
"display_name": "Text",
"dynamic": false,
"info": "Text to be passed as input.",
"input_types": [],
"load_from_db": true,
"name": "input_value",
"password": true,
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"type": "str",
"value": "OPENRAG-QUERY-FILTER"
}
},
"tool_mode": false
},
"showNode": true,
"type": "TextInput"
},
"dragging": false,
"id": "TextInput-aHsQb",
"measured": {
"height": 204,
"width": 320
},
"position": {
"x": 722.0477041311764,
"y": 119.75705309395346
},
"selected": false,
"type": "genericNode"
},
{
"data": {
"id": "MCP-7EY21",
"node": {
"base_classes": [
"DataFrame"
],
"beta": false,
"category": "MCP",
"conditional_paths": [],
"custom_fields": {},
"description": "Connect to an MCP server to use its tools.",
"display_name": "MCP Tools",
"documentation": "https://docs.langflow.org/mcp-client",
"edited": false,
"field_order": [
"mcp_server",
"use_cache",
"tool",
"tool_placeholder"
],
"frozen": false,
"icon": "Mcp",
"key": "mcp_lf-starter_project",
"last_updated": "2025-11-11T21:40:35.508Z",
"legacy": false,
"mcpServerName": "lf-starter_project",
"metadata": {
"code_hash": "756d1e10d0ca",
"dependencies": {
"dependencies": [
{
"name": "langchain_core",
"version": "0.3.77"
},
{
"name": "lfx",
"version": null
},
{
"name": "langflow",
"version": null
}
],
"total_dependencies": 3
},
"module": "lfx.components.agents.mcp_component.MCPToolsComponent"
},
"minimized": false,
"output_types": [],
"outputs": [
{
"allows_loop": false,
"cache": true,
"display_name": "Toolset",
"group_outputs": false,
"hidden": null,
"method": "to_toolkit",
"name": "component_as_tool",
"options": null,
"required_inputs": null,
"selected": "Tool",
"tool_mode": true,
"types": [
"Tool"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"template": {
"_type": "Component",
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "from __future__ import annotations\n\nimport asyncio\nimport uuid\nfrom typing import Any\n\nfrom langchain_core.tools import StructuredTool  # noqa: TC002\n\nfrom lfx.base.agents.utils import maybe_unflatten_dict, safe_cache_get, safe_cache_set\nfrom lfx.base.mcp.util import MCPSseClient, MCPStdioClient, create_input_schema_from_json_schema, update_tools\nfrom lfx.custom.custom_component.component_with_cache import ComponentWithCache\nfrom lfx.inputs.inputs import InputTypes  # noqa: TC001\nfrom lfx.io import BoolInput, DropdownInput, McpInput, MessageTextInput, Output\nfrom lfx.io.schema import flatten_schema, schema_to_langflow_inputs\nfrom lfx.log.logger import logger\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.services.deps import get_settings_service, get_storage_service, session_scope\n\n\nclass MCPToolsComponent(ComponentWithCache):\n    schema_inputs: list = []\n    tools: list[StructuredTool] = []\n    _not_load_actions: bool = False\n    _tool_cache: dict = {}\n    _last_selected_server: str | None = None  # Cache for the last selected server\n\n    def __init__(self, **data) -> None:\n        super().__init__(**data)\n        # Initialize cache keys to avoid CacheMiss when accessing them\n        self._ensure_cache_structure()\n\n        # Initialize clients with access to the component cache\n        self.stdio_client: MCPStdioClient = MCPStdioClient(component_cache=self._shared_component_cache)\n        self.sse_client: MCPSseClient = MCPSseClient(component_cache=self._shared_component_cache)\n\n    def _ensure_cache_structure(self):\n        \"\"\"Ensure the cache has the required structure.\"\"\"\n        # Check if servers key exists and is not CacheMiss\n        servers_value = safe_cache_get(self._shared_component_cache, \"servers\")\n        if servers_value is None:\n            safe_cache_set(self._shared_component_cache, \"servers\", {})\n\n        # Check if last_selected_server key exists and is not CacheMiss\n        last_server_value = safe_cache_get(self._shared_component_cache, \"last_selected_server\")\n        if last_server_value is None:\n            safe_cache_set(self._shared_component_cache, \"last_selected_server\", \"\")\n\n    default_keys: list[str] = [\n        \"code\",\n        \"_type\",\n        \"tool_mode\",\n        \"tool_placeholder\",\n        \"mcp_server\",\n        \"tool\",\n        \"use_cache\",\n    ]\n\n    display_name = \"MCP Tools\"\n    description = \"Connect to an MCP server to use its tools.\"\n    documentation: str = \"https://docs.langflow.org/mcp-client\"\n    icon = \"Mcp\"\n    name = \"MCPTools\"\n\n    inputs = [\n        McpInput(\n            name=\"mcp_server\",\n            display_name=\"MCP Server\",\n            info=\"Select the MCP Server that will be used by this component\",\n            real_time_refresh=True,\n        ),\n        BoolInput(\n            name=\"use_cache\",\n            display_name=\"Use Cached Server\",\n            info=(\n                \"Enable caching of MCP Server and tools to improve performance. \"\n                \"Disable to always fetch fresh tools and server updates.\"\n            ),\n            value=False,\n            advanced=True,\n        ),\n        DropdownInput(\n            name=\"tool\",\n            display_name=\"Tool\",\n            options=[],\n            value=\"\",\n            info=\"Select the tool to execute\",\n            show=False,\n            required=True,\n            real_time_refresh=True,\n        ),\n        MessageTextInput(\n            name=\"tool_placeholder\",\n            display_name=\"Tool Placeholder\",\n            info=\"Placeholder for the tool\",\n            value=\"\",\n            show=False,\n            tool_mode=False,\n        ),\n    ]\n\n    outputs = [\n        Output(display_name=\"Response\", name=\"response\", method=\"build_output\"),\n    ]\n\n    async def _validate_schema_inputs(self, tool_obj) -> list[InputTypes]:\n        \"\"\"Validate and process schema inputs for a tool.\"\"\"\n        try:\n            if not tool_obj or not hasattr(tool_obj, \"args_schema\"):\n                msg = \"Invalid tool object or missing input schema\"\n                raise ValueError(msg)\n\n            flat_schema = flatten_schema(tool_obj.args_schema.schema())\n            input_schema = create_input_schema_from_json_schema(flat_schema)\n            if not input_schema:\n                msg = f\"Empty input schema for tool '{tool_obj.name}'\"\n                raise ValueError(msg)\n\n            schema_inputs = schema_to_langflow_inputs(input_schema)\n            if not schema_inputs:\n                msg = f\"No input parameters defined for tool '{tool_obj.name}'\"\n                await logger.awarning(msg)\n                return []\n\n        except Exception as e:\n            msg = f\"Error validating schema inputs: {e!s}\"\n            await logger.aexception(msg)\n            raise ValueError(msg) from e\n        else:\n            return schema_inputs\n\n    async def update_tool_list(self, mcp_server_value=None):\n        # Accepts mcp_server_value as dict {name, config} or uses self.mcp_server\n        mcp_server = mcp_server_value if mcp_server_value is not None else getattr(self, \"mcp_server\", None)\n        server_name = None\n        server_config_from_value = None\n        if isinstance(mcp_server, dict):\n            server_name = mcp_server.get(\"name\")\n            server_config_from_value = mcp_server.get(\"config\")\n        else:\n            server_name = mcp_server\n        if not server_name:\n            self.tools = []\n            return [], {\"name\": server_name, \"config\": server_config_from_value}\n\n        # Check if caching is enabled, default to False\n        use_cache = getattr(self, \"use_cache\", False)\n\n        # Use shared cache if available and caching is enabled\n        cached = None\n        if use_cache:\n            servers_cache = safe_cache_get(self._shared_component_cache, \"servers\", {})\n            cached = servers_cache.get(server_name) if isinstance(servers_cache, dict) else None\n\n        if cached is not None:\n            try:\n                self.tools = cached[\"tools\"]\n                self.tool_names = cached[\"tool_names\"]\n                self._tool_cache = cached[\"tool_cache\"]\n                server_config_from_value = cached[\"config\"]\n            except (TypeError, KeyError, AttributeError) as e:\n                # Handle corrupted cache data by clearing it and continuing to fetch fresh tools\n                msg = f\"Unable to use cached data for MCP Server{server_name}: {e}\"\n                await logger.awarning(msg)\n                # Clear the corrupted cache entry\n                current_servers_cache = safe_cache_get(self._shared_component_cache, \"servers\", {})\n                if isinstance(current_servers_cache, dict) and server_name in current_servers_cache:\n                    current_servers_cache.pop(server_name)\n                    safe_cache_set(self._shared_component_cache, \"servers\", current_servers_cache)\n            else:\n                return self.tools, {\"name\": server_name, \"config\": server_config_from_value}\n\n        try:\n            try:\n                from langflow.api.v2.mcp import get_server\n                from langflow.services.database.models.user.crud import get_user_by_id\n            except ImportError as e:\n                msg = (\n                    \"Langflow MCP server functionality is not available. \"\n                    \"This feature requires the full Langflow installation.\"\n                )\n                raise ImportError(msg) from e\n            async with session_scope() as db:\n                if not self.user_id:\n                    msg = \"User ID is required for fetching MCP tools.\"\n                    raise ValueError(msg)\n                current_user = await get_user_by_id(db, self.user_id)\n\n                # Try to get server config from DB/API\n                server_config = await get_server(\n                    server_name,\n                    current_user,\n                    db,\n                    storage_service=get_storage_service(),\n                    settings_service=get_settings_service(),\n                )\n\n                # If get_server returns empty but we have a config, use it\n                if not server_config and server_config_from_value:\n                    server_config = server_config_from_value\n\n                if not server_config:\n                    self.tools = []\n                    return [], {\"name\": server_name, \"config\": server_config}\n\n                _, tool_list, tool_cache = await update_tools(\n                    server_name=server_name,\n                    server_config=server_config,\n                    mcp_stdio_client=self.stdio_client,\n                    mcp_sse_client=self.sse_client,\n                )\n\n                self.tool_names = [tool.name for tool in tool_list if hasattr(tool, \"name\")]\n                self._tool_cache = tool_cache\n                self.tools = tool_list\n\n                # Cache the result only if caching is enabled\n                if use_cache:\n                    cache_data = {\n                        \"tools\": tool_list,\n                        \"tool_names\": self.tool_names,\n                        \"tool_cache\": tool_cache,\n                        \"config\": server_config,\n                    }\n\n                    # Safely update the servers cache\n                    current_servers_cache = safe_cache_get(self._shared_component_cache, \"servers\", {})\n                    if isinstance(current_servers_cache, dict):\n                        current_servers_cache[server_name] = cache_data\n                        safe_cache_set(self._shared_component_cache, \"servers\", current_servers_cache)\n\n        except (TimeoutError, asyncio.TimeoutError) as e:\n            msg = f\"Timeout updating tool list: {e!s}\"\n            await logger.aexception(msg)\n            raise TimeoutError(msg) from e\n        except Exception as e:\n            msg = f\"Error updating tool list: {e!s}\"\n            await logger.aexception(msg)\n            raise ValueError(msg) from e\n        else:\n            return tool_list, {\"name\": server_name, \"config\": server_config}\n\n    async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n        \"\"\"Toggle the visibility of connection-specific fields based on the selected mode.\"\"\"\n        try:\n            if field_name == \"tool\":\n                try:\n                    if len(self.tools) == 0:\n                        try:\n                            self.tools, build_config[\"mcp_server\"][\"value\"] = await self.update_tool_list()\n                            build_config[\"tool\"][\"options\"] = [tool.name for tool in self.tools]\n                            build_config[\"tool\"][\"placeholder\"] = \"Select a tool\"\n                        except (TimeoutError, asyncio.TimeoutError) as e:\n                            msg = f\"Timeout updating tool list: {e!s}\"\n                            await logger.aexception(msg)\n                            if not build_config[\"tools_metadata\"][\"show\"]:\n                                build_config[\"tool\"][\"show\"] = True\n                                build_config[\"tool\"][\"options\"] = []\n                                build_config[\"tool\"][\"value\"] = \"\"\n                                build_config[\"tool\"][\"placeholder\"] = \"Timeout on MCP server\"\n                            else:\n                                build_config[\"tool\"][\"show\"] = False\n                        except ValueError:\n                            if not build_config[\"tools_metadata\"][\"show\"]:\n                                build_config[\"tool\"][\"show\"] = True\n                                build_config[\"tool\"][\"options\"] = []\n                                build_config[\"tool\"][\"value\"] = \"\"\n                                build_config[\"tool\"][\"placeholder\"] = \"Error on MCP Server\"\n                            else:\n                                build_config[\"tool\"][\"show\"] = False\n\n                    if field_value == \"\":\n                        return build_config\n                    tool_obj = None\n                    for tool in self.tools:\n                        if tool.name == field_value:\n                            tool_obj = tool\n                            break\n                    if tool_obj is None:\n                        msg = f\"Tool {field_value} not found in available tools: {self.tools}\"\n                        await logger.awarning(msg)\n                        return build_config\n                    await self._update_tool_config(build_config, field_value)\n                except Exception as e:\n                    build_config[\"tool\"][\"options\"] = []\n                    msg = f\"Failed to update tools: {e!s}\"\n                    raise ValueError(msg) from e\n                else:\n                    return build_config\n            elif field_name == \"mcp_server\":\n                if not field_value:\n                    build_config[\"tool\"][\"show\"] = False\n                    build_config[\"tool\"][\"options\"] = []\n                    build_config[\"tool\"][\"value\"] = \"\"\n                    build_config[\"tool\"][\"placeholder\"] = \"\"\n                    build_config[\"tool_placeholder\"][\"tool_mode\"] = False\n                    self.remove_non_default_keys(build_config)\n                    return build_config\n\n                build_config[\"tool_placeholder\"][\"tool_mode\"] = True\n\n                current_server_name = field_value.get(\"name\") if isinstance(field_value, dict) else field_value\n                _last_selected_server = safe_cache_get(self._shared_component_cache, \"last_selected_server\", \"\")\n\n                # To avoid unnecessary updates, only proceed if the server has actually changed\n                if (_last_selected_server in (current_server_name, \"\")) and build_config[\"tool\"][\"show\"]:\n                    if current_server_name:\n                        servers_cache = safe_cache_get(self._shared_component_cache, \"servers\", {})\n                        if isinstance(servers_cache, dict):\n                            cached = servers_cache.get(current_server_name)\n                            if cached is not None and cached.get(\"tool_names\"):\n                                cached_tools = cached[\"tool_names\"]\n                                current_tools = build_config[\"tool\"][\"options\"]\n                                if current_tools == cached_tools:\n                                    return build_config\n                    else:\n                        return build_config\n\n                # Determine if \"Tool Mode\" is active by checking if the tool dropdown is hidden.\n                is_in_tool_mode = build_config[\"tools_metadata\"][\"show\"]\n                safe_cache_set(self._shared_component_cache, \"last_selected_server\", current_server_name)\n\n                # Check if tools are already cached for this server before clearing\n                cached_tools = None\n                if current_server_name:\n                    use_cache = getattr(self, \"use_cache\", True)\n                    if use_cache:\n                        servers_cache = safe_cache_get(self._shared_component_cache, \"servers\", {})\n                        if isinstance(servers_cache, dict):\n                            cached = servers_cache.get(current_server_name)\n                            if cached is not None:\n                                try:\n                                    cached_tools = cached[\"tools\"]\n                                    self.tools = cached_tools\n                                    self.tool_names = cached[\"tool_names\"]\n                                    self._tool_cache = cached[\"tool_cache\"]\n                                except (TypeError, KeyError, AttributeError) as e:\n                                    # Handle corrupted cache data by ignoring it\n                                    msg = f\"Unable to use cached data for MCP Server,{current_server_name}: {e}\"\n                                    await logger.awarning(msg)\n                                    cached_tools = None\n\n                # Only clear tools if we don't have cached tools for the current server\n                if not cached_tools:\n                    self.tools = []  # Clear previous tools only if no cache\n\n                self.remove_non_default_keys(build_config)  # Clear previous tool inputs\n\n                # Only show the tool dropdown if not in tool_mode\n                if not is_in_tool_mode:\n                    build_config[\"tool\"][\"show\"] = True\n                    if cached_tools:\n                        # Use cached tools to populate options immediately\n                        build_config[\"tool\"][\"options\"] = [tool.name for tool in cached_tools]\n                        build_config[\"tool\"][\"placeholder\"] = \"Select a tool\"\n                    else:\n                        # Show loading state only when we need to fetch tools\n                        build_config[\"tool\"][\"placeholder\"] = \"Loading tools...\"\n                        build_config[\"tool\"][\"options\"] = []\n                        build_config[\"tool\"][\"value\"] = uuid.uuid4()\n                else:\n                    # Keep the tool dropdown hidden if in tool_mode\n                    self._not_load_actions = True\n                    build_config[\"tool\"][\"show\"] = False\n\n            elif field_name == \"tool_mode\":\n                build_config[\"tool\"][\"placeholder\"] = \"\"\n                build_config[\"tool\"][\"show\"] = not bool(field_value) and bool(build_config[\"mcp_server\"])\n                self.remove_non_default_keys(build_config)\n                self.tool = build_config[\"tool\"][\"value\"]\n                if field_value:\n                    self._not_load_actions = True\n                else:\n                    build_config[\"tool\"][\"value\"] = uuid.uuid4()\n                    build_config[\"tool\"][\"options\"] = []\n                    build_config[\"tool\"][\"show\"] = True\n                    build_config[\"tool\"][\"placeholder\"] = \"Loading tools...\"\n            elif field_name == \"tools_metadata\":\n                self._not_load_actions = False\n\n        except Exception as e:\n            msg = f\"Error in update_build_config: {e!s}\"\n            await logger.aexception(msg)\n            raise ValueError(msg) from e\n        else:\n            return build_config\n\n    def get_inputs_for_all_tools(self, tools: list) -> dict:\n        \"\"\"Get input schemas for all tools.\"\"\"\n        inputs = {}\n        for tool in tools:\n            if not tool or not hasattr(tool, \"name\"):\n                continue\n            try:\n                flat_schema = flatten_schema(tool.args_schema.schema())\n                input_schema = create_input_schema_from_json_schema(flat_schema)\n                langflow_inputs = schema_to_langflow_inputs(input_schema)\n                inputs[tool.name] = langflow_inputs\n            except (AttributeError, ValueError, TypeError, KeyError) as e:\n                msg = f\"Error getting inputs for tool {getattr(tool, 'name', 'unknown')}: {e!s}\"\n                logger.exception(msg)\n                continue\n        return inputs\n\n    def remove_input_schema_from_build_config(\n        self, build_config: dict, tool_name: str, input_schema: dict[list[InputTypes], Any]\n    ):\n        \"\"\"Remove the input schema for the tool from the build config.\"\"\"\n        # Keep only schemas that don't belong to the current tool\n        input_schema = {k: v for k, v in input_schema.items() if k != tool_name}\n        # Remove all inputs from other tools\n        for value in input_schema.values():\n            for _input in value:\n                if _input.name in build_config:\n                    build_config.pop(_input.name)\n\n    def remove_non_default_keys(self, build_config: dict) -> None:\n        \"\"\"Remove non-default keys from the build config.\"\"\"\n        for key in list(build_config.keys()):\n            if key not in self.default_keys:\n                build_config.pop(key)\n\n    async def _update_tool_config(self, build_config: dict, tool_name: str) -> None:\n        \"\"\"Update tool configuration with proper error handling.\"\"\"\n        if not self.tools:\n            self.tools, build_config[\"mcp_server\"][\"value\"] = await self.update_tool_list()\n\n        if not tool_name:\n            return\n\n        tool_obj = next((tool for tool in self.tools if tool.name == tool_name), None)\n        if not tool_obj:\n            msg = f\"Tool {tool_name} not found in available tools: {self.tools}\"\n            self.remove_non_default_keys(build_config)\n            build_config[\"tool\"][\"value\"] = \"\"\n            await logger.awarning(msg)\n            return\n\n        try:\n            # Store current values before removing inputs\n            current_values = {}\n            for key, value in build_config.items():\n                if key not in self.default_keys and isinstance(value, dict) and \"value\" in value:\n                    current_values[key] = value[\"value\"]\n\n            # Get all tool inputs and remove old ones\n            input_schema_for_all_tools = self.get_inputs_for_all_tools(self.tools)\n            self.remove_input_schema_from_build_config(build_config, tool_name, input_schema_for_all_tools)\n\n            # Get and validate new inputs\n            self.schema_inputs = await self._validate_schema_inputs(tool_obj)\n            if not self.schema_inputs:\n                msg = f\"No input parameters to configure for tool '{tool_name}'\"\n                await logger.ainfo(msg)\n                return\n\n            # Add new inputs to build config\n            for schema_input in self.schema_inputs:\n                if not schema_input or not hasattr(schema_input, \"name\"):\n                    msg = \"Invalid schema input detected, skipping\"\n                    await logger.awarning(msg)\n                    continue\n\n                try:\n                    name = schema_input.name\n                    input_dict = schema_input.to_dict()\n                    input_dict.setdefault(\"value\", None)\n                    input_dict.setdefault(\"required\", True)\n\n                    build_config[name] = input_dict\n\n                    # Preserve existing value if the parameter name exists in current_values\n                    if name in current_values:\n                        build_config[name][\"value\"] = current_values[name]\n\n                except (AttributeError, KeyError, TypeError) as e:\n                    msg = f\"Error processing schema input {schema_input}: {e!s}\"\n                    await logger.aexception(msg)\n                    continue\n        except ValueError as e:\n            msg = f\"Schema validation error for tool {tool_name}: {e!s}\"\n            await logger.aexception(msg)\n            self.schema_inputs = []\n            return\n        except (AttributeError, KeyError, TypeError) as e:\n            msg = f\"Error updating tool config: {e!s}\"\n            await logger.aexception(msg)\n            raise ValueError(msg) from e\n\n    async def build_output(self) -> DataFrame:\n        \"\"\"Build output with improved error handling and validation.\"\"\"\n        try:\n            self.tools, _ = await self.update_tool_list()\n            if self.tool != \"\":\n                # Set session context for persistent MCP sessions using Langflow session ID\n                session_context = self._get_session_context()\n                if session_context:\n                    self.stdio_client.set_session_context(session_context)\n                    self.sse_client.set_session_context(session_context)\n\n                exec_tool = self._tool_cache[self.tool]\n                tool_args = self.get_inputs_for_all_tools(self.tools)[self.tool]\n                kwargs = {}\n                for arg in tool_args:\n                    value = getattr(self, arg.name, None)\n                    if value is not None:\n                        if isinstance(value, Message):\n                            kwargs[arg.name] = value.text\n                        else:\n                            kwargs[arg.name] = value\n\n                unflattened_kwargs = maybe_unflatten_dict(kwargs)\n\n                output = await exec_tool.coroutine(**unflattened_kwargs)\n\n                tool_content = []\n                for item in output.content:\n                    item_dict = item.model_dump()\n                    tool_content.append(item_dict)\n                return DataFrame(data=tool_content)\n            return DataFrame(data=[{\"error\": \"You must select a tool\"}])\n        except Exception as e:\n            msg = f\"Error in build_output: {e!s}\"\n            await logger.aexception(msg)\n            raise ValueError(msg) from e\n\n    def _get_session_context(self) -> str | None:\n        \"\"\"Get the Langflow session ID for MCP session caching.\"\"\"\n        # Try to get session ID from the component's execution context\n        if hasattr(self, \"graph\") and hasattr(self.graph, \"session_id\"):\n            session_id = self.graph.session_id\n            # Include server name to ensure different servers get different sessions\n            server_name = \"\"\n            mcp_server = getattr(self, \"mcp_server\", None)\n            if isinstance(mcp_server, dict):\n                server_name = mcp_server.get(\"name\", \"\")\n            elif mcp_server:\n                server_name = str(mcp_server)\n            return f\"{session_id}_{server_name}\" if session_id else None\n        return None\n\n    async def _get_tools(self):\n        \"\"\"Get cached tools or update if necessary.\"\"\"\n        mcp_server = getattr(self, \"mcp_server\", None)\n        if not self._not_load_actions:\n            tools, _ = await self.update_tool_list(mcp_server)\n            return tools\n        return []\n"
},
"mcp_server": {
"_input_type": "McpInput",
"advanced": false,
"display_name": "MCP Server",
"dynamic": false,
"info": "Select the MCP Server that will be used by this component",
"name": "mcp_server",
"placeholder": "",
"real_time_refresh": true,
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "mcp",
"value": {
"config": {},
"name": "lf-starter_project"
}
},
"tool": {
"_input_type": "DropdownInput",
"advanced": false,
"combobox": false,
"dialog_inputs": {},
"display_name": "Tool",
"dynamic": false,
"external_options": {},
"info": "Select the tool to execute",
"name": "tool",
"options": [
"opensearch_url_ingestion_flow"
],
"options_metadata": [],
"placeholder": "",
"real_time_refresh": true,
"required": true,
"show": false,
"title_case": false,
"toggle": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"tool_placeholder": {
"_input_type": "MessageTextInput",
"advanced": false,
"display_name": "Tool Placeholder",
"dynamic": false,
"info": "Placeholder for the tool",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "tool_placeholder",
"placeholder": "",
"required": false,
"show": false,
"title_case": false,
"tool_mode": true,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"tools_metadata": {
"_input_type": "ToolsInput",
"advanced": false,
"display_name": "Actions",
"dynamic": false,
"info": "Modify tool names and descriptions to help agents understand when to use each tool.",
"is_list": true,
"list_add_label": "Add More",
"name": "tools_metadata",
"placeholder": "",
"real_time_refresh": true,
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "tools",
"value": [
{
"args": {
"input_value": {
"anyOf": [
{
"type": "string"
},
{
"type": "null"
}
],
"default": null,
"description": "Message to be passed as input.",
"title": "Input Value"
}
},
"description": "This flow is to ingest the URL to open search.",
"display_description": "This flow is to ingest the URL to open search.",
"display_name": "opensearch_url_ingestion_flow",
"name": "opensearch_url_ingestion_flow",
"readonly": false,
"status": true,
"tags": [
"opensearch_url_ingestion_flow"
]
}
]
},
"use_cache": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Use Cached Server",
"dynamic": false,
"info": "Enable caching of MCP Server and tools to improve performance. Disable to always fetch fresh tools and server updates.",
"list": false,
"list_add_label": "Add More",
"name": "use_cache",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "bool",
"value": false
}
},
"tool_mode": true
},
"showNode": true,
"type": "MCP"
},
"dragging": false,
"id": "MCP-7EY21",
"measured": {
"height": 284,
"width": 320
},
"position": {
"x": 725.4538018784724,
"y": 894.358559115783
},
"selected": false,
"type": "genericNode"
},
{
"data": {
"id": "note-Wg9xF",
"node": {
"description": "## README\n\nThis flow generates answers for OpenRAG's chat, informed by the context stored in OpenSearch.\nIn this flow, the [**Langflow Agent** component](https://docs.langflow.org/agents) uses the connected [**Language Model** component](https://docs.langflow.org/components-models) to select the correct tool to complete requests.\n* If the Agent determines your request requires external knowledge, it will embed your query with the [**Embedding Model** component](https://docs.langflow.org/components-embedding-models) and query your [OpenSearch knowledge](https://docs.openr.ag/knowledge).\n\n* If the Agent determines your request requires a web search, it selects the [**MCP Tools** component](https://docs.langflow.org/mcp-client#mcp-tools-parameters) to fetch web content with the [OpenSearch URL ingestion flow](https://docs.openr.ag/ingestion#url-flow).\n\nUsing the retrieved data, the Agent generates a response with the connected [**Language Model** component](https://docs.langflow.org/components-models) and sends it to the [**Chat Output** component](https://docs.langflow.org/components-io).\n\nFor more information, see the [OpenRAG docs](https://docs.openr.ag/agents).",
"display_name": "",
"documentation": "",
"template": {
"backgroundColor": "amber"
}
},
"type": "note"
},
"dragging": false,
"height": 469,
"id": "note-Wg9xF",
"measured": {
"height": 469,
"width": 644
},
"position": {
"x": 19.942791510714386,
"y": 259.5061905471592
},
"resizing": false,
"selected": false,
"type": "noteNode",
"width": 644
},
{
"data": {
"id": "ChatInput-ci8VE",
"node": {
"base_classes": [
"Message"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
"description": "Get chat inputs from the Playground.",
"display_name": "Chat Input",
"documentation": "https://docs.langflow.org/components-io#chat-input",
"edited": false,
"field_order": [
"input_value",
"should_store_message",
"sender",
"sender_name",
"session_id",
"context_id",
"files"
],
"frozen": false,
"icon": "MessagesSquare",
"legacy": false,
"metadata": {
"code_hash": "0014a5b41817",
"dependencies": {
"dependencies": [
{
"name": "lfx",
"version": "0.1.13.dev9"
}
],
"total_dependencies": 1
},
"module": "lfx.components.input_output.chat.ChatInput"
},
"minimized": true,
"output_types": [],
"outputs": [
{
"allows_loop": false,
"cache": true,
"display_name": "Chat Message",
"group_outputs": false,
"method": "message_response",
"name": "message",
"selected": "Message",
"tool_mode": true,
"types": [
"Message"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"template": {
"_type": "Component",
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "from lfx.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.inputs.inputs import BoolInput\nfrom lfx.io import (\n    DropdownInput,\n    FileInput,\n    MessageTextInput,\n    MultilineInput,\n    Output,\n)\nfrom lfx.schema.message import Message\nfrom lfx.utils.constants import (\n    MESSAGE_SENDER_AI,\n    MESSAGE_SENDER_NAME_USER,\n    MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n    display_name = \"Chat Input\"\n    description = \"Get chat inputs from the Playground.\"\n    documentation: str = \"https://docs.langflow.org/components-io#chat-input\"\n    icon = \"MessagesSquare\"\n    name = \"ChatInput\"\n    minimized = True\n\n    inputs = [\n        MultilineInput(\n            name=\"input_value\",\n            display_name=\"Input Text\",\n            value=\"\",\n            info=\"Message to be passed as input.\",\n            input_types=[],\n        ),\n        BoolInput(\n            name=\"should_store_message\",\n            display_name=\"Store Messages\",\n            info=\"Store the message in the history.\",\n            value=True,\n            advanced=True,\n        ),\n        DropdownInput(\n            name=\"sender\",\n            display_name=\"Sender Type\",\n            options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n            value=MESSAGE_SENDER_USER,\n            info=\"Type of sender.\",\n            advanced=True,\n        ),\n        MessageTextInput(\n            name=\"sender_name\",\n            display_name=\"Sender Name\",\n            info=\"Name of the sender.\",\n            value=MESSAGE_SENDER_NAME_USER,\n            advanced=True,\n        ),\n        MessageTextInput(\n            name=\"session_id\",\n            display_name=\"Session ID\",\n            info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n            advanced=True,\n        ),\n        MessageTextInput(\n            name=\"context_id\",\n            display_name=\"Context ID\",\n            info=\"The context ID of the chat. Adds an extra layer to the local memory.\",\n            value=\"\",\n            advanced=True,\n        ),\n        FileInput(\n            name=\"files\",\n            display_name=\"Files\",\n            file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n            info=\"Files to be sent with the message.\",\n            advanced=True,\n            is_list=True,\n            temp_file=True,\n        ),\n    ]\n    outputs = [\n        Output(display_name=\"Chat Message\", name=\"message\", method=\"message_response\"),\n    ]\n\n    async def message_response(self) -> Message:\n        # Ensure files is a list and filter out empty/None values\n        files = self.files if self.files else []\n        if files and not isinstance(files, list):\n            files = [files]\n        # Filter out None/empty values\n        files = [f for f in files if f is not None and f != \"\"]\n\n        message = await Message.create(\n            text=self.input_value,\n            sender=self.sender,\n            sender_name=self.sender_name,\n            session_id=self.session_id,\n            context_id=self.context_id,\n            files=files,\n        )\n        if self.session_id and isinstance(message, Message) and self.should_store_message:\n            stored_message = await self.send_message(\n                message,\n            )\n            self.message.value = stored_message\n            message = stored_message\n\n        self.status = message\n        return message\n"
},
"context_id": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Context ID",
"dynamic": false,
"info": "The context ID of the chat. Adds an extra layer to the local memory.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "context_id",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"files": {
"_input_type": "FileInput",
"advanced": true,
"display_name": "Files",
"dynamic": false,
"fileTypes": [
"csv",
"json",
"pdf",
"txt",
"md",
"mdx",
"yaml",
"yml",
"xml",
"html",
"htm",
"docx",
"py",
"sh",
"sql",
"js",
"ts",
"tsx",
"jpg",
"jpeg",
"png",
"bmp",
"image"
],
"file_path": "",
"info": "Files to be sent with the message.",
"list": true,
"list_add_label": "Add More",
"name": "files",
"placeholder": "",
"required": false,
"show": true,
"temp_file": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "file",
"value": ""
},
"input_value": {
"_input_type": "MultilineInput",
"advanced": false,
"copy_field": false,
"display_name": "Input Text",
"dynamic": false,
"info": "Message to be passed as input.",
"input_types": [],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"multiline": true,
"name": "input_value",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"sender": {
"_input_type": "DropdownInput",
"advanced": true,
"combobox": false,
"dialog_inputs": {},
"display_name": "Sender Type",
"dynamic": false,
"external_options": {},
"info": "Type of sender.",
"name": "sender",
"options": [
"Machine",
"User"
],
"options_metadata": [],
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"toggle": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "User"
},
"sender_name": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Sender Name",
"dynamic": false,
"info": "Name of the sender.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "sender_name",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "User"
},
"session_id": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Session ID",
"dynamic": false,
"info": "The session ID of the chat. If empty, the current session ID parameter will be used.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "session_id",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"should_store_message": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Store Messages",
"dynamic": false,
"info": "Store the message in the history.",
"list": false,
"list_add_label": "Add More",
"name": "should_store_message",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
}
},
"tool_mode": false
},
"showNode": false,
"type": "ChatInput"
},
"dragging": false,
"id": "ChatInput-ci8VE",
"measured": {
"height": 48,
"width": 192
},
"position": {
"x": 1287.1568425342111,
"y": 1247.5760137961474
},
"selected": false,
"type": "genericNode"
},
{
"data": {
"id": "ChatOutput-gWl8E",
"node": {
"base_classes": [
"Message"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
"description": "Display a chat message in the Playground.",
"display_name": "Chat Output",
"documentation": "https://docs.langflow.org/components-io#chat-output",
"edited": false,
"field_order": [
"input_value",
"should_store_message",
"sender",
"sender_name",
"session_id",
"context_id",
"data_template",
"clean_data"
],
"frozen": false,
"icon": "MessagesSquare",
"legacy": false,
"metadata": {
"code_hash": "4848ad3e35d5",
"dependencies": {
"dependencies": [
{
"name": "orjson",
"version": "3.10.15"
},
{
"name": "fastapi",
"version": "0.119.1"
},
{
"name": "lfx",
"version": "0.1.13.dev9"
}
],
"total_dependencies": 3
},
"module": "lfx.components.input_output.chat_output.ChatOutput"
},
"minimized": true,
"output_types": [],
"outputs": [
{
"allows_loop": false,
"cache": true,
"display_name": "Output Message",
"group_outputs": false,
"method": "message_response",
"name": "message",
"selected": "Message",
"tool_mode": true,
"types": [
"Message"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"template": {
"_type": "Component",
"clean_data": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Basic Clean Data",
"dynamic": false,
"info": "Whether to clean data before converting to string.",
"list": false,
"list_add_label": "Add More",
"name": "clean_data",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
},
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "from collections.abc import Generator\nfrom typing import Any\n\nimport orjson\nfrom fastapi.encoders import jsonable_encoder\n\nfrom lfx.base.io.chat import ChatComponent\nfrom lfx.helpers.data import safe_convert\nfrom lfx.inputs.inputs import BoolInput, DropdownInput, HandleInput, MessageTextInput\nfrom lfx.schema.data import Data\nfrom lfx.schema.dataframe import DataFrame\nfrom lfx.schema.message import Message\nfrom lfx.schema.properties import Source\nfrom lfx.template.field.base import Output\nfrom lfx.utils.constants import (\n    MESSAGE_SENDER_AI,\n    MESSAGE_SENDER_NAME_AI,\n    MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n    display_name = \"Chat Output\"\n    description = \"Display a chat message in the Playground.\"\n    documentation: str = \"https://docs.langflow.org/components-io#chat-output\"\n    icon = \"MessagesSquare\"\n    name = \"ChatOutput\"\n    minimized = True\n\n    inputs = [\n        HandleInput(\n            name=\"input_value\",\n            display_name=\"Inputs\",\n            info=\"Message to be passed as output.\",\n            input_types=[\"Data\", \"DataFrame\", \"Message\"],\n            required=True,\n        ),\n        BoolInput(\n            name=\"should_store_message\",\n            display_name=\"Store Messages\",\n            info=\"Store the message in the history.\",\n            value=True,\n            advanced=True,\n        ),\n        DropdownInput(\n            name=\"sender\",\n            display_name=\"Sender Type\",\n            options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n            value=MESSAGE_SENDER_AI,\n            advanced=True,\n            info=\"Type of sender.\",\n        ),\n        MessageTextInput(\n            name=\"sender_name\",\n            display_name=\"Sender Name\",\n            info=\"Name of the sender.\",\n            value=MESSAGE_SENDER_NAME_AI,\n            advanced=True,\n        ),\n        MessageTextInput(\n            name=\"session_id\",\n            display_name=\"Session ID\",\n            info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n            advanced=True,\n        ),\n        MessageTextInput(\n            name=\"context_id\",\n            display_name=\"Context ID\",\n            info=\"The context ID of the chat. Adds an extra layer to the local memory.\",\n            value=\"\",\n            advanced=True,\n        ),\n        MessageTextInput(\n            name=\"data_template\",\n            display_name=\"Data Template\",\n            value=\"{text}\",\n            advanced=True,\n            info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n        ),\n        BoolInput(\n            name=\"clean_data\",\n            display_name=\"Basic Clean Data\",\n            value=True,\n            advanced=True,\n            info=\"Whether to clean data before converting to string.\",\n        ),\n    ]\n    outputs = [\n        Output(\n            display_name=\"Output Message\",\n            name=\"message\",\n            method=\"message_response\",\n        ),\n    ]\n\n    def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n        source_dict = {}\n        if id_:\n            source_dict[\"id\"] = id_\n        if display_name:\n            source_dict[\"display_name\"] = display_name\n        if source:\n            # Handle case where source is a ChatOpenAI object\n            if hasattr(source, \"model_name\"):\n                source_dict[\"source\"] = source.model_name\n            elif hasattr(source, \"model\"):\n                source_dict[\"source\"] = str(source.model)\n            else:\n                source_dict[\"source\"] = str(source)\n        return Source(**source_dict)\n\n    async def message_response(self) -> Message:\n        # First convert the input to string if needed\n        text = self.convert_to_string()\n\n        # Get source properties\n        source, _, display_name, source_id = self.get_properties_from_source_component()\n\n        # Create or use existing Message object\n        if isinstance(self.input_value, Message):\n            message = self.input_value\n            # Update message properties\n            message.text = text\n        else:\n            message = Message(text=text)\n\n        # Set message properties\n        message.sender = self.sender\n        message.sender_name = self.sender_name\n        message.session_id = self.session_id\n        message.context_id = self.context_id\n        message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n        message.properties.source = self._build_source(source_id, display_name, source)\n\n        # Store message if needed\n        if self.session_id and self.should_store_message:\n            stored_message = await self.send_message(message)\n            self.message.value = stored_message\n            message = stored_message\n\n        self.status = message\n        return message\n\n    def _serialize_data(self, data: Data) -> str:\n        \"\"\"Serialize Data object to JSON string.\"\"\"\n        # Convert data.data to JSON-serializable format\n        serializable_data = jsonable_encoder(data.data)\n        # Serialize with orjson, enabling pretty printing with indentation\n        json_bytes = orjson.dumps(serializable_data, option=orjson.OPT_INDENT_2)\n        # Convert bytes to string and wrap in Markdown code blocks\n        return \"```json\\n\" + json_bytes.decode(\"utf-8\") + \"\\n```\"\n\n    def _validate_input(self) -> None:\n        \"\"\"Validate the input data and raise ValueError if invalid.\"\"\"\n        if self.input_value is None:\n            msg = \"Input data cannot be None\"\n            raise ValueError(msg)\n        if isinstance(self.input_value, list) and not all(\n            isinstance(item, Message | Data | DataFrame | str) for item in self.input_value\n        ):\n            invalid_types = [\n                type(item).__name__\n                for item in self.input_value\n                if not isinstance(item, Message | Data | DataFrame | str)\n            ]\n            msg = f\"Expected Data or DataFrame or Message or str, got {invalid_types}\"\n            raise TypeError(msg)\n        if not isinstance(\n            self.input_value,\n            Message | Data | DataFrame | str | list | Generator | type(None),\n        ):\n            type_name = type(self.input_value).__name__\n            msg = f\"Expected Data or DataFrame or Message or str, Generator or None, got {type_name}\"\n            raise TypeError(msg)\n\n    def convert_to_string(self) -> str | Generator[Any, None, None]:\n        \"\"\"Convert input data to string with proper error handling.\"\"\"\n        self._validate_input()\n        if isinstance(self.input_value, list):\n            clean_data: bool = getattr(self, \"clean_data\", False)\n            return \"\\n\".join([safe_convert(item, clean_data=clean_data) for item in self.input_value])\n        if isinstance(self.input_value, Generator):\n            return self.input_value\n        return safe_convert(self.input_value)\n"
},
"context_id": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Context ID",
"dynamic": false,
"info": "The context ID of the chat. Adds an extra layer to the local memory.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "context_id",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"data_template": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Data Template",
"dynamic": false,
"info": "Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "data_template",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "{text}"
},
"input_value": {
"_input_type": "HandleInput",
"advanced": false,
"display_name": "Inputs",
"dynamic": false,
"info": "Message to be passed as output.",
"input_types": [
"Data",
"DataFrame",
"Message"
],
"list": false,
"list_add_label": "Add More",
"name": "input_value",
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "other",
"value": ""
},
"sender": {
"_input_type": "DropdownInput",
"advanced": true,
"combobox": false,
"dialog_inputs": {},
"display_name": "Sender Type",
"dynamic": false,
"external_options": {},
"info": "Type of sender.",
"name": "sender",
"options": [
"Machine",
"User"
],
"options_metadata": [],
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"toggle": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "Machine"
},
"sender_name": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Sender Name",
"dynamic": false,
"info": "Name of the sender.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "sender_name",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "AI"
},
"session_id": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Session ID",
"dynamic": false,
"info": "The session ID of the chat. If empty, the current session ID parameter will be used.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "session_id",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"should_store_message": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Store Messages",
"dynamic": false,
"info": "Store the message in the history.",
"list": false,
"list_add_label": "Add More",
"name": "should_store_message",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
}
},
"tool_mode": false
},
"showNode": false,
"type": "ChatOutput"
},
"id": "ChatOutput-gWl8E",
"measured": {
"height": 48,
"width": 192
},
"position": {
"x": 2115.4119264295655,
"y": 614.3278230710916
},
"selected": false,
"type": "genericNode"
},
{
"data": {
"id": "EmbeddingModel-oPi95",
"node": {
"base_classes": [
"Embeddings"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
"description": "Generate embeddings using a specified provider.",
"display_name": "Embedding Model",
"documentation": "https://docs.langflow.org/components-embedding-models",
"edited": false,
"field_order": [
"provider",
"api_base",
"ollama_base_url",
"base_url_ibm_watsonx",
"model",
"api_key",
"project_id",
"dimensions",
"chunk_size",
"request_timeout",
"max_retries",
"show_progress_bar",
"model_kwargs"
],
"frozen": false,
"icon": "binary",
"last_updated": "2025-11-11T21:40:35.510Z",
"legacy": false,
"metadata": {
"code_hash": "bb03f97be707",
"dependencies": {
"dependencies": [
{
"name": "langchain_openai",
"version": "0.3.23"
},
{
"name": "lfx",
"version": null
},
{
"name": "langchain_ollama",
"version": "0.3.10"
},
{
"name": "langchain_community",
"version": "0.3.21"
},
{
"name": "langchain_ibm",
"version": "0.3.19"
}
],
"total_dependencies": 5
},
"module": "lfx.components.models.embedding_model.EmbeddingModelComponent"
},
"minimized": false,
"output_types": [],
"outputs": [
{
"allows_loop": false,
"cache": true,
"display_name": "Embedding Model",
"group_outputs": false,
"method": "build_embeddings",
"name": "embeddings",
"options": null,
"required_inputs": null,
"selected": "Embeddings",
"tool_mode": true,
"types": [
"Embeddings"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"template": {
"_type": "Component",
"api_base": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "API Base URL",
"dynamic": false,
"info": "Base URL for the API. Leave empty for default.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "api_base",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"api_key": {
"_input_type": "SecretStrInput",
"advanced": false,
"display_name": "OpenAI API Key",
"dynamic": false,
"info": "Model Provider API key",
"input_types": [],
"load_from_db": true,
"name": "api_key",
"password": true,
"placeholder": "",
"real_time_refresh": true,
"required": true,
"show": true,
"title_case": false,
"type": "str",
"value": "OPENAI_API_KEY"
},
"base_url_ibm_watsonx": {
"_input_type": "DropdownInput",
"advanced": false,
"combobox": false,
"dialog_inputs": {},
"display_name": "watsonx API Endpoint",
"dynamic": false,
"external_options": {},
"info": "The base URL of the API (IBM watsonx.ai only)",
"name": "base_url_ibm_watsonx",
"options": [
"https://us-south.ml.cloud.ibm.com",
"https://eu-de.ml.cloud.ibm.com",
"https://eu-gb.ml.cloud.ibm.com",
"https://au-syd.ml.cloud.ibm.com",
"https://jp-tok.ml.cloud.ibm.com",
"https://ca-tor.ml.cloud.ibm.com"
],
"options_metadata": [],
"placeholder": "",
"real_time_refresh": true,
"required": false,
"show": false,
"title_case": false,
"toggle": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "https://us-south.ml.cloud.ibm.com"
},
"chunk_size": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Chunk Size",
"dynamic": false,
"info": "",
"list": false,
"list_add_label": "Add More",
"name": "chunk_size",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "int",
"value": 1000
},
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "from typing import Any\n\nfrom langchain_openai import OpenAIEmbeddings\n\nfrom lfx.base.embeddings.model import LCEmbeddingsModel\nfrom lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url\nfrom lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES\nfrom lfx.base.models.watsonx_constants import IBM_WATSONX_URLS, WATSONX_EMBEDDING_MODEL_NAMES\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import (\n BoolInput,\n DictInput,\n DropdownInput,\n FloatInput,\n IntInput,\n MessageTextInput,\n SecretStrInput,\n)\nfrom lfx.log.logger import logger\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.utils.util import transform_localhost_url\n\n# Ollama API constants\nHTTP_STATUS_OK = 200\nJSON_MODELS_KEY = \"models\"\nJSON_NAME_KEY = \"name\"\nJSON_CAPABILITIES_KEY = \"capabilities\"\nDESIRED_CAPABILITY = \"embedding\"\nDEFAULT_OLLAMA_URL = \"http://localhost:11434\"\n\n\nclass EmbeddingModelComponent(LCEmbeddingsModel):\n display_name = \"Embedding Model\"\n description = \"Generate embeddings using a specified provider.\"\n documentation: str = \"https://docs.langflow.org/components-embedding-models\"\n icon = \"binary\"\n name = \"EmbeddingModel\"\n category = \"models\"\n\n inputs = [\n DropdownInput(\n name=\"provider\",\n display_name=\"Model Provider\",\n options=[\"OpenAI\", \"Ollama\", \"IBM watsonx.ai\"],\n value=\"OpenAI\",\n info=\"Select the embedding model provider\",\n real_time_refresh=True,\n options_metadata=[{\"icon\": \"OpenAI\"}, {\"icon\": \"Ollama\"}, {\"icon\": \"WatsonxAI\"}],\n ),\n MessageTextInput(\n name=\"api_base\",\n display_name=\"API Base URL\",\n info=\"Base URL for the API. Leave empty for default.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"ollama_base_url\",\n display_name=\"Ollama API URL\",\n info=f\"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}\",\n value=DEFAULT_OLLAMA_URL,\n show=False,\n real_time_refresh=True,\n load_from_db=True,\n ),\n DropdownInput(\n name=\"base_url_ibm_watsonx\",\n display_name=\"watsonx API Endpoint\",\n info=\"The base URL of the API (IBM watsonx.ai only)\",\n options=IBM_WATSONX_URLS,\n value=IBM_WATSONX_URLS[0],\n show=False,\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"model\",\n display_name=\"Model Name\",\n options=OPENAI_EMBEDDING_MODEL_NAMES,\n value=OPENAI_EMBEDDING_MODEL_NAMES[0],\n info=\"Select the embedding model to use\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"Model Provider API key\",\n required=True,\n show=True,\n real_time_refresh=True,\n ),\n # Watson-specific inputs\n MessageTextInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"IBM watsonx.ai Project ID (required for IBM watsonx.ai)\",\n show=False,\n ),\n IntInput(\n name=\"dimensions\",\n display_name=\"Dimensions\",\n info=\"The number of dimensions the resulting output embeddings should have. 
\"\n \"Only supported by certain models.\",\n advanced=True,\n ),\n IntInput(name=\"chunk_size\", display_name=\"Chunk Size\", advanced=True, value=1000),\n FloatInput(name=\"request_timeout\", display_name=\"Request Timeout\", advanced=True),\n IntInput(name=\"max_retries\", display_name=\"Max Retries\", advanced=True, value=3),\n BoolInput(name=\"show_progress_bar\", display_name=\"Show Progress Bar\", advanced=True),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n ]\n\n def build_embeddings(self) -> Embeddings:\n provider = self.provider\n model = self.model\n api_key = self.api_key\n api_base = self.api_base\n base_url_ibm_watsonx = self.base_url_ibm_watsonx\n ollama_base_url = self.ollama_base_url\n dimensions = self.dimensions\n chunk_size = self.chunk_size\n request_timeout = self.request_timeout\n max_retries = self.max_retries\n show_progress_bar = self.show_progress_bar\n model_kwargs = self.model_kwargs or {}\n\n if provider == \"OpenAI\":\n if not api_key:\n msg = \"OpenAI API key is required when using OpenAI provider\"\n raise ValueError(msg)\n return OpenAIEmbeddings(\n model=model,\n dimensions=dimensions or None,\n base_url=api_base or None,\n api_key=api_key,\n chunk_size=chunk_size,\n max_retries=max_retries,\n timeout=request_timeout or None,\n show_progress_bar=show_progress_bar,\n model_kwargs=model_kwargs,\n )\n\n if provider == \"Ollama\":\n try:\n from langchain_ollama import OllamaEmbeddings\n except ImportError:\n try:\n from langchain_community.embeddings import OllamaEmbeddings\n except ImportError:\n msg = \"Please install langchain-ollama: pip install langchain-ollama\"\n raise ImportError(msg) from None\n\n transformed_base_url = transform_localhost_url(ollama_base_url)\n\n # Check if URL contains /v1 suffix (OpenAI-compatible mode)\n if transformed_base_url and transformed_base_url.rstrip(\"/\").endswith(\"/v1\"):\n # Strip /v1 suffix and log warning\n transformed_base_url = transformed_base_url.rstrip(\"/\").removesuffix(\"/v1\")\n logger.warning(\n \"Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, \"\n \"not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. \"\n \"If you want to use the OpenAI-compatible API, please use the OpenAI component instead. 
\"\n \"Learn more at https://docs.ollama.com/openai#openai-compatibility\"\n )\n\n return OllamaEmbeddings(\n model=model,\n base_url=transformed_base_url or \"http://localhost:11434\",\n **model_kwargs,\n )\n\n if provider == \"IBM watsonx.ai\":\n try:\n from langchain_ibm import WatsonxEmbeddings\n except ImportError:\n msg = \"Please install langchain-ibm: pip install langchain-ibm\"\n raise ImportError(msg) from None\n\n if not api_key:\n msg = \"IBM watsonx.ai API key is required when using IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n project_id = self.project_id\n\n if not project_id:\n msg = \"Project ID is required for IBM watsonx.ai provider\"\n raise ValueError(msg)\n\n params = {\n \"model_id\": model,\n \"url\": base_url_ibm_watsonx or \"https://us-south.ml.cloud.ibm.com\",\n \"apikey\": api_key,\n }\n\n params[\"project_id\"] = project_id\n\n return WatsonxEmbeddings(**params)\n\n msg = f\"Unknown provider: {provider}\"\n raise ValueError(msg)\n\n async def update_build_config(\n self, build_config: dotdict, field_value: Any, field_name: str | None = None\n ) -> dotdict:\n if field_name == \"provider\":\n if field_value == \"OpenAI\":\n build_config[\"model\"][\"options\"] = OPENAI_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = OPENAI_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"OpenAI API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"display_name\"] = \"OpenAI API Base URL\"\n build_config[\"api_base\"][\"advanced\"] = True\n build_config[\"api_base\"][\"show\"] = True\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"Ollama\":\n build_config[\"ollama_base_url\"][\"show\"] = True\n\n if await is_valid_ollama_url(url=self.ollama_base_url):\n try:\n models = await get_ollama_models(\n base_url_value=self.ollama_base_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n else:\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n build_config[\"api_key\"][\"display_name\"] = \"API Key (Optional)\"\n build_config[\"api_key\"][\"required\"] = False\n build_config[\"api_key\"][\"show\"] = False\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"project_id\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = False\n\n elif field_value == \"IBM watsonx.ai\":\n build_config[\"model\"][\"options\"] = WATSONX_EMBEDDING_MODEL_NAMES\n build_config[\"model\"][\"value\"] = WATSONX_EMBEDDING_MODEL_NAMES[0]\n build_config[\"api_key\"][\"display_name\"] = \"IBM watsonx.ai API Key\"\n build_config[\"api_key\"][\"required\"] = True\n build_config[\"api_key\"][\"show\"] = True\n build_config[\"api_base\"][\"show\"] = False\n build_config[\"ollama_base_url\"][\"show\"] = False\n build_config[\"base_url_ibm_watsonx\"][\"show\"] = True\n build_config[\"project_id\"][\"show\"] = True\n\n elif field_name == \"ollama_base_url\":\n # # Refresh Ollama models when base URL changes\n # if hasattr(self, \"provider\") and self.provider 
== \"Ollama\":\n # Use field_value if provided, otherwise fall back to instance attribute\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n build_config[\"model\"][\"value\"] = models[0] if models else \"\"\n except ValueError:\n await logger.awarning(\"Failed to fetch Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n build_config[\"model\"][\"value\"] = \"\"\n\n elif field_name == \"model\" and self.provider == \"Ollama\":\n ollama_url = self.ollama_base_url\n if await is_valid_ollama_url(url=ollama_url):\n try:\n models = await get_ollama_models(\n base_url_value=ollama_url,\n desired_capability=DESIRED_CAPABILITY,\n json_models_key=JSON_MODELS_KEY,\n json_name_key=JSON_NAME_KEY,\n json_capabilities_key=JSON_CAPABILITIES_KEY,\n )\n build_config[\"model\"][\"options\"] = models\n except ValueError:\n await logger.awarning(\"Failed to refresh Ollama embedding models.\")\n build_config[\"model\"][\"options\"] = []\n\n return build_config\n"
},
"dimensions": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Dimensions",
"dynamic": false,
"info": "The number of dimensions the resulting output embeddings should have. Only supported by certain models.",
"list": false,
"list_add_label": "Add More",
"name": "dimensions",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "int",
"value": ""
},
"max_retries": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Max Retries",
"dynamic": false,
"info": "",
"list": false,
"list_add_label": "Add More",
"name": "max_retries",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "int",
"value": 3
},
"model": {
"_input_type": "DropdownInput",
"advanced": false,
"combobox": false,
"dialog_inputs": {},
"display_name": "Model Name",
"dynamic": false,
"external_options": {},
"info": "Select the embedding model to use",
"name": "model",
"options": [
"text-embedding-3-small"
],
"options_metadata": [],
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"toggle": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "text-embedding-3-small"
},
"model_kwargs": {
"_input_type": "DictInput",
"advanced": true,
"display_name": "Model Kwargs",
"dynamic": false,
"info": "Additional keyword arguments to pass to the model.",
"list": false,
"list_add_label": "Add More",
"name": "model_kwargs",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"type": "dict",
"value": {}
},
"ollama_base_url": {
"_input_type": "MessageTextInput",
"advanced": false,
"display_name": "Ollama API URL",
"dynamic": false,
"info": "Endpoint of the Ollama API (Ollama only). Defaults to http://localhost:11434",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "ollama_base_url",
"placeholder": "",
"real_time_refresh": true,
"required": false,
"show": false,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"project_id": {
"_input_type": "MessageTextInput",
"advanced": false,
"display_name": "Project ID",
"dynamic": false,
"info": "IBM watsonx.ai Project ID (required for IBM watsonx.ai)",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "project_id",
"placeholder": "",
"required": false,
"show": false,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"provider": {
"_input_type": "DropdownInput",
"advanced": false,
"combobox": false,
"dialog_inputs": {},
"display_name": "Model Provider",
"dynamic": false,
"external_options": {},
"info": "Select the embedding model provider",
"name": "provider",
"options": [
"OpenAI",
"Ollama",
"IBM watsonx.ai"
],
"options_metadata": [
{
"icon": "OpenAI"
},
{
"icon": "Ollama"
},
{
"icon": "WatsonxAI"
}
],
"placeholder": "",
"real_time_refresh": true,
"required": false,
"show": true,
"title_case": false,
"toggle": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "OpenAI"
},
"request_timeout": {
"_input_type": "FloatInput",
"advanced": true,
"display_name": "Request Timeout",
"dynamic": false,
"info": "",
"list": false,
"list_add_label": "Add More",
"name": "request_timeout",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "float",
"value": ""
},
"show_progress_bar": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Show Progress Bar",
"dynamic": false,
"info": "",
"list": false,
"list_add_label": "Add More",
"name": "show_progress_bar",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "bool",
"value": false
}
},
"tool_mode": false
},
"showNode": true,
"type": "EmbeddingModel"
},
"dragging": false,
"id": "EmbeddingModel-oPi95",
"measured": {
"height": 451,
"width": 320
},
"position": {
"x": 729.3034344219965,
"y": 408.1587064380646
},
"selected": false,
"type": "genericNode"
},
{
"data": {
"id": "Agent-Nfw7u",
"node": {
"base_classes": [
"Message"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
"description": "Define the agent's instructions, then enter a task to complete using tools.",
"display_name": "Agent",
"documentation": "https://docs.langflow.org/agents",
"edited": false,
"field_order": [
"agent_llm",
"api_key",
"base_url",
"project_id",
"max_output_tokens",
"max_tokens",
"model_kwargs",
"model_name",
"openai_api_base",
"api_key",
"temperature",
"seed",
"max_retries",
"timeout",
"system_prompt",
"context_id",
"n_messages",
"format_instructions",
"output_schema",
"tools",
"input_value",
"handle_parsing_errors",
"verbose",
"max_iterations",
"agent_description",
"add_current_date_tool"
],
"frozen": false,
"icon": "bot",
"last_updated": "2025-11-11T21:40:35.510Z",
"legacy": false,
"metadata": {
"code_hash": "adf733969280",
"dependencies": {
"dependencies": [
{
"name": "langchain_core",
"version": "0.3.79"
},
{
"name": "pydantic",
"version": "2.10.6"
},
{
"name": "lfx",
"version": null
}
],
"total_dependencies": 3
},
"module": "lfx.components.agents.agent.AgentComponent"
},
"minimized": false,
"output_types": [],
"outputs": [
{
"allows_loop": false,
"cache": true,
"display_name": "Response",
"group_outputs": false,
"method": "message_response",
"name": "response",
"options": null,
"required_inputs": null,
"selected": "Message",
"tool_mode": true,
"types": [
"Message"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"template": {
"_type": "Component",
"add_current_date_tool": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Current Date",
"dynamic": false,
"info": "If true, will add a tool to the agent that returns the current date.",
"list": false,
"list_add_label": "Add More",
"name": "add_current_date_tool",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
},
"agent_description": {
"_input_type": "MultilineInput",
"advanced": true,
"copy_field": false,
"display_name": "Agent Description [Deprecated]",
"dynamic": false,
"info": "The description of the agent. This is only used when in Tool Mode. Defaults to 'A helpful assistant with access to the following tools:' and tools are added dynamically. This feature is deprecated and will be removed in future versions.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"multiline": true,
"name": "agent_description",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "A helpful assistant with access to the following tools:"
},
"agent_llm": {
"_input_type": "DropdownInput",
"advanced": false,
"combobox": false,
"dialog_inputs": {},
"display_name": "Model Provider",
"dynamic": false,
"external_options": {
"fields": {
"data": {
"node": {
"display_name": "Connect other models",
"icon": "CornerDownLeft",
"name": "connect_other_models"
}
}
}
},
"info": "The provider of the language model that the agent will use to generate responses.",
"input_types": [],
"name": "agent_llm",
"options": [
"Anthropic",
"Google Generative AI",
"OpenAI",
"IBM watsonx.ai",
"Ollama"
],
"options_metadata": [
{
"icon": "Anthropic"
},
{
"icon": "GoogleGenerativeAI"
},
{
"icon": "OpenAI"
},
{
"icon": "WatsonxAI"
},
{
"icon": "Ollama"
}
],
"placeholder": "",
"real_time_refresh": true,
"refresh_button": false,
"required": false,
"show": true,
"title_case": false,
"toggle": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "OpenAI"
},
"api_key": {
"_input_type": "SecretStrInput",
"advanced": false,
"display_name": "OpenAI API Key",
"dynamic": false,
"info": "The OpenAI API Key to use for the OpenAI model.",
"input_types": [],
"load_from_db": true,
"name": "api_key",
"password": true,
"placeholder": "",
"real_time_refresh": true,
"required": false,
"show": true,
"title_case": false,
"type": "str",
"value": "OPENAI_API_KEY"
},
"base_url": {
"_input_type": "StrInput",
"advanced": false,
"display_name": "Base URL",
"dynamic": false,
"info": "The base URL of the API.",
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "base_url",
"placeholder": "",
"required": true,
"show": false,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "import json\nimport re\n\nfrom langchain_core.tools import StructuredTool, Tool\nfrom pydantic import ValidationError\n\nfrom lfx.base.agents.agent import LCToolsAgentComponent\nfrom lfx.base.agents.events import ExceptionWithMessageError\nfrom lfx.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n MODEL_PROVIDERS_LIST,\n MODELS_METADATA,\n)\nfrom lfx.base.models.model_utils import get_model_name\nfrom lfx.components.helpers.current_date import CurrentDateComponent\nfrom lfx.components.helpers.memory import MemoryComponent\nfrom lfx.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom lfx.custom.custom_component.component import get_component_toolkit\nfrom lfx.custom.utils import update_component_build_config\nfrom lfx.helpers.base_model import build_model_from_schema\nfrom lfx.inputs.inputs import BoolInput, SecretStrInput, StrInput\nfrom lfx.io import DropdownInput, IntInput, MessageTextInput, MultilineInput, Output, TableInput\nfrom lfx.log.logger import logger\nfrom lfx.schema.data import Data\nfrom lfx.schema.dotdict import dotdict\nfrom lfx.schema.message import Message\nfrom lfx.schema.table import EditMode\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n documentation: str = \"https://docs.langflow.org/agents\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n # Filter out json_mode from OpenAI inputs since we handle structured output differently\n if \"OpenAI\" in MODEL_PROVIDERS_DICT:\n openai_inputs_filtered = [\n input_field\n for input_field in MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"]\n if not (hasattr(input_field, \"name\") and input_field.name == \"json_mode\")\n ]\n else:\n openai_inputs_filtered = []\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*MODEL_PROVIDERS_LIST],\n value=\"OpenAI\",\n real_time_refresh=True,\n refresh_button=False,\n input_types=[],\n options_metadata=[MODELS_METADATA[key] for key in MODEL_PROVIDERS_LIST if key in MODELS_METADATA],\n external_options={\n \"fields\": {\n \"data\": {\n \"node\": {\n \"name\": \"connect_other_models\",\n \"display_name\": \"Connect other models\",\n \"icon\": \"CornerDownLeft\",\n }\n }\n },\n },\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"API Key\",\n info=\"The API key to use for the model.\",\n required=True,\n ),\n StrInput(\n name=\"base_url\",\n display_name=\"Base URL\",\n info=\"The base URL of the API.\",\n required=True,\n show=False,\n ),\n StrInput(\n name=\"project_id\",\n display_name=\"Project ID\",\n info=\"The project ID of the model.\",\n required=True,\n show=False,\n ),\n IntInput(\n name=\"max_output_tokens\",\n display_name=\"Max Output Tokens\",\n info=\"The maximum number of tokens to generate.\",\n show=False,\n ),\n *openai_inputs_filtered,\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use 
tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n MessageTextInput(\n name=\"context_id\",\n display_name=\"Context ID\",\n info=\"The context ID of the chat. Adds an extra layer to the local memory.\",\n value=\"\",\n advanced=True,\n ),\n IntInput(\n name=\"n_messages\",\n display_name=\"Number of Chat History Messages\",\n value=100,\n info=\"Number of chat history messages to retrieve.\",\n advanced=True,\n show=True,\n ),\n MultilineInput(\n name=\"format_instructions\",\n display_name=\"Output Format Instructions\",\n info=\"Generic Template for structured output formatting. Valid only with Structured response.\",\n value=(\n \"You are an AI that extracts structured JSON objects from unstructured text. \"\n \"Use a predefined schema with expected types (str, int, float, bool, dict). \"\n \"Extract ALL relevant instances that match the schema - if multiple patterns exist, capture them all. \"\n \"Fill missing or ambiguous values with defaults: null for missing values. \"\n \"Remove exact duplicates but keep variations that have different field values. \"\n \"Always return valid JSON in the expected format, never throw errors. \"\n \"If multiple objects can be extracted, return them all in the structured format.\"\n ),\n advanced=True,\n ),\n TableInput(\n name=\"output_schema\",\n display_name=\"Output Schema\",\n info=(\n \"Schema Validation: Define the structure and data types for structured output. \"\n \"No validation if no output schema.\"\n ),\n advanced=True,\n required=False,\n value=[],\n table_schema=[\n {\n \"name\": \"name\",\n \"display_name\": \"Name\",\n \"type\": \"str\",\n \"description\": \"Specify the name of the output field.\",\n \"default\": \"field\",\n \"edit_mode\": EditMode.INLINE,\n },\n {\n \"name\": \"description\",\n \"display_name\": \"Description\",\n \"type\": \"str\",\n \"description\": \"Describe the purpose of the output field.\",\n \"default\": \"description of field\",\n \"edit_mode\": EditMode.POPOVER,\n },\n {\n \"name\": \"type\",\n \"display_name\": \"Type\",\n \"type\": \"str\",\n \"edit_mode\": EditMode.INLINE,\n \"description\": (\"Indicate the data type of the output field (e.g., str, int, float, bool, dict).\"),\n \"options\": [\"str\", \"int\", \"float\", \"bool\", \"dict\"],\n \"default\": \"str\",\n },\n {\n \"name\": \"multiple\",\n \"display_name\": \"As List\",\n \"type\": \"boolean\",\n \"description\": \"Set to True if this output field should be a list of the specified type.\",\n \"default\": \"False\",\n \"edit_mode\": EditMode.INLINE,\n },\n ],\n ),\n *LCToolsAgentComponent.get_base_inputs(),\n # removed memory inputs from agent component\n # *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [\n Output(name=\"response\", display_name=\"Response\", method=\"message_response\"),\n ]\n\n async def get_agent_requirements(self):\n \"\"\"Get the agent requirements for the agent.\"\"\"\n llm_model, display_name = await self.get_llm()\n if llm_model is None:\n msg = \"No language model selected. 
Please choose a model to proceed.\"\n raise ValueError(msg)\n self.model_name = get_model_name(llm_model, display_name=display_name)\n\n # Get memory data\n self.chat_history = await self.get_memory_data()\n await logger.adebug(f\"Retrieved {len(self.chat_history)} chat history messages\")\n if isinstance(self.chat_history, Message):\n self.chat_history = [self.chat_history]\n\n # Add current date tool if enabled\n if self.add_current_date_tool:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n\n if not isinstance(current_date_tool, StructuredTool):\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n self.tools.append(current_date_tool)\n\n # Set shared callbacks for tracing the tools used by the agent\n self.set_tools_callbacks(self.tools, self._get_shared_callbacks())\n\n return llm_model, self.chat_history, self.tools\n\n async def message_response(self) -> Message:\n try:\n llm_model, self.chat_history, self.tools = await self.get_agent_requirements()\n # Set up and run agent\n self.set(\n llm=llm_model,\n tools=self.tools or [],\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n result = await self.run_agent(agent)\n\n # Store result for potential JSON output\n self._agent_result = result\n\n except (ValueError, TypeError, KeyError) as e:\n await logger.aerror(f\"{type(e).__name__}: {e!s}\")\n raise\n except ExceptionWithMessageError as e:\n await logger.aerror(f\"ExceptionWithMessageError occurred: {e}\")\n raise\n # Avoid catching blind Exception; let truly unexpected exceptions propagate\n except Exception as e:\n await logger.aerror(f\"Unexpected error: {e!s}\")\n raise\n else:\n return result\n\n def _preprocess_schema(self, schema):\n \"\"\"Preprocess schema to ensure correct data types for build_model_from_schema.\"\"\"\n processed_schema = []\n for field in schema:\n processed_field = {\n \"name\": str(field.get(\"name\", \"field\")),\n \"type\": str(field.get(\"type\", \"str\")),\n \"description\": str(field.get(\"description\", \"\")),\n \"multiple\": field.get(\"multiple\", False),\n }\n # Ensure multiple is handled correctly\n if isinstance(processed_field[\"multiple\"], str):\n processed_field[\"multiple\"] = processed_field[\"multiple\"].lower() in [\n \"true\",\n \"1\",\n \"t\",\n \"y\",\n \"yes\",\n ]\n processed_schema.append(processed_field)\n return processed_schema\n\n async def build_structured_output_base(self, content: str):\n \"\"\"Build structured output with optional BaseModel validation.\"\"\"\n json_pattern = r\"\\{.*\\}\"\n schema_error_msg = \"Try setting an output schema\"\n\n # Try to parse content as JSON first\n json_data = None\n try:\n json_data = json.loads(content)\n except json.JSONDecodeError:\n json_match = re.search(json_pattern, content, re.DOTALL)\n if json_match:\n try:\n json_data = json.loads(json_match.group())\n except json.JSONDecodeError:\n return {\"content\": content, \"error\": schema_error_msg}\n else:\n return {\"content\": content, \"error\": schema_error_msg}\n\n # If no output schema provided, return parsed JSON without validation\n if not hasattr(self, \"output_schema\") or not self.output_schema or len(self.output_schema) == 0:\n return json_data\n\n # Use BaseModel validation with schema\n try:\n processed_schema = self._preprocess_schema(self.output_schema)\n 
output_model = build_model_from_schema(processed_schema)\n\n # Validate against the schema\n if isinstance(json_data, list):\n # Multiple objects\n validated_objects = []\n for item in json_data:\n try:\n validated_obj = output_model.model_validate(item)\n validated_objects.append(validated_obj.model_dump())\n except ValidationError as e:\n await logger.aerror(f\"Validation error for item: {e}\")\n # Include invalid items with error info\n validated_objects.append({\"data\": item, \"validation_error\": str(e)})\n return validated_objects\n\n # Single object\n try:\n validated_obj = output_model.model_validate(json_data)\n return [validated_obj.model_dump()] # Return as list for consistency\n except ValidationError as e:\n await logger.aerror(f\"Validation error: {e}\")\n return [{\"data\": json_data, \"validation_error\": str(e)}]\n\n except (TypeError, ValueError) as e:\n await logger.aerror(f\"Error building structured output: {e}\")\n # Fallback to parsed JSON without validation\n return json_data\n\n async def json_response(self) -> Data:\n \"\"\"Convert agent response to structured JSON Data output with schema validation.\"\"\"\n # Always use structured chat agent for JSON response mode for better JSON formatting\n try:\n system_components = []\n\n # 1. Agent Instructions (system_prompt)\n agent_instructions = getattr(self, \"system_prompt\", \"\") or \"\"\n if agent_instructions:\n system_components.append(f\"{agent_instructions}\")\n\n # 2. Format Instructions\n format_instructions = getattr(self, \"format_instructions\", \"\") or \"\"\n if format_instructions:\n system_components.append(f\"Format instructions: {format_instructions}\")\n\n # 3. Schema Information from BaseModel\n if hasattr(self, \"output_schema\") and self.output_schema and len(self.output_schema) > 0:\n try:\n processed_schema = self._preprocess_schema(self.output_schema)\n output_model = build_model_from_schema(processed_schema)\n schema_dict = output_model.model_json_schema()\n schema_info = (\n \"You are given some text that may include format instructions, \"\n \"explanations, or other content alongside a JSON schema.\\n\\n\"\n \"Your task:\\n\"\n \"- Extract only the JSON schema.\\n\"\n \"- Return it as valid JSON.\\n\"\n \"- Do not include format instructions, explanations, or extra text.\\n\\n\"\n \"Input:\\n\"\n f\"{json.dumps(schema_dict, indent=2)}\\n\\n\"\n \"Output (only JSON schema):\"\n )\n system_components.append(schema_info)\n except (ValidationError, ValueError, TypeError, KeyError) as e:\n await logger.aerror(f\"Could not build schema for prompt: {e}\", exc_info=True)\n\n # Combine all components\n combined_instructions = \"\\n\\n\".join(system_components) if system_components else \"\"\n llm_model, self.chat_history, self.tools = await self.get_agent_requirements()\n self.set(\n llm=llm_model,\n tools=self.tools or [],\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=combined_instructions,\n )\n\n # Create and run structured chat agent\n try:\n structured_agent = self.create_agent_runnable()\n except (NotImplementedError, ValueError, TypeError) as e:\n await logger.aerror(f\"Error with structured chat agent: {e}\")\n raise\n try:\n result = await self.run_agent(structured_agent)\n except (\n ExceptionWithMessageError,\n ValueError,\n TypeError,\n RuntimeError,\n ) as e:\n await logger.aerror(f\"Error with structured agent result: {e}\")\n raise\n # Extract content from structured agent result\n if hasattr(result, \"content\"):\n content = result.content\n elif 
hasattr(result, \"text\"):\n content = result.text\n else:\n content = str(result)\n\n except (\n ExceptionWithMessageError,\n ValueError,\n TypeError,\n NotImplementedError,\n AttributeError,\n ) as e:\n await logger.aerror(f\"Error with structured chat agent: {e}\")\n # Fallback to regular agent\n content_str = \"No content returned from agent\"\n return Data(data={\"content\": content_str, \"error\": str(e)})\n\n # Process with structured output validation\n try:\n structured_output = await self.build_structured_output_base(content)\n\n # Handle different output formats\n if isinstance(structured_output, list) and structured_output:\n if len(structured_output) == 1:\n return Data(data=structured_output[0])\n return Data(data={\"results\": structured_output})\n if isinstance(structured_output, dict):\n return Data(data=structured_output)\n return Data(data={\"content\": content})\n\n except (ValueError, TypeError) as e:\n await logger.aerror(f\"Error in structured output processing: {e}\")\n return Data(data={\"content\": content, \"error\": str(e)})\n\n async def get_memory_data(self):\n # TODO: This is a temporary fix to avoid message duplication. We should develop a function for this.\n messages = (\n await MemoryComponent(**self.get_base_args())\n .set(\n session_id=self.graph.session_id,\n context_id=self.context_id,\n order=\"Ascending\",\n n_messages=self.n_messages,\n )\n .retrieve_messages()\n )\n return [\n message for message in messages if getattr(message, \"id\", None) != getattr(self.input_value, \"id\", None)\n ]\n\n async def get_llm(self):\n if not isinstance(self.agent_llm, str):\n return self.agent_llm, None\n\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if not provider_info:\n msg = f\"Invalid model provider: {self.agent_llm}\"\n raise ValueError(msg)\n\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\", \"\")\n\n return self._build_llm_model(component_class, inputs, prefix), display_name\n\n except (AttributeError, ValueError, TypeError, RuntimeError) as e:\n await logger.aerror(f\"Error building {self.agent_llm} language model: {e!s}\")\n msg = f\"Failed to initialize language model: {e!s}\"\n raise ValueError(msg) from e\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n # Filter out json_mode and only use attributes that exist on this component\n model_kwargs = {}\n for input_ in inputs:\n if hasattr(self, f\"{prefix}{input_.name}\"):\n model_kwargs[input_.name] = getattr(self, f\"{prefix}{input_.name}\")\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n if build_config is not None and field in build_config:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in 
build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the component class's update_build_config method\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n build_config[\"agent_llm\"][\"display_name\"] = \"Model Provider\"\n elif field_value == \"connect_other_models\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*MODEL_PROVIDERS_LIST],\n real_time_refresh=True,\n refresh_button=False,\n input_types=[\"LanguageModel\"],\n placeholder=\"Awaiting model input.\",\n options_metadata=[MODELS_METADATA[key] for key in MODEL_PROVIDERS_LIST if key in MODELS_METADATA],\n external_options={\n \"fields\": {\n \"data\": {\n \"node\": {\n \"name\": \"connect_other_models\",\n \"display_name\": \"Connect other models\",\n \"icon\": \"CornerDownLeft\",\n },\n }\n },\n },\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if (\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = 
provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def _get_tools(self) -> list[Tool]:\n component_toolkit = get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n\n tools = component_toolkit(component=self).get_tools(\n tool_name=\"Call_Agent\",\n tool_description=description,\n # here we do not use the shared callbacks as we are exposing the agent as a tool\n callbacks=self.get_langchain_callbacks(),\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n\n return tools\n"
},
"context_id": {
"_input_type": "MessageTextInput",
"advanced": true,
"display_name": "Context ID",
"dynamic": false,
"info": "The context ID of the chat. Adds an extra layer to the local memory.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "context_id",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"format_instructions": {
"_input_type": "MultilineInput",
"advanced": true,
"copy_field": false,
"display_name": "Output Format Instructions",
"dynamic": false,
"info": "Generic Template for structured output formatting. Valid only with Structured response.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"multiline": true,
"name": "format_instructions",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "You are an AI that extracts structured JSON objects from unstructured text. Use a predefined schema with expected types (str, int, float, bool, dict). Extract ALL relevant instances that match the schema - if multiple patterns exist, capture them all. Fill missing or ambiguous values with defaults: null for missing values. Remove exact duplicates but keep variations that have different field values. Always return valid JSON in the expected format, never throw errors. If multiple objects can be extracted, return them all in the structured format."
},
"handle_parsing_errors": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Handle Parse Errors",
"dynamic": false,
"info": "Should the Agent fix errors when reading user input for better processing?",
"list": false,
"list_add_label": "Add More",
"name": "handle_parsing_errors",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
},
"input_value": {
"_input_type": "MessageInput",
"advanced": false,
"display_name": "Input",
"dynamic": false,
"info": "The input provided by the user for the agent to process.",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "input_value",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": true,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"max_iterations": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Max Iterations",
"dynamic": false,
"info": "The maximum number of attempts the agent can make to complete its task before it stops.",
"list": false,
"list_add_label": "Add More",
"name": "max_iterations",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "int",
"value": 15
},
"max_output_tokens": {
"_input_type": "IntInput",
"advanced": false,
"display_name": "Max Output Tokens",
"dynamic": false,
"info": "The maximum number of tokens to generate.",
"list": false,
"list_add_label": "Add More",
"name": "max_output_tokens",
"placeholder": "",
"required": false,
"show": false,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "int",
"value": ""
},
"max_retries": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Max Retries",
"dynamic": false,
"info": "The maximum number of retries to make when generating.",
"list": false,
"list_add_label": "Add More",
"name": "max_retries",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "int",
"value": 5
},
"max_tokens": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Max Tokens",
"dynamic": false,
"info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.",
"list": false,
"list_add_label": "Add More",
"name": "max_tokens",
"placeholder": "",
"range_spec": {
"max": 128000,
"min": 0,
"step": 0.1,
"step_type": "float"
},
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "int",
"value": ""
},
"model_kwargs": {
"_input_type": "DictInput",
"advanced": true,
"display_name": "Model Kwargs",
"dynamic": false,
"info": "Additional keyword arguments to pass to the model.",
"list": false,
"list_add_label": "Add More",
"name": "model_kwargs",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"type": "dict",
"value": {}
},
"model_name": {
"_input_type": "DropdownInput",
"advanced": false,
"combobox": true,
"dialog_inputs": {},
"display_name": "Model Name",
"dynamic": false,
"external_options": {},
"info": "To see the model names, first choose a provider. Then, enter your API key and click the refresh button next to the model name.",
"name": "model_name",
"options": [
"gpt-4o"
],
"options_metadata": [],
"placeholder": "",
"real_time_refresh": false,
"required": false,
"show": true,
"title_case": false,
"toggle": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "gpt-4o"
},
"n_messages": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Number of Chat History Messages",
"dynamic": false,
"info": "Number of chat history messages to retrieve.",
"list": false,
"list_add_label": "Add More",
"name": "n_messages",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "int",
"value": 100
},
"openai_api_base": {
"_input_type": "StrInput",
"advanced": true,
"display_name": "OpenAI API Base",
"dynamic": false,
"info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. You can change this to use other APIs like JinaChat, LocalAI and Prem.",
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "openai_api_base",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"output_schema": {
|
||
"_input_type": "TableInput",
|
||
"advanced": true,
|
||
"display_name": "Output Schema",
|
||
"dynamic": false,
|
||
"info": "Schema Validation: Define the structure and data types for structured output. No validation if no output schema.",
|
||
"is_list": true,
|
||
"list_add_label": "Add More",
|
||
"name": "output_schema",
|
||
"placeholder": "",
|
||
"required": false,
|
||
"show": true,
|
||
"table_icon": "Table",
|
||
"table_schema": [
|
||
{
|
||
"default": "field",
|
||
"description": "Specify the name of the output field.",
|
||
"display_name": "Name",
|
||
"edit_mode": "inline",
|
||
"name": "name",
|
||
"type": "str"
|
||
},
|
||
{
|
||
"default": "description of field",
|
||
"description": "Describe the purpose of the output field.",
|
||
"display_name": "Description",
|
||
"edit_mode": "popover",
|
||
"name": "description",
|
||
"type": "str"
|
||
},
|
||
{
|
||
"default": "str",
|
||
"description": "Indicate the data type of the output field (e.g., str, int, float, bool, dict).",
|
||
"display_name": "Type",
|
||
"edit_mode": "inline",
|
||
"name": "type",
|
||
"options": [
|
||
"str",
|
||
"int",
|
||
"float",
|
||
"bool",
|
||
"dict"
|
||
],
|
||
"type": "str"
|
||
},
|
||
{
|
||
"default": "False",
|
||
"description": "Set to True if this output field should be a list of the specified type.",
|
||
"display_name": "As List",
|
||
"edit_mode": "inline",
|
||
"name": "multiple",
|
||
"type": "boolean"
|
||
}
|
||
],
|
||
"title_case": false,
|
||
"tool_mode": false,
|
||
"trace_as_metadata": true,
|
||
"trigger_icon": "Table",
|
||
"trigger_text": "Open table",
|
||
"type": "table",
|
||
"value": []
|
||
},
|
||
"project_id": {
|
||
"_input_type": "StrInput",
|
||
"advanced": false,
|
||
"display_name": "Project ID",
|
||
"dynamic": false,
|
||
"info": "The project ID of the model.",
|
||
"list": false,
|
||
"list_add_label": "Add More",
|
||
"load_from_db": false,
|
||
"name": "project_id",
|
||
"placeholder": "",
|
||
"required": true,
|
||
"show": false,
|
||
"title_case": false,
|
||
"tool_mode": false,
|
||
"trace_as_metadata": true,
|
||
"type": "str",
|
||
"value": ""
|
||
},
|
||
"seed": {
|
||
"_input_type": "IntInput",
|
||
"advanced": true,
|
||
"display_name": "Seed",
|
||
"dynamic": false,
|
||
"info": "The seed controls the reproducibility of the job.",
|
||
"list": false,
|
||
"list_add_label": "Add More",
|
||
"name": "seed",
|
||
"placeholder": "",
|
||
"required": false,
|
||
"show": true,
|
||
"title_case": false,
|
||
"tool_mode": false,
|
||
"trace_as_metadata": true,
|
||
"type": "int",
|
||
"value": 1
|
||
},
|
||
"system_prompt": {
|
||
"_input_type": "MultilineInput",
|
||
"advanced": false,
|
||
"copy_field": false,
|
||
"display_name": "Agent Instructions",
|
||
"dynamic": false,
|
||
"info": "System Prompt: Initial instructions and context provided to guide the agent's behavior.",
|
||
"input_types": [
|
||
"Message"
|
||
],
|
||
"list": false,
|
||
"list_add_label": "Add More",
|
||
"load_from_db": false,
|
||
"multiline": true,
|
||
"name": "system_prompt",
|
||
"placeholder": "",
|
||
"required": false,
|
||
"show": true,
|
||
"title_case": false,
|
||
"tool_mode": false,
|
||
"trace_as_input": true,
|
||
"trace_as_metadata": true,
|
||
"type": "str",
|
||
"value": "You are the OpenRAG Agent. You answer questions using retrieval, reasoning, and tool use.\nYou have access to several tools. Your job is to determine **which tool to use and when**.\n### Available Tools\n- OpenSearch Retrieval Tool:\n Use this to search the indexed knowledge base. Use when the user asks about product details, internal concepts, processes, architecture, documentation, roadmaps, or anything that may be stored in the index.\n- Conversation History:\n Use this to maintain continuity when the user is referring to previous turns. \n Do not treat history as a factual source.\n- Conversation File Context:\n Use this when the user asks about a document they uploaded or refers directly to its contents.\n- URL Ingestion Tool:\n Use this **only** when the user explicitly asks you to read, summarize, or analyze the content of a URL.\n Do not ingest URLs automatically.\n- Calculator / Expression Evaluation Tool:\n Use this when the user asks to compare numbers, compute estimates, calculate totals, analyze pricing, or answer any question requiring mathematics or quantitative reasoning.\n If the answer requires arithmetic, call the calculator tool rather than calculating internally.\n### Retrieval Decision Rules\nUse OpenSearch **whenever**:\n1. The question may be answered from internal or indexed data.\n2. The user references team names, product names, release plans, configurations, requirements, or official information.\n3. The user needs a factual, grounded answer.\nDo **not** use retrieval if:\n- The question is purely creative (e.g., storytelling, analogies) or personal preference.\n- The user simply wants text reformatted or rewritten from what is already present in the conversation.\nWhen uncertain → **Retrieve.** Retrieval is low risk and improves grounding.\n### URL Ingestion Rules\nOnly ingest URLs when the user explicitly says:\n- \"Read this link\"\n- \"Summarize this webpage\"\n- \"What does this site say?\"\n- \"Ingest this URL\"\nIf unclear → ask a clarifying question.\n### Calculator Usage Rules\nUse the calculator when:\n- Performing arithmetic\n- Estimating totals\n- Comparing values\n- Modeling cost, time, effort, scale, or projections\nDo not perform math internally. **Call the calculator tool instead.**\n### Answer Construction Rules\n1. When asked: \"What is OpenRAG\", answer the following:\n\"OpenRAG is an open-source package for building agentic RAG systems. It supports integration with a wide range of orchestration tools, vector databases, and LLM providers. OpenRAG connects and amplifies three popular, proven open-source projects into one powerful platform:\n**Langflow** – Langflow is a powerful tool to build and deploy AI agents and MCP servers [Read more](https://www.langflow.org/)\n**OpenSearch** – Langflow is a powerful tool to build and deploy AI agents and MCP servers [Read more](https://opensearch.org/)\n**Docling** – Langflow is a powerful tool to build and deploy AI agents and MCP servers [Read more](https://www.docling.ai/)\"\n2. Synthesize retrieved or ingested content in your own words.\n3. Support factual claims with citations in the format:\n (Source: <document_name_or_id>)\n4. If no supporting evidence is found:\n Say: \"No relevant supporting sources were found for that request.\"\n5. Never invent facts or hallucinate details.\n6. Be concise, direct, and confident. \n7. Do not reveal internal chain-of-thought."
},
"temperature": {
"_input_type": "SliderInput",
"advanced": true,
"display_name": "Temperature",
"dynamic": false,
"info": "",
"max_label": "",
"max_label_icon": "",
"min_label": "",
"min_label_icon": "",
"name": "temperature",
"placeholder": "",
"range_spec": {
"max": 1,
"min": 0,
"step": 0.01,
"step_type": "float"
},
"required": false,
"show": true,
"slider_buttons": false,
"slider_buttons_options": [],
"slider_input": false,
"title_case": false,
"tool_mode": false,
"type": "slider",
"value": 0.1
},
"timeout": {
"_input_type": "IntInput",
"advanced": true,
"display_name": "Timeout",
"dynamic": false,
"info": "The timeout for requests to OpenAI completion API.",
"list": false,
"list_add_label": "Add More",
"name": "timeout",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "int",
"value": 700
},
"tools": {
"_input_type": "HandleInput",
"advanced": false,
"display_name": "Tools",
"dynamic": false,
"info": "These are the tools that the agent can use to help with tasks.",
"input_types": [
"Tool"
],
"list": true,
"list_add_label": "Add More",
"name": "tools",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"trace_as_metadata": true,
"type": "other",
"value": ""
},
"verbose": {
"_input_type": "BoolInput",
"advanced": true,
"display_name": "Verbose",
"dynamic": false,
"info": "",
"list": false,
"list_add_label": "Add More",
"name": "verbose",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "bool",
"value": true
}
},
"tool_mode": false
},
"showNode": true,
"type": "Agent"
},
"dragging": false,
"id": "Agent-Nfw7u",
"measured": {
"height": 594,
"width": 320
},
"position": {
"x": 1629.578423203229,
"y": 451.946400444934
},
"selected": true,
"type": "genericNode"
},
{
"data": {
"id": "CalculatorComponent-KrlMH",
"node": {
"base_classes": [
"Data"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
"description": "Perform basic arithmetic operations on a given expression.",
"display_name": "Calculator",
"documentation": "https://docs.langflow.org/components-helpers#calculator",
"edited": false,
"field_order": [
"expression"
],
"frozen": false,
"icon": "calculator",
"last_updated": "2025-11-11T21:40:50.133Z",
"legacy": false,
"metadata": {
"code_hash": "5fcfa26be77d",
"dependencies": {
"dependencies": [
{
"name": "lfx",
"version": null
}
],
"total_dependencies": 1
},
"module": "lfx.components.helpers.calculator_core.CalculatorComponent"
},
"minimized": false,
"output_types": [],
"outputs": [
{
"allows_loop": false,
"cache": true,
"display_name": "Toolset",
"group_outputs": false,
"hidden": null,
"method": "to_toolkit",
"name": "component_as_tool",
"options": null,
"required_inputs": null,
"selected": "Tool",
"tool_mode": true,
"types": [
"Tool"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"template": {
"_type": "Component",
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "import ast\nimport operator\nfrom collections.abc import Callable\n\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.inputs.inputs import MessageTextInput\nfrom lfx.io import Output\nfrom lfx.schema.data import Data\n\n\nclass CalculatorComponent(Component):\n display_name = \"Calculator\"\n description = \"Perform basic arithmetic operations on a given expression.\"\n documentation: str = \"https://docs.langflow.org/components-helpers#calculator\"\n icon = \"calculator\"\n\n # Cache operators dictionary as a class variable\n OPERATORS: dict[type[ast.operator], Callable] = {\n ast.Add: operator.add,\n ast.Sub: operator.sub,\n ast.Mult: operator.mul,\n ast.Div: operator.truediv,\n ast.Pow: operator.pow,\n }\n\n inputs = [\n MessageTextInput(\n name=\"expression\",\n display_name=\"Expression\",\n info=\"The arithmetic expression to evaluate (e.g., '4*4*(33/22)+12-20').\",\n tool_mode=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Data\", name=\"result\", type_=Data, method=\"evaluate_expression\"),\n ]\n\n def _eval_expr(self, node: ast.AST) -> float:\n \"\"\"Evaluate an AST node recursively.\"\"\"\n if isinstance(node, ast.Constant):\n if isinstance(node.value, int | float):\n return float(node.value)\n error_msg = f\"Unsupported constant type: {type(node.value).__name__}\"\n raise TypeError(error_msg)\n if isinstance(node, ast.Num): # For backwards compatibility\n if isinstance(node.n, int | float):\n return float(node.n)\n error_msg = f\"Unsupported number type: {type(node.n).__name__}\"\n raise TypeError(error_msg)\n\n if isinstance(node, ast.BinOp):\n op_type = type(node.op)\n if op_type not in self.OPERATORS:\n error_msg = f\"Unsupported binary operator: {op_type.__name__}\"\n raise TypeError(error_msg)\n\n left = self._eval_expr(node.left)\n right = self._eval_expr(node.right)\n return self.OPERATORS[op_type](left, right)\n\n error_msg = f\"Unsupported operation or expression type: {type(node).__name__}\"\n raise TypeError(error_msg)\n\n def evaluate_expression(self) -> Data:\n \"\"\"Evaluate the mathematical expression and return the result.\"\"\"\n try:\n tree = ast.parse(self.expression, mode=\"eval\")\n result = self._eval_expr(tree.body)\n\n formatted_result = f\"{float(result):.6f}\".rstrip(\"0\").rstrip(\".\")\n self.log(f\"Calculation result: {formatted_result}\")\n\n self.status = formatted_result\n return Data(data={\"result\": formatted_result})\n\n except ZeroDivisionError:\n error_message = \"Error: Division by zero\"\n self.status = error_message\n return Data(data={\"error\": error_message, \"input\": self.expression})\n\n except (SyntaxError, TypeError, KeyError, ValueError, AttributeError, OverflowError) as e:\n error_message = f\"Invalid expression: {e!s}\"\n self.status = error_message\n return Data(data={\"error\": error_message, \"input\": self.expression})\n\n def build(self):\n \"\"\"Return the main evaluation function.\"\"\"\n return self.evaluate_expression\n"
},
"expression": {
"_input_type": "MessageTextInput",
"advanced": false,
"display_name": "Expression",
"dynamic": false,
"info": "The arithmetic expression to evaluate (e.g., '4*4*(33/22)+12-20').",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
"name": "expression",
"placeholder": "",
"required": false,
"show": true,
"title_case": false,
"tool_mode": true,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": ""
},
"tools_metadata": {
"_input_type": "ToolsInput",
"advanced": false,
"display_name": "Actions",
"dynamic": false,
"info": "Modify tool names and descriptions to help agents understand when to use each tool.",
"is_list": true,
"list_add_label": "Add More",
"name": "tools_metadata",
"placeholder": "",
"real_time_refresh": true,
"required": false,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "tools",
"value": [
{
"_uniqueId": "evaluate_expression_evaluate_expression_0",
"args": {
"expression": {
"default": "",
"description": "The arithmetic expression to evaluate (e.g., '4*4*(33/22)+12-20').",
"title": "Expression",
"type": "string"
}
},
"description": "Perform basic arithmetic operations on a given expression.",
"display_description": "Perform basic arithmetic operations on a given expression.",
"display_name": "evaluate_expression",
"name": "evaluate_expression",
"readonly": false,
"status": true,
"tags": [
"evaluate_expression"
]
}
]
}
},
"tool_mode": true
},
"showNode": true,
"type": "CalculatorComponent"
},
"dragging": false,
"id": "CalculatorComponent-KrlMH",
"measured": {
"height": 218,
"width": 320
},
"position": {
"x": 720.5458956206228,
"y": 1220.8583072055585
},
"selected": false,
"type": "genericNode"
}
],
"viewport": {
"x": -37.553133726465376,
"y": 28.26764925979927,
"zoom": 0.6144649479685779
}
},
"description": "OpenRAG OpenSearch Agent",
"endpoint_name": null,
"id": "1098eea1-6649-4e1d-aed1-b77249fb8dd0",
"is_component": false,
"last_tested_version": "1.7.0",
"name": "OpenRAG OpenSearch Agent",
"tags": [
"assistants",
"agents"
]
} |