diff --git a/flows/openrag_agent.json b/flows/openrag_agent.json index d9ad0b82..700808a9 100644 --- a/flows/openrag_agent.json +++ b/flows/openrag_agent.json @@ -29,34 +29,6 @@ "target": "Agent-Nfw7u", "targetHandle": "{œfieldNameœ:œtoolsœ,œidœ:œAgent-Nfw7uœ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}" }, - { - "animated": false, - "className": "", - "data": { - "sourceHandle": { - "dataType": "ChatInput", - "id": "ChatInput-ci8VE", - "name": "message", - "output_types": [ - "Message" - ] - }, - "targetHandle": { - "fieldName": "input_value", - "id": "Agent-Nfw7u", - "inputTypes": [ - "Message" - ], - "type": "str" - } - }, - "id": "xy-edge__ChatInput-ci8VE{œdataTypeœ:œChatInputœ,œidœ:œChatInput-ci8VEœ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}-Agent-Nfw7u{œfieldNameœ:œinput_valueœ,œidœ:œAgent-Nfw7uœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", - "selected": false, - "source": "ChatInput-ci8VE", - "sourceHandle": "{œdataTypeœ:œChatInputœ,œidœ:œChatInput-ci8VEœ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}", - "target": "Agent-Nfw7u", - "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œAgent-Nfw7uœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}" - }, { "animated": false, "className": "", @@ -227,6 +199,90 @@ "target": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvE", "targetHandle": "{œfieldNameœ:œfilter_expressionœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-TyvvEœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}" }, + { + "animated": false, + "className": "", + "data": { + "sourceHandle": { + "dataType": "ChatInput", + "id": "ChatInput-ci8VE", + "name": "message", + "output_types": [ + "Message" + ] + }, + "targetHandle": { + "fieldName": "input", + "id": "Prompt Template-7kZsI", + "inputTypes": [ + "Message" + ], + "type": "str" + } + }, + "id": "xy-edge__ChatInput-ci8VE{œdataTypeœ:œChatInputœ,œidœ:œChatInput-ci8VEœ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}-Prompt Template-7kZsI{œfieldNameœ:œinputœ,œidœ:œPrompt Template-7kZsIœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "selected": false, + "source": "ChatInput-ci8VE", + "sourceHandle": "{œdataTypeœ:œChatInputœ,œidœ:œChatInput-ci8VEœ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}", + "target": "Prompt Template-7kZsI", + "targetHandle": "{œfieldNameœ:œinputœ,œidœ:œPrompt Template-7kZsIœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}" + }, + { + "animated": false, + "className": "", + "data": { + "sourceHandle": { + "dataType": "Prompt Template", + "id": "Prompt Template-7kZsI", + "name": "prompt", + "output_types": [ + "Message" + ] + }, + "targetHandle": { + "fieldName": "input_value", + "id": "Agent-Nfw7u", + "inputTypes": [ + "Message" + ], + "type": "str" + } + }, + "id": "xy-edge__Prompt Template-7kZsI{œdataTypeœ:œPrompt Templateœ,œidœ:œPrompt Template-7kZsIœ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}-Agent-Nfw7u{œfieldNameœ:œinput_valueœ,œidœ:œAgent-Nfw7uœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "selected": false, + "source": "Prompt Template-7kZsI", + "sourceHandle": "{œdataTypeœ:œPrompt Templateœ,œidœ:œPrompt Template-7kZsIœ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}", + "target": "Agent-Nfw7u", + "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œAgent-Nfw7uœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}" + }, + { + "animated": false, + "className": "", + "data": { + "sourceHandle": { + "dataType": "TextInput", + "id": "TextInput-aHsQb", + "name": "text", + "output_types": [ + "Message" + ] + }, + "targetHandle": { + "fieldName": "filter", + "id": "Prompt Template-7kZsI", + "inputTypes": [ + "Message" + ], + "type": "str" + } + }, + 
"id": "xy-edge__TextInput-aHsQb{œdataTypeœ:œTextInputœ,œidœ:œTextInput-aHsQbœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-Prompt Template-7kZsI{œfieldNameœ:œfilterœ,œidœ:œPrompt Template-7kZsIœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "selected": false, + "source": "TextInput-aHsQb", + "sourceHandle": "{œdataTypeœ:œTextInputœ,œidœ:œTextInput-aHsQbœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", + "target": "Prompt Template-7kZsI", + "targetHandle": "{œfieldNameœ:œfilterœ,œidœ:œPrompt Template-7kZsIœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}" + }, { "animated": false, "className": "", @@ -351,8 +407,8 @@ "width": 320 }, "position": { - "x": 503.8866998170472, - "y": 2288.794090320999 + "x": 499.6078970988059, + "y": 2370.091341967585 }, "selected": false, "type": "genericNode" @@ -381,7 +437,7 @@ "frozen": false, "icon": "Mcp", "key": "mcp_lf-starter_project", - "last_updated": "2025-11-26T05:22:26.296Z", + "last_updated": "2025-12-02T21:33:13.267Z", "legacy": false, "mcpServerName": "lf-starter_project", "metadata": { @@ -433,7 +489,7 @@ "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" }, "_frontend_node_folder_id": { - "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" + "value": "69a7745e-dfb8-40a7-b5cb-5da3af0b10b6" }, "_type": "Component", "code": { @@ -601,7 +657,7 @@ "width": 320 }, "position": { - "x": 1508.8015756352295, + "x": 1497.4887425692002, "y": 1384.557089807625 }, "selected": false, @@ -927,8 +983,8 @@ "width": 192 }, "position": { - "x": 1599.1877452584524, - "y": 2275.678637253258 + "x": 1136.2811923974275, + "y": 2503.9876597481666 }, "selected": false, "type": "genericNode" @@ -1279,7 +1335,7 @@ ], "frozen": false, "icon": "bot", - "last_updated": "2025-11-26T05:22:26.298Z", + "last_updated": "2025-12-02T21:33:13.268Z", "legacy": false, "metadata": { "code_hash": "d64b11c24a1c", @@ -1329,7 +1385,7 @@ "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" }, "_frontend_node_folder_id": { - "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" + "value": "69a7745e-dfb8-40a7-b5cb-5da3af0b10b6" }, "_type": "Component", "add_current_date_tool": { @@ -1427,6 +1483,9 @@ "real_time_refresh": true, "refresh_button": false, "required": false, + "selected_metadata": { + "icon": "OpenAI" + }, "show": true, "title_case": false, "toggle": false, @@ -1692,7 +1751,26 @@ "input_types": [], "name": "model_name", "options": [ - "gpt-4o" + "gpt-4o-mini", + "gpt-4o", + "gpt-4.1", + "gpt-4.1-mini", + "gpt-4.1-nano", + "gpt-4-turbo", + "gpt-4-turbo-preview", + "gpt-4", + "gpt-3.5-turbo", + "gpt-5.1", + "gpt-5", + "gpt-5-mini", + "gpt-5-nano", + "gpt-5-chat-latest", + "o1", + "o3-mini", + "o3", + "o3-pro", + "o4-mini", + "o4-mini-high" ], "options_metadata": [], "override_skip": false, @@ -1706,7 +1784,7 @@ "trace_as_metadata": true, "track_in_telemetry": true, "type": "str", - "value": "gpt-4o" + "value": "gpt-4o-mini" }, "n_messages": { "_input_type": "IntInput", @@ -1770,6 +1848,7 @@ "description": "Specify the name of the output field.", "display_name": "Name", "edit_mode": "inline", + "formatter": "text", "name": "name", "type": "str" }, @@ -1778,6 +1857,7 @@ "description": "Describe the purpose of the output field.", "display_name": "Description", "edit_mode": "popover", + "formatter": "text", "name": "description", "type": "str" }, @@ -1786,6 +1866,7 @@ "description": "Indicate the data type of the output field (e.g., str, int, float, bool, dict).", "display_name": "Type", "edit_mode": "inline", + "formatter": "text", "name": "type", "options": [ "str", @@ -1801,6 +1882,7 @@ "description": "Set to True if 
this output field should be a list of the specified type.", "display_name": "As List", "edit_mode": "inline", + "formatter": "text", "name": "multiple", "type": "boolean" } @@ -1994,7 +2076,7 @@ ], "frozen": false, "icon": "calculator", - "last_updated": "2025-11-26T05:22:26.299Z", + "last_updated": "2025-12-02T21:33:13.268Z", "legacy": false, "metadata": { "code_hash": "acbe2603b034", @@ -2037,7 +2119,7 @@ "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" }, "_frontend_node_folder_id": { - "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" + "value": "69a7745e-dfb8-40a7-b5cb-5da3af0b10b6" }, "_type": "Component", "code": { @@ -2180,7 +2262,7 @@ ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-26T05:22:26.299Z", + "last_updated": "2025-12-02T21:33:13.269Z", "legacy": false, "lf_version": "1.7.0.dev21", "metadata": { @@ -2248,12 +2330,12 @@ "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" }, "_frontend_node_folder_id": { - "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" + "value": "69a7745e-dfb8-40a7-b5cb-5da3af0b10b6" }, "_type": "Component", "api_base": { "_input_type": "MessageTextInput", - "advanced": true, + "advanced": false, "display_name": "OpenAI API Base URL", "dynamic": false, "info": "Base URL for the API. Leave empty for default.", @@ -2460,9 +2542,7 @@ "info": "Select the embedding model to use", "name": "model", "options": [ - "text-embedding-3-small", - "text-embedding-3-large", - "text-embedding-ada-002" + "text-embedding-3-small" ], "options_metadata": [], "override_skip": false, @@ -2661,7 +2741,7 @@ "dragging": false, "id": "EmbeddingModel-aIP4U", "measured": { - "height": 369, + "height": 451, "width": 320 }, "position": { @@ -2715,10 +2795,10 @@ ], "frozen": false, "icon": "OpenSearch", - "last_updated": "2025-11-26T05:22:41.532Z", + "last_updated": "2025-12-02T21:33:13.271Z", "legacy": false, "metadata": { - "code_hash": "000397b17863", + "code_hash": "db60433453a8", "dependencies": { "dependencies": [ { @@ -2762,7 +2842,7 @@ "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" }, "_frontend_node_folder_id": { - "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" + "value": "69a7745e-dfb8-40a7-b5cb-5da3af0b10b6" }, "_type": "Component", "auth_mode": { @@ -2830,7 +2910,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", 
\"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. 
\"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. \"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. 
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n client = self.build_client()\n \n # Check if we're in ingestion-only mode (no search query)\n has_search_query = bool((self.search_query or \"\").strip())\n if not has_search_query:\n logger.debug(\"🔄 Ingestion-only mode activated: search 
operations will be skipped\")\n logger.debug(\"Starting ingestion mode...\")\n \n logger.warning(f\"Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n logger.debug(\"[INGESTION] _add_documents_to_vector_store called\")\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n \n logger.debug(f\"[INGESTION] ingest_data type: {type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}\")\n logger.debug(f\"[INGESTION] ingest_data content: {self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}\")\n\n docs = self.ingest_data or []\n if not docs:\n logger.debug(\"✓ Ingestion complete: No documents provided\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n \n # Normalize embedding to list first\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n \n # Filter out None values (fail-safe mode) - do this BEFORE checking if empty\n embeddings_list = [e for e in embeddings_list if e is not None]\n \n # NOW check if we have any valid embeddings left after filtering\n if not embeddings_list:\n logger.warning(\"All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.\")\n self.log(\"Embedding returned None (fail-safe mode enabled). 
Skipping document ingestion.\")\n return\n\n logger.debug(f\"[INGESTION] Valid embeddings after filtering: {len(embeddings_list)}\")\n self.log(f\"Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = []\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n 
identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n logger.info(f\"✓ Selected embedding model for ingestion: '{embedding_model}'\")\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n logger.debug(f\"[LF] Docs metadata {self.docs_metadata}\")\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings (threaded for concurrency) with retries\n 
def embed_chunk(chunk_text: str) -> list[float]:\n return selected_embedding.embed_documents([chunk_text])[0]\n\n vectors: list[list[float]] | None = None\n last_exception: Exception | None = None\n delay = 1.0\n attempts = 0\n max_attempts = 3\n\n while attempts < max_attempts:\n attempts += 1\n try:\n max_workers = min(max(len(texts), 1), 8)\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n vectors = [None] * len(texts)\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n break\n except Exception as exc:\n last_exception = exc\n if attempts >= max_attempts:\n logger.error(\n f\"Embedding generation failed for model {embedding_model} after retries\",\n error=str(exc),\n )\n raise\n logger.warning(\n \"Threaded embedding generation failed for model %s (attempt %s/%s), retrying in %.1fs\",\n embedding_model,\n attempts,\n max_attempts,\n delay,\n )\n time.sleep(delay)\n delay = min(delay * 2, 8.0)\n\n if vectors is None:\n raise RuntimeError(\n f\"Embedding generation failed for {embedding_model}: {last_exception}\"\n if last_exception\n else f\"Embedding generation failed for {embedding_model}\"\n )\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n logger.info(f\"✓ Ingestion 
complete: Successfully indexed {len(return_ids)} documents with model '{embedding_model}'\")\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": {\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n result = client.search(\n index=self.index_name,\n 
body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n return [self._get_embedding_model_name()]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. 
Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n \n # Check if embedding is None (fail-safe mode)\n if self.embedding is None or (isinstance(self.embedding, list) and all(e is None for e in self.embedding)):\n logger.error(\"Embedding returned None (fail-safe mode enabled). Cannot perform search.\")\n return []\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n # Filter out None values (fail-safe mode)\n embeddings_list = [e for e in embeddings_list if e is not None]\n \n if not embeddings_list:\n logger.error(\"No valid embeddings available after filtering None values (fail-safe mode). 
Cannot perform search.\")\n return []\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models and isinstance(available_models, dict):\n logger.info(f\"Embedding object {idx} provides {len(available_models)} models via available_models dict\")\n for model_name_key, dedicated_embedding in available_models.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model '{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. 
\"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n else:\n # No matching embedding found for this model\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. \"\n f\"This model will be skipped. Available models: {list(embedding_by_model.keys())}\"\n )\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n\n if not query_embeddings:\n msg = \"Failed to generate embeddings for any model\"\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. 
\"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n 
lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Always builds the vector store (triggering ingestion if needed), then performs\n search only if a query is provided.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n # Always build/cache the vector store to ensure ingestion happens\n if self._cached_vector_store is None:\n self.build_vector_store()\n \n # Only perform search if query is provided\n search_query = (self.search_query or \"\").strip()\n if not search_query:\n self.log(\"No search query provided - ingestion completed, returning empty results\")\n return []\n \n # Perform search with the provided query\n raw = self.search(search_query)\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if 
field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n" + "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. 
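As a quick sanity check of the two module-level helpers above, the normalization logic reduces to the standalone sketch below; the function body is copied from the component and the example model names are hypothetical.

    # Standalone copy of normalize_model_name, for illustration only.
    def normalize_model_name(model_name: str) -> str:
        normalized = model_name.lower()
        normalized = normalized.replace("-", "_").replace(":", "_").replace("/", "_").replace(".", "_")
        normalized = "".join(c if c.isalnum() or c == "_" else "_" for c in normalized)
        while "__" in normalized:
            normalized = normalized.replace("__", "_")
        return normalized.strip("_")

    assert normalize_model_name("text-embedding-3-small") == "text_embedding_3_small"
    assert normalize_model_name("cohere/embed-english-v3.0") == "cohere_embed_english_v3_0"
    # get_embedding_field_name then prefixes "chunk_embedding_", yielding e.g.
    # "chunk_embedding_text_embedding_3_small".
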
It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
\"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
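To show how the Candidate Pool Size input feeds the per-model KNN clauses assembled later in search(), here is a minimal sketch; the dynamic field name and query vector are hypothetical.

    import copy

    field = "chunk_embedding_text_embedding_3_small"  # hypothetical dynamic field
    vec = [0.0] * 1536  # hypothetical query embedding

    base_query = {"knn": {field: {"vector": vec, "k": 50}}}
    # num_candidates is attached only when the input is > 0; setting it to 0 disables it.
    with_candidates = copy.deepcopy(base_query)
    with_candidates["knn"][field]["num_candidates"] = 1000
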
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
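The two authentication modes configured by these inputs translate into OpenSearch client kwargs roughly as sketched below; this mirrors _build_auth_kwargs defined further down, and the credentials are placeholders.

    def build_auth_kwargs(mode: str) -> dict:
        # JWT mode: token goes into a header, optionally prefixed with "Bearer ".
        if mode == "jwt":
            return {"headers": {"Authorization": "Bearer <jwt-token>"}}
        # Basic mode: username/password tuple passed as http_auth.
        return {"http_auth": ("admin", "<password>")}

    assert "headers" in build_auth_kwargs("jwt")
    assert "http_auth" in build_auth_kwargs("basic")
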
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
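The resolution order described above amounts to taking the first truthy attribute in sequence; a compact sketch with a hypothetical embedding object:

    class FakeEmbedding:  # hypothetical stand-in for an embeddings component
        deployment = None
        model = "text-embedding-3-small"
        model_id = "te-3-small"
        model_name = "openai"

    emb = FakeEmbedding()
    name = next(
        (str(v) for v in (emb.deployment, emb.model, emb.model_id, emb.model_name) if v),
        None,
    )
    assert name == "text-embedding-3-small"  # deployment is None, so model wins
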
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. 
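For a hypothetical 1536-dimension model and the component defaults, the put_mapping body produced by _ensure_embedding_field_mapping would look like this:

    mapping = {
        "properties": {
            "chunk_embedding_text_embedding_3_small": {  # hypothetical field name
                "type": "knn_vector",
                "dimension": 1536,
                "method": {
                    "name": "disk_ann",
                    "space_type": "l2",
                    "engine": "jvector",
                    "parameters": {"ef_construction": 512, "m": 16},
                },
            },
            "embedding_model": {"type": "keyword"},
            "embedding_dimensions": {"type": "integer"},
        }
    }
    # client.indices.put_mapping(index="langflow", body=mapping)
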
Current mapping: {properties.get(field_name)}\"\n            logger.error(msg)\n            raise ValueError(msg)\n\n    def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n        \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n        Amazon OpenSearch Serverless has restrictions on which vector engines\n        can be used. This method ensures the selected engine is compatible.\n\n        Args:\n            is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n            engine: The selected vector search engine\n\n        Raises:\n            ValueError: If AOSS is used with an incompatible engine\n        \"\"\"\n        if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n            msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n            raise ValueError(msg)\n\n    def _is_aoss_enabled(self, http_auth: Any) -> bool:\n        \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n        Args:\n            http_auth: The HTTP authentication object\n\n        Returns:\n            True if AOSS is enabled, False otherwise\n        \"\"\"\n        return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n    def _bulk_ingest_embeddings(\n        self,\n        client: OpenSearch,\n        index_name: str,\n        embeddings: list[list[float]],\n        texts: list[str],\n        metadatas: list[dict] | None = None,\n        ids: list[str] | None = None,\n        vector_field: str = \"vector_field\",\n        text_field: str = \"text\",\n        embedding_model: str = \"unknown\",\n        mapping: dict | None = None,\n        max_chunk_bytes: int | None = 1 * 1024 * 1024,\n        *,\n        is_aoss: bool = False,\n    ) -> list[str]:\n        \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n        This method uses bulk operations to insert documents with their vector\n        embeddings and metadata into the specified OpenSearch index. 
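Each bulk action assembled by this method ends up with roughly the following shape; the values are illustrative, and on Amazon OpenSearch Serverless the document id is sent as "id" rather than "_id".

    action = {
        "_op_type": "index",
        "_index": "langflow",
        "chunk_embedding_text_embedding_3_small": [0.01, -0.02],  # vector_field (truncated)
        "text": "chunk text",
        "embedding_model": "text-embedding-3-small",  # model tracking field
        "embedding_dimensions": 1536,
        "_id": "7f9c3f1e-hypothetical-uuid4",
    }
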
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n client = self.build_client()\n\n # Check if we're in ingestion-only mode (no search query)\n has_search_query = bool((self.search_query or \"\").strip())\n if not has_search_query:\n logger.debug(\"Ingestion-only mode activated: search 
operations will be skipped\")\n logger.debug(\"Starting ingestion mode...\")\n\n logger.warning(f\"Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n logger.debug(\"[INGESTION] _add_documents_to_vector_store called\")\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n logger.debug(\n f\"[INGESTION] ingest_data type: \"\n f\"{type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}\"\n )\n logger.debug(\n f\"[INGESTION] ingest_data content: \"\n f\"{self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}\"\n )\n\n docs = self.ingest_data or []\n if not docs:\n logger.debug(\"Ingestion complete: No documents provided\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Normalize embedding to list first\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n # Filter out None values (fail-safe mode) - do this BEFORE checking if empty\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n # NOW check if we have any valid embeddings left after filtering\n if not embeddings_list:\n logger.warning(\"All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.\")\n self.log(\"Embedding returned None (fail-safe mode enabled). 
Skipping document ingestion.\")\n return\n\n logger.debug(f\"[INGESTION] Valid embeddings after filtering: {len(embeddings_list)}\")\n self.log(f\"Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = []\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n 
identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n logger.info(f\"Selected embedding model for ingestion: '{embedding_model}'\")\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n logger.debug(f\"[LF] Docs metadata {self.docs_metadata}\")\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings (threaded for concurrency) with retries\n def 
embed_chunk(chunk_text: str) -> list[float]:\n return selected_embedding.embed_documents([chunk_text])[0]\n\n vectors: list[list[float]] | None = None\n last_exception: Exception | None = None\n delay = 1.0\n attempts = 0\n max_attempts = 3\n\n while attempts < max_attempts:\n attempts += 1\n try:\n max_workers = min(max(len(texts), 1), 8)\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n vectors = [None] * len(texts)\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n break\n except Exception as exc:\n last_exception = exc\n if attempts >= max_attempts:\n logger.error(\n f\"Embedding generation failed for model {embedding_model} after retries\",\n error=str(exc),\n )\n raise\n logger.warning(\n \"Threaded embedding generation failed for model %s (attempt %s/%s), retrying in %.1fs\",\n embedding_model,\n attempts,\n max_attempts,\n delay,\n )\n time.sleep(delay)\n delay = min(delay * 2, 8.0)\n\n if vectors is None:\n raise RuntimeError(\n f\"Embedding generation failed for {embedding_model}: {last_exception}\"\n if last_exception\n else f\"Embedding generation failed for {embedding_model}\"\n )\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n logger.info(\n f\"Ingestion complete: 
Successfully indexed {len(return_ids)} documents with model '{embedding_model}'\"\n )\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": {\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n logger.debug(f\"Model detection query: {agg_query}\")\n result = 
client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n # Log detailed bucket info for debugging\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n if not models:\n total_hits = result.get(\"hits\", {}).get(\"total\", {})\n total_count = total_hits.get(\"value\", 0) if isinstance(total_hits, dict) else total_hits\n logger.warning(\n f\"No embedding_model values found in index '{self.index_name}'. \"\n f\"Total docs in index: {total_count}. \"\n f\"This may indicate documents were indexed without the embedding_model field.\"\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n fallback_model = self._get_embedding_model_name()\n logger.info(f\"Using fallback model: {fallback_model}\")\n return [fallback_model]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. 
Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Check if embedding is None (fail-safe mode)\n if self.embedding is None or (isinstance(self.embedding, list) and all(e is None for e in self.embedding)):\n logger.error(\"Embedding returned None (fail-safe mode enabled). Cannot perform search.\")\n return []\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n # Filter out None values (fail-safe mode)\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n if not embeddings_list:\n logger.error(\n \"No valid embeddings available after filtering None values (fail-safe mode). 
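The two filter formats accepted by _coerce_filter_clauses (defined above) convert as in this sketch; the file and owner names are hypothetical.

    explicit = {"filter": [{"term": {"filename": "doc.pdf"}}], "limit": 10}
    # -> [{"term": {"filename": "doc.pdf"}}]

    context_style = {"data_sources": ["a.pdf", "b.pdf"], "owners": ["user1"], "document_types": []}
    # -> [{"terms": {"filename": ["a.pdf", "b.pdf"]}},
    #     {"term": {"owner": "user1"}},
    #     {"term": {"mimetype": "__IMPOSSIBLE_VALUE__"}}]  # empty list -> match-nothing placeholder
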
Cannot perform search.\"\n )\n return []\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models_attr}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models_attr and isinstance(available_models_attr, dict):\n logger.info(\n f\"Embedding object {idx} provides {len(available_models_attr)} models via available_models dict\"\n )\n for model_name_key, dedicated_embedding in available_models_attr.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model '{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. 
\"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n self.log(f\"[SEARCH] Models detected in index: {available_models}\")\n self.log(f\"[SEARCH] Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n # Track matching status for debugging\n matched_models = []\n unmatched_models = []\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n matched_models.append(model_name)\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n self.log(f\"[MATCH] Model '{model_name}' - generated {len(vec)}-dim embedding\")\n else:\n # No matching embedding found for this model\n unmatched_models.append(model_name)\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. \"\n f\"This model will be skipped. Available identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[NO MATCH] Model '{model_name}' - available: {list(embedding_by_model.keys())}\")\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n self.log(f\"[ERROR] Embedding generation failed for '{model_name}': {e}\")\n\n # Log summary of model matching\n logger.info(f\"Model matching summary: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n self.log(f\"[SUMMARY] Model matching: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n if unmatched_models:\n self.log(f\"[WARN] Unmatched models in index: {unmatched_models}\")\n\n if not query_embeddings:\n msg = (\n f\"Failed to generate embeddings for any model. \"\n f\"Index has models: {available_models}, but no matching embedding objects found. 
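Given one embedding object with a deployment and a distinct model name (both hypothetical), the identifier map built above would contain entries like these:

    emb = object()  # hypothetical embedding instance
    embedding_by_model = {
        "ada-deploy": emb,                         # deployment
        "text-embedding-3-large": emb,             # model
        "ada-deploy:text-embedding-3-large": emb,  # combined key, used when deployments collide
    }
    assert embedding_by_model["ada-deploy:text-embedding-3-large"] is emb
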
\"\n f\"Available embedding identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[FAIL] Search failed: {msg}\")\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n self.log(f\"[SKIP] Field '{selected_field}' not a knn_vector - skipping model '{model_name}'\")\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n self.log(f\"[DIM MISMATCH] Model '{model_name}': query={vector_dim} vs field={field_dim} - skipping\")\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n self.log(\n f\"[WARN] No valid KNN queries could be built. 
\"\n f\"Query embeddings generated: {list(query_embeddings.keys())}, \"\n f\"but no matching knn_vector fields found in index.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(\n f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models: \"\n f\"{list(query_embeddings.keys())}\"\n )\n self.log(f\"[EXEC] Executing search with {len(knn_queries_with_candidates)} KNN queries, limit={limit}\")\n self.log(f\"[EXEC] Embedding models used: {list(query_embeddings.keys())}\")\n self.log(f\"[EXEC] KNN fields being queried: {embedding_fields}\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if 
use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n self.log(f\"[RESULT] Search complete: {len(hits)} results found\")\n\n if len(hits) == 0:\n self.log(\n f\"[EMPTY] Debug info: \"\n f\"models_in_index={available_models}, \"\n f\"matched_models={matched_models}, \"\n f\"knn_fields={embedding_fields}, \"\n f\"filters={len(filter_clauses)} clauses\"\n )\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Always builds the vector store (triggering ingestion if needed), then performs\n search only if a query is provided.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n # Always build/cache the vector store to ensure ingestion happens\n logger.info(f\"Search query: {self.search_query}\")\n if self._cached_vector_store is None:\n self.build_vector_store()\n\n # Only perform search if query is provided\n search_query = (self.search_query or \"\").strip()\n if not search_query:\n self.log(\"No search query provided - ingestion completed, returning empty results\")\n return []\n\n # Perform search with the provided query\n raw = self.search(search_query)\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return 
build_config" }, "docs_metadata": { "_input_type": "TableInput", @@ -3202,7 +3282,7 @@ "trace_as_metadata": true, "track_in_telemetry": false, "type": "query", - "value": "" + "value": "hisense" }, "should_cache_vector_store": { "_input_type": "BoolInput", @@ -3364,7 +3444,7 @@ "info": "", "list": false, "list_add_label": "Add More", - "load_from_db": false, + "load_from_db": true, "name": "username", "override_skip": false, "placeholder": "", @@ -3375,7 +3455,7 @@ "trace_as_metadata": true, "track_in_telemetry": false, "type": "str", - "value": "admin" + "value": "JWT" }, "vector_field": { "_input_type": "StrInput", @@ -3432,10 +3512,10 @@ "width": 320 }, "position": { - "x": 1098.7085719475467, - "y": 1410.4984401198574 + "x": 1010.0149063694566, + "y": 1404.0086597117045 }, - "selected": true, + "selected": false, "type": "genericNode" }, { @@ -3474,7 +3554,7 @@ ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-26T05:22:26.302Z", + "last_updated": "2025-12-02T21:33:13.271Z", "legacy": false, "lf_version": "1.7.0.dev21", "metadata": { @@ -3542,7 +3622,7 @@ "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" }, "_frontend_node_folder_id": { - "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" + "value": "69a7745e-dfb8-40a7-b5cb-5da3af0b10b6" }, "_type": "Component", "api_base": { @@ -4001,7 +4081,7 @@ ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-26T05:22:26.303Z", + "last_updated": "2025-12-02T21:33:13.272Z", "legacy": false, "lf_version": "1.7.0.dev21", "metadata": { @@ -4069,7 +4149,7 @@ "value": "1098eea1-6649-4e1d-aed1-b77249fb8dd0" }, "_frontend_node_folder_id": { - "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" + "value": "69a7745e-dfb8-40a7-b5cb-5da3af0b10b6" }, "_type": "Component", "api_base": { @@ -4495,12 +4575,210 @@ }, "selected": false, "type": "genericNode" + }, + { + "data": { + "id": "Prompt Template-7kZsI", + "node": { + "base_classes": [ + "Message" + ], + "beta": false, + "conditional_paths": [], + "custom_fields": { + "template": [ + "filter", + "input" + ] + }, + "description": "Create a prompt template with dynamic variables.", + "display_name": "Prompt Template", + "documentation": "https://docs.langflow.org/components-prompts", + "edited": false, + "error": null, + "field_order": [ + "template", + "tool_placeholder" + ], + "frozen": false, + "full_path": null, + "icon": "braces", + "is_composition": null, + "is_input": null, + "is_output": null, + "legacy": false, + "metadata": { + "code_hash": "7382d03ce412", + "dependencies": { + "dependencies": [ + { + "name": "lfx", + "version": "0.2.0.dev21" + } + ], + "total_dependencies": 1 + }, + "module": "lfx.components.models_and_agents.prompt.PromptComponent" + }, + "minimized": false, + "name": "", + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Prompt", + "group_outputs": false, + "hidden": null, + "loop_types": null, + "method": "build_prompt", + "name": "prompt", + "options": null, + "required_inputs": null, + "selected": "Message", + "tool_mode": true, + "types": [ + "Message" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "priority": 0, + "replacement": null, + "template": { + "_type": "Component", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "from 
lfx.base.prompts.api_utils import process_prompt_template\nfrom lfx.custom.custom_component.component import Component\nfrom lfx.inputs.inputs import DefaultPromptField\nfrom lfx.io import MessageTextInput, Output, PromptInput\nfrom lfx.schema.message import Message\nfrom lfx.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt Template\"\n description: str = \"Create a prompt template with dynamic variables.\"\n documentation: str = \"https://docs.langflow.org/components-prompts\"\n icon = \"braces\"\n trace_type = \"prompt\"\n name = \"Prompt Template\"\n priority = 0 # Set priority to 0 to make it appear first\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n" + }, + "filter": { + "advanced": false, + "display_name": "filter", + "dynamic": false, + "field_type": "str", + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Message" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "filter", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + }, + "input": { + "advanced": false, + "display_name": "input", + "dynamic": false, + "field_type": "str", + "fileTypes": [], + "file_path": "", + "info": "", + "input_types": [ + "Message" + ], + "list": false, + "load_from_db": false, + "multiline": true, + "name": "input", + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "type": "str", + "value": "" + }, + "template": { + "_input_type": "PromptInput", + "advanced": false, + "display_name": "Template", + "dynamic": false, + "info": "", + "list": false, + "list_add_label": "Add More", + "name": "template", + "override_skip": false, + 
"placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_input": true, + "track_in_telemetry": false, + "type": "prompt", + "value": "This is Knowledge filter - use it as a context of what to search on the database, unless it's empty: {filter}\n\nChat input: {input}" + }, + "tool_placeholder": { + "_input_type": "MessageTextInput", + "advanced": true, + "display_name": "Tool Placeholder", + "dynamic": false, + "info": "A placeholder input for tool mode.", + "input_types": [ + "Message" + ], + "list": false, + "list_add_label": "Add More", + "load_from_db": false, + "name": "tool_placeholder", + "override_skip": false, + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": true, + "trace_as_input": true, + "trace_as_metadata": true, + "track_in_telemetry": false, + "type": "str", + "value": "" + } + }, + "tool_mode": false + }, + "showNode": true, + "type": "Prompt Template" + }, + "dragging": false, + "id": "Prompt Template-7kZsI", + "measured": { + "height": 435, + "width": 320 + }, + "position": { + "x": 1496.7915215019298, + "y": 2019.0970114283145 + }, + "selected": false, + "type": "genericNode" } ], "viewport": { - "x": -159.31786606392757, - "y": -442.1474480017346, - "zoom": 0.5404166566474254 + "x": 3.4342547318133256, + "y": -319.63689114007093, + "zoom": 0.4319912925345232 } }, "description": "OpenRAG OpenSearch Agent", diff --git a/flows/openrag_nudges.json b/flows/openrag_nudges.json index 5fe01022..8f19e199 100644 --- a/flows/openrag_nudges.json +++ b/flows/openrag_nudges.json @@ -3,7 +3,7 @@ "edges": [ { "animated": false, - "className": "", + "className": "not-running", "data": { "sourceHandle": { "dataType": "ParserComponent", @@ -199,34 +199,6 @@ "target": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3", "targetHandle": "{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3œ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}" }, - { - "animated": false, - "className": "", - "data": { - "sourceHandle": { - "dataType": "TextInput", - "id": "TextInput-4cEHx", - "name": "text", - "output_types": [ - "Message" - ] - }, - "targetHandle": { - "fieldName": "filter_expression", - "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3", - "inputTypes": [ - "Message" - ], - "type": "str" - } - }, - "id": "xy-edge__TextInput-4cEHx{œdataTypeœ:œTextInputœ,œidœ:œTextInput-4cEHxœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3{œfieldNameœ:œfilter_expressionœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3œ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", - "selected": false, - "source": "TextInput-4cEHx", - "sourceHandle": "{œdataTypeœ:œTextInputœ,œidœ:œTextInput-4cEHxœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", - "target": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3", - "targetHandle": "{œfieldNameœ:œfilter_expressionœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3œ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}" - }, { "animated": false, "className": "", @@ -255,6 +227,33 @@ "sourceHandle": "{œdataTypeœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbeddingœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3œ,œnameœ:œdataframeœ,œoutput_typesœ:[œDataFrameœ]}", "target": "ParserComponent-tZs7s", "targetHandle": "{œfieldNameœ:œinput_dataœ,œidœ:œParserComponent-tZs7sœ,œinputTypesœ:[œDataFrameœ,œDataœ],œtypeœ:œotherœ}" + }, + 
{ + "animated": false, + "data": { + "sourceHandle": { + "dataType": "TextInput", + "id": "TextInput-4cEHx", + "name": "text", + "output_types": [ + "Message" + ] + }, + "targetHandle": { + "fieldName": "filter_expression", + "id": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3", + "inputTypes": [ + "Message" + ], + "type": "str" + } + }, + "id": "xy-edge__TextInput-4cEHx{œdataTypeœ:œTextInputœ,œidœ:œTextInput-4cEHxœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3{œfieldNameœ:œfilter_expressionœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3œ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "selected": false, + "source": "TextInput-4cEHx", + "sourceHandle": "{œdataTypeœ:œTextInputœ,œidœ:œTextInput-4cEHxœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", + "target": "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3", + "targetHandle": "{œfieldNameœ:œfilter_expressionœ,œidœ:œOpenSearchVectorStoreComponentMultimodalMultiEmbedding-0ByE3œ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}" } ], "nodes": [ @@ -284,6 +283,7 @@ "frozen": false, "icon": "braces", "legacy": false, + "lf_version": "1.7.0.dev21", "metadata": { "code_hash": "7382d03ce412", "dependencies": { @@ -470,6 +470,7 @@ "frozen": false, "icon": "braces", "legacy": false, + "lf_version": "1.7.0.dev21", "metadata": { "code_hash": "3cda25c3f7b5", "dependencies": { @@ -671,6 +672,7 @@ "frozen": false, "icon": "MessagesSquare", "legacy": false, + "lf_version": "1.7.0.dev21", "metadata": { "code_hash": "7a26c54d89ed", "dependencies": { @@ -1271,8 +1273,9 @@ ], "frozen": false, "icon": "brain-circuit", - "last_updated": "2025-11-26T05:25:03.272Z", + "last_updated": "2025-12-02T21:32:07.567Z", "legacy": false, + "lf_version": "1.7.0.dev21", "metadata": { "code_hash": "694ffc4b17b8", "dependencies": { @@ -1361,7 +1364,7 @@ "value": "ebc01d31-1976-46ce-a385-b0240327226c" }, "_frontend_node_folder_id": { - "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" + "value": "69a7745e-dfb8-40a7-b5cb-5da3af0b10b6" }, "_type": "Component", "api_key": { @@ -1465,7 +1468,26 @@ "info": "Select the model to use", "name": "model_name", "options": [ - "gpt-4o" + "gpt-4o-mini", + "gpt-4o", + "gpt-4.1", + "gpt-4.1-mini", + "gpt-4.1-nano", + "gpt-4-turbo", + "gpt-4-turbo-preview", + "gpt-4", + "gpt-3.5-turbo", + "gpt-5.1", + "gpt-5", + "gpt-5-mini", + "gpt-5-nano", + "gpt-5-chat-latest", + "o1", + "o3-mini", + "o3", + "o3-pro", + "o4-mini", + "o4-mini-high" ], "options_metadata": [], "placeholder": "", @@ -1478,7 +1500,7 @@ "tool_mode": false, "trace_as_metadata": true, "type": "str", - "value": "gpt-4o" + "value": "gpt-4o-mini" }, "ollama_base_url": { "_input_type": "MessageTextInput", @@ -1560,6 +1582,9 @@ "placeholder": "", "real_time_refresh": true, "required": false, + "selected_metadata": { + "icon": "OpenAI" + }, "show": true, "title_case": false, "toggle": false, @@ -1767,8 +1792,8 @@ "width": 320 }, "position": { - "x": -381.0467835456134, - "y": 1685.1023245166507 + "x": -262.4138400422388, + "y": 1630.3486582843238 }, "selected": false, "type": "genericNode" @@ -1809,7 +1834,7 @@ ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-26T05:25:03.274Z", + "last_updated": "2025-12-02T21:24:52.479Z", "legacy": false, "lf_version": "1.7.0.dev21", "metadata": { @@ -1877,12 +1902,12 @@ "value": "ebc01d31-1976-46ce-a385-b0240327226c" }, "_frontend_node_folder_id": { - "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" + "value": "69a7745e-dfb8-40a7-b5cb-5da3af0b10b6" }, 
"_type": "Component", "api_base": { "_input_type": "MessageTextInput", - "advanced": true, + "advanced": false, "display_name": "OpenAI API Base URL", "dynamic": false, "info": "Base URL for the API. Leave empty for default.", @@ -2089,9 +2114,7 @@ "info": "Select the embedding model to use", "name": "model", "options": [ - "text-embedding-3-small", - "text-embedding-3-large", - "text-embedding-ada-002" + "text-embedding-3-small" ], "options_metadata": [], "override_skip": false, @@ -2290,7 +2313,7 @@ "dragging": false, "id": "EmbeddingModel-ooLFP", "measured": { - "height": 369, + "height": 451, "width": 320 }, "position": { @@ -2344,10 +2367,11 @@ ], "frozen": false, "icon": "OpenSearch", - "last_updated": "2025-11-26T05:25:26.304Z", + "last_updated": "2025-12-02T21:30:53.860Z", "legacy": false, + "lf_version": "1.7.0.dev21", "metadata": { - "code_hash": "000397b17863", + "code_hash": "db60433453a8", "dependencies": { "dependencies": [ { @@ -2427,7 +2451,7 @@ "value": "ebc01d31-1976-46ce-a385-b0240327226c" }, "_frontend_node_folder_id": { - "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" + "value": "69a7745e-dfb8-40a7-b5cb-5da3af0b10b6" }, "_type": "Component", "auth_mode": { @@ -2495,7 +2519,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity 
search (KNN) with keyword-based search for optimal results. It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
\"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
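A small sketch of how the two filter formats described above reduce to term/terms clauses. This is hand-rolled for illustration; the component's _coerce_filter_clauses additionally handles empty lists and the __IMPOSSIBLE_VALUE__ placeholder.

import json

# Format 1 - explicit clauses pass through unchanged (placeholders are dropped):
explicit = json.loads('{"filter": [{"term": {"filename": "doc.pdf"}}], "limit": 10, "score_threshold": 1.6}')
assert explicit["filter"] == [{"term": {"filename": "doc.pdf"}}]

# Format 2 - a context-style mapping is translated field by field:
context = {"data_sources": ["file.pdf"], "owners": ["user1", "user2"]}
field_map = {"data_sources": "filename", "document_types": "mimetype", "owners": "owner"}
clauses = [
    {"term": {field_map[k]: vals[0]}} if len(vals) == 1 else {"terms": {field_map[k]: vals}}
    for k, vals in context.items()
]
assert clauses == [{"term": {"filename": "file.pdf"}}, {"terms": {"owner": ["user1", "user2"]}}]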
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. 
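Roughly the shape of one bulk action this method emits, with hypothetical index name, field, and vector; helpers.bulk is the opensearch-py helper already imported above.

import uuid

action = {
    "_op_type": "index",
    "_index": "langflow",                # hypothetical index name
    "_id": str(uuid.uuid4()),            # AOSS uses "id" instead of "_id"
    "chunk_embedding_text_embedding_3_small": [0.1, 0.2, 0.3],  # per-model dynamic field
    "text": "chunk text",
    "embedding_model": "text-embedding-3-small",  # model tag used for auto-detection at search time
    "embedding_dimensions": 3,
}
# helpers.bulk(client, [action], max_chunk_bytes=1 * 1024 * 1024)  # requires a live client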
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n client = self.build_client()\n \n # Check if we're in ingestion-only mode (no search query)\n has_search_query = bool((self.search_query or \"\").strip())\n if not has_search_query:\n logger.debug(\"🔄 Ingestion-only mode activated: search 
operations will be skipped\")\n logger.debug(\"Starting ingestion mode...\")\n \n logger.warning(f\"Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n logger.debug(\"[INGESTION] _add_documents_to_vector_store called\")\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n \n logger.debug(f\"[INGESTION] ingest_data type: {type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}\")\n logger.debug(f\"[INGESTION] ingest_data content: {self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}\")\n\n docs = self.ingest_data or []\n if not docs:\n logger.debug(\"✓ Ingestion complete: No documents provided\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n \n # Normalize embedding to list first\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n \n # Filter out None values (fail-safe mode) - do this BEFORE checking if empty\n embeddings_list = [e for e in embeddings_list if e is not None]\n \n # NOW check if we have any valid embeddings left after filtering\n if not embeddings_list:\n logger.warning(\"All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.\")\n self.log(\"Embedding returned None (fail-safe mode enabled). 
Skipping document ingestion.\")\n return\n\n logger.debug(f\"[INGESTION] Valid embeddings after filtering: {len(embeddings_list)}\")\n self.log(f\"Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = []\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n 
identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n logger.info(f\"✓ Selected embedding model for ingestion: '{embedding_model}'\")\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n logger.debug(f\"[LF] Docs metadata {self.docs_metadata}\")\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings (threaded for concurrency) with retries\n 
def embed_chunk(chunk_text: str) -> list[float]:\n return selected_embedding.embed_documents([chunk_text])[0]\n\n vectors: list[list[float]] | None = None\n last_exception: Exception | None = None\n delay = 1.0\n attempts = 0\n max_attempts = 3\n\n while attempts < max_attempts:\n attempts += 1\n try:\n max_workers = min(max(len(texts), 1), 8)\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n vectors = [None] * len(texts)\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n break\n except Exception as exc:\n last_exception = exc\n if attempts >= max_attempts:\n logger.error(\n f\"Embedding generation failed for model {embedding_model} after retries\",\n error=str(exc),\n )\n raise\n logger.warning(\n \"Threaded embedding generation failed for model %s (attempt %s/%s), retrying in %.1fs\",\n embedding_model,\n attempts,\n max_attempts,\n delay,\n )\n time.sleep(delay)\n delay = min(delay * 2, 8.0)\n\n if vectors is None:\n raise RuntimeError(\n f\"Embedding generation failed for {embedding_model}: {last_exception}\"\n if last_exception\n else f\"Embedding generation failed for {embedding_model}\"\n )\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n logger.info(f\"✓ Ingestion 
complete: Successfully indexed {len(return_ids)} documents with model '{embedding_model}'\")\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": {\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n result = client.search(\n index=self.index_name,\n 
body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n return [self._get_embedding_model_name()]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. 
Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n \n # Check if embedding is None (fail-safe mode)\n if self.embedding is None or (isinstance(self.embedding, list) and all(e is None for e in self.embedding)):\n logger.error(\"Embedding returned None (fail-safe mode enabled). Cannot perform search.\")\n return []\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n # Filter out None values (fail-safe mode)\n embeddings_list = [e for e in embeddings_list if e is not None]\n \n if not embeddings_list:\n logger.error(\"No valid embeddings available after filtering None values (fail-safe mode). 
Cannot perform search.\")\n return []\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models and isinstance(available_models, dict):\n logger.info(f\"Embedding object {idx} provides {len(available_models)} models via available_models dict\")\n for model_name_key, dedicated_embedding in available_models.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model '{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. 
\"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n else:\n # No matching embedding found for this model\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. \"\n f\"This model will be skipped. Available models: {list(embedding_by_model.keys())}\"\n )\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n\n if not query_embeddings:\n msg = \"Failed to generate embeddings for any model\"\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. 
\"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n 
lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Always builds the vector store (triggering ingestion if needed), then performs\n search only if a query is provided.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n # Always build/cache the vector store to ensure ingestion happens\n if self._cached_vector_store is None:\n self.build_vector_store()\n \n # Only perform search if query is provided\n search_query = (self.search_query or \"\").strip()\n if not search_query:\n self.log(\"No search query provided - ingestion completed, returning empty results\")\n return []\n \n # Perform search with the provided query\n raw = self.search(search_query)\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if 
field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n" + "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. 
It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_id > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
\"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. 
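Requests are issued with opensearchpy's helpers.bulk, chunked by max_chunk_bytes. 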
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n client = self.build_client()\n\n # Check if we're in ingestion-only mode (no search query)\n has_search_query = bool((self.search_query or \"\").strip())\n if not has_search_query:\n logger.debug(\"Ingestion-only mode activated: search 
operations will be skipped\")\n logger.debug(\"Starting ingestion mode...\")\n\n logger.warning(f\"Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n logger.debug(\"[INGESTION] _add_documents_to_vector_store called\")\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n logger.debug(\n f\"[INGESTION] ingest_data type: \"\n f\"{type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}\"\n )\n logger.debug(\n f\"[INGESTION] ingest_data content: \"\n f\"{self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}\"\n )\n\n docs = self.ingest_data or []\n if not docs:\n logger.debug(\"Ingestion complete: No documents provided\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Normalize embedding to list first\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n # Filter out None values (fail-safe mode) - do this BEFORE checking if empty\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n # NOW check if we have any valid embeddings left after filtering\n if not embeddings_list:\n logger.warning(\"All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.\")\n self.log(\"Embedding returned None (fail-safe mode enabled). 
Skipping document ingestion.\")\n return\n\n logger.debug(f\"[INGESTION] Valid embeddings after filtering: {len(embeddings_list)}\")\n self.log(f\"Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = []\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n 
identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n logger.info(f\"Selected embedding model for ingestion: '{embedding_model}'\")\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n logger.debug(f\"[LF] Docs metadata {self.docs_metadata}\")\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings (threaded for concurrency) with retries\n def 
embed_chunk(chunk_text: str) -> list[float]:\n return selected_embedding.embed_documents([chunk_text])[0]\n\n vectors: list[list[float]] | None = None\n last_exception: Exception | None = None\n delay = 1.0\n attempts = 0\n max_attempts = 3\n\n while attempts < max_attempts:\n attempts += 1\n try:\n max_workers = min(max(len(texts), 1), 8)\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n vectors = [None] * len(texts)\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n break\n except Exception as exc:\n last_exception = exc\n if attempts >= max_attempts:\n logger.error(\n f\"Embedding generation failed for model {embedding_model} after retries\",\n error=str(exc),\n )\n raise\n logger.warning(\n \"Threaded embedding generation failed for model %s (attempt %s/%s), retrying in %.1fs\",\n embedding_model,\n attempts,\n max_attempts,\n delay,\n )\n time.sleep(delay)\n delay = min(delay * 2, 8.0)\n\n if vectors is None:\n raise RuntimeError(\n f\"Embedding generation failed for {embedding_model}: {last_exception}\"\n if last_exception\n else f\"Embedding generation failed for {embedding_model}\"\n )\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n logger.info(\n f\"Ingestion complete: 
Successfully indexed {len(return_ids)} documents with model '{embedding_model}'\"\n )\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": {\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n logger.debug(f\"Model detection query: {agg_query}\")\n result = 
client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n # Log detailed bucket info for debugging\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n if not models:\n total_hits = result.get(\"hits\", {}).get(\"total\", {})\n total_count = total_hits.get(\"value\", 0) if isinstance(total_hits, dict) else total_hits\n logger.warning(\n f\"No embedding_model values found in index '{self.index_name}'. \"\n f\"Total docs in index: {total_count}. \"\n f\"This may indicate documents were indexed without the embedding_model field.\"\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n fallback_model = self._get_embedding_model_name()\n logger.info(f\"Using fallback model: {fallback_model}\")\n return [fallback_model]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. 
Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Check if embedding is None (fail-safe mode)\n if self.embedding is None or (isinstance(self.embedding, list) and all(e is None for e in self.embedding)):\n logger.error(\"Embedding returned None (fail-safe mode enabled). Cannot perform search.\")\n return []\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n # Filter out None values (fail-safe mode)\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n if not embeddings_list:\n logger.error(\n \"No valid embeddings available after filtering None values (fail-safe mode). 
Cannot perform search.\"\n )\n return []\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models_attr}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models_attr and isinstance(available_models_attr, dict):\n logger.info(\n f\"Embedding object {idx} provides {len(available_models_attr)} models via available_models dict\"\n )\n for model_name_key, dedicated_embedding in available_models_attr.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model '{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. 
\"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n self.log(f\"[SEARCH] Models detected in index: {available_models}\")\n self.log(f\"[SEARCH] Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n # Track matching status for debugging\n matched_models = []\n unmatched_models = []\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n matched_models.append(model_name)\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n self.log(f\"[MATCH] Model '{model_name}' - generated {len(vec)}-dim embedding\")\n else:\n # No matching embedding found for this model\n unmatched_models.append(model_name)\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. \"\n f\"This model will be skipped. Available identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[NO MATCH] Model '{model_name}' - available: {list(embedding_by_model.keys())}\")\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n self.log(f\"[ERROR] Embedding generation failed for '{model_name}': {e}\")\n\n # Log summary of model matching\n logger.info(f\"Model matching summary: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n self.log(f\"[SUMMARY] Model matching: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n if unmatched_models:\n self.log(f\"[WARN] Unmatched models in index: {unmatched_models}\")\n\n if not query_embeddings:\n msg = (\n f\"Failed to generate embeddings for any model. \"\n f\"Index has models: {available_models}, but no matching embedding objects found. 
\"\n f\"Available embedding identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[FAIL] Search failed: {msg}\")\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n self.log(f\"[SKIP] Field '{selected_field}' not a knn_vector - skipping model '{model_name}'\")\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n self.log(f\"[DIM MISMATCH] Model '{model_name}': query={vector_dim} vs field={field_dim} - skipping\")\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n self.log(\n f\"[WARN] No valid KNN queries could be built. 
\"\n f\"Query embeddings generated: {list(query_embeddings.keys())}, \"\n f\"but no matching knn_vector fields found in index.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(\n f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models: \"\n f\"{list(query_embeddings.keys())}\"\n )\n self.log(f\"[EXEC] Executing search with {len(knn_queries_with_candidates)} KNN queries, limit={limit}\")\n self.log(f\"[EXEC] Embedding models used: {list(query_embeddings.keys())}\")\n self.log(f\"[EXEC] KNN fields being queried: {embedding_fields}\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if 
use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n self.log(f\"[RESULT] Search complete: {len(hits)} results found\")\n\n if len(hits) == 0:\n self.log(\n f\"[EMPTY] Debug info: \"\n f\"models_in_index={available_models}, \"\n f\"matched_models={matched_models}, \"\n f\"knn_fields={embedding_fields}, \"\n f\"filters={len(filter_clauses)} clauses\"\n )\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Always builds the vector store (triggering ingestion if needed), then performs\n search only if a query is provided.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n # Always build/cache the vector store to ensure ingestion happens\n logger.info(f\"Search query: {self.search_query}\")\n if self._cached_vector_store is None:\n self.build_vector_store()\n\n # Only perform search if query is provided\n search_query = (self.search_query or \"\").strip()\n if not search_query:\n self.log(\"No search query provided - ingestion completed, returning empty results\")\n return []\n\n # Perform search with the provided query\n raw = self.search(search_query)\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return 
build_config" }, "docs_metadata": { "_input_type": "TableInput", @@ -2657,7 +2681,7 @@ "trace_as_metadata": true, "track_in_telemetry": false, "type": "str", - "value": "" + "value": "{}" }, "index_name": { "_input_type": "StrInput", @@ -2742,7 +2766,7 @@ "title_case": false, "track_in_telemetry": false, "type": "str", - "value": "OWNER_NAME" + "value": "JWT" }, "m": { "_input_type": "IntInput", @@ -2842,7 +2866,7 @@ "title_case": false, "track_in_telemetry": false, "type": "str", - "value": "" + "value": "epC8FOOeq3$3t*VB" }, "search_query": { "_input_type": "QueryInput", @@ -2867,7 +2891,7 @@ "trace_as_metadata": true, "track_in_telemetry": false, "type": "query", - "value": "" + "value": "\"\"" }, "should_cache_vector_store": { "_input_type": "BoolInput", @@ -3018,7 +3042,7 @@ "x": 387.88180968996585, "y": 879.9328678310967 }, - "selected": true, + "selected": false, "type": "genericNode" }, { @@ -3057,7 +3081,7 @@ ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-26T05:25:03.275Z", + "last_updated": "2025-12-02T21:24:52.480Z", "legacy": false, "lf_version": "1.7.0.dev21", "metadata": { @@ -3125,7 +3149,7 @@ "value": "ebc01d31-1976-46ce-a385-b0240327226c" }, "_frontend_node_folder_id": { - "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" + "value": "69a7745e-dfb8-40a7-b5cb-5da3af0b10b6" }, "_type": "Component", "api_base": { @@ -3582,7 +3606,7 @@ ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-26T05:25:03.275Z", + "last_updated": "2025-12-02T21:24:52.481Z", "legacy": false, "lf_version": "1.7.0.dev21", "metadata": { @@ -3650,7 +3674,7 @@ "value": "ebc01d31-1976-46ce-a385-b0240327226c" }, "_frontend_node_folder_id": { - "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" + "value": "69a7745e-dfb8-40a7-b5cb-5da3af0b10b6" }, "_type": "Component", "api_base": { @@ -4071,17 +4095,17 @@ "width": 320 }, "position": { - "x": -390.97188504852653, - "y": 887.8565162649519 + "x": -327.0926077774787, + "y": 909.1496086886345 }, "selected": false, "type": "genericNode" } ], "viewport": { - "x": 493.20780895738403, - "y": -153.25104936517073, - "zoom": 0.502446306646234 + "x": 470.8619752966712, + "y": -133.63346875696777, + "zoom": 0.36224591795587874 } }, "description": "OpenRAG OpenSearch Nudges generator, based on the OpenSearch documents and the chat history.", diff --git a/frontend/app/api/mutations/useOnboardingMutation.ts b/frontend/app/api/mutations/useOnboardingMutation.ts index 6c8e2335..42b95236 100644 --- a/frontend/app/api/mutations/useOnboardingMutation.ts +++ b/frontend/app/api/mutations/useOnboardingMutation.ts @@ -3,6 +3,7 @@ import { useMutation, useQueryClient, } from "@tanstack/react-query"; +import { ONBOARDING_OPENRAG_DOCS_FILTER_ID_KEY } from "@/lib/constants"; export interface OnboardingVariables { // Provider selection @@ -28,6 +29,7 @@ export interface OnboardingVariables { interface OnboardingResponse { message: string; edited: boolean; + openrag_docs_filter_id?: string; } export const useOnboardingMutation = ( @@ -59,6 +61,15 @@ export const useOnboardingMutation = ( return useMutation({ mutationFn: submitOnboarding, + onSuccess: (data) => { + // Store OpenRAG Docs filter ID if returned + if (data.openrag_docs_filter_id && typeof window !== "undefined") { + localStorage.setItem( + ONBOARDING_OPENRAG_DOCS_FILTER_ID_KEY, + data.openrag_docs_filter_id + ); + } + }, onSettled: () => { // Invalidate settings query to refetch updated data queryClient.invalidateQueries({ queryKey: ["settings"] }); diff --git 
a/frontend/app/api/queries/useDoclingHealthQuery.ts b/frontend/app/api/queries/useDoclingHealthQuery.ts index 01441f4b..b41effd4 100644 --- a/frontend/app/api/queries/useDoclingHealthQuery.ts +++ b/frontend/app/api/queries/useDoclingHealthQuery.ts @@ -60,9 +60,9 @@ export const useDoclingHealthQuery = ( // If healthy, check every 30 seconds; otherwise check every 3 seconds return query.state.data?.status === "healthy" ? 30000 : 3000; }, - refetchOnWindowFocus: true, + refetchOnWindowFocus: false, // Disabled to reduce unnecessary calls on tab switches refetchOnMount: true, - staleTime: 30000, // Consider data stale after 25 seconds + staleTime: 30000, // Consider data fresh for 30 seconds ...options, }, queryClient, diff --git a/frontend/app/api/queries/useGetConversationsQuery.ts b/frontend/app/api/queries/useGetConversationsQuery.ts index f7e579b3..d77b7eff 100644 --- a/frontend/app/api/queries/useGetConversationsQuery.ts +++ b/frontend/app/api/queries/useGetConversationsQuery.ts @@ -51,13 +51,15 @@ export const useGetConversationsQuery = ( ) => { const queryClient = useQueryClient(); - async function getConversations(): Promise { + async function getConversations(context: { signal?: AbortSignal }): Promise { try { // Fetch from the selected endpoint only const apiEndpoint = endpoint === "chat" ? "/api/chat/history" : "/api/langflow/history"; - const response = await fetch(apiEndpoint); + const response = await fetch(apiEndpoint, { + signal: context.signal, + }); if (!response.ok) { console.error(`Failed to fetch conversations: ${response.status}`); @@ -84,6 +86,10 @@ export const useGetConversationsQuery = ( return conversations; } catch (error) { + // Ignore abort errors - these are expected when requests are cancelled + if (error instanceof Error && error.name === 'AbortError') { + return []; + } console.error(`Failed to fetch ${endpoint} conversations:`, error); return []; } @@ -94,8 +100,11 @@ export const useGetConversationsQuery = ( queryKey: ["conversations", endpoint, refreshTrigger], placeholderData: (prev) => prev, queryFn: getConversations, - staleTime: 0, // Always consider data stale to ensure fresh data on trigger changes + staleTime: 5000, // Consider data fresh for 5 seconds to prevent excessive refetching gcTime: 5 * 60 * 1000, // Keep in cache for 5 minutes + networkMode: 'always', // Ensure requests can be cancelled + refetchOnMount: false, // Don't refetch on every mount + refetchOnWindowFocus: false, // Don't refetch when window regains focus ...options, }, queryClient, diff --git a/frontend/app/api/queries/useGetFilterByIdQuery.ts b/frontend/app/api/queries/useGetFilterByIdQuery.ts new file mode 100644 index 00000000..353b3153 --- /dev/null +++ b/frontend/app/api/queries/useGetFilterByIdQuery.ts @@ -0,0 +1,21 @@ +import type { KnowledgeFilter } from "./useGetFiltersSearchQuery"; + +export async function getFilterById( + filterId: string +): Promise { + try { + const response = await fetch(`/api/knowledge-filter/${filterId}`, { + method: "GET", + headers: { "Content-Type": "application/json" }, + }); + + const json = await response.json(); + if (!response.ok || !json.success) { + return null; + } + return json.filter as KnowledgeFilter; + } catch (error) { + console.error("Failed to fetch filter by ID:", error); + return null; + } +} diff --git a/frontend/app/api/queries/useGetNudgesQuery.ts b/frontend/app/api/queries/useGetNudgesQuery.ts index 45ef61e7..05c97bde 100644 --- a/frontend/app/api/queries/useGetNudgesQuery.ts +++ 
b/frontend/app/api/queries/useGetNudgesQuery.ts @@ -34,7 +34,7 @@ export const useGetNudgesQuery = ( }); } - async function getNudges(): Promise { + async function getNudges(context: { signal?: AbortSignal }): Promise { try { const requestBody: { filters?: NudgeFilters; @@ -58,6 +58,7 @@ export const useGetNudgesQuery = ( "Content-Type": "application/json", }, body: JSON.stringify(requestBody), + signal: context.signal, }); const data = await response.json(); @@ -67,6 +68,10 @@ export const useGetNudgesQuery = ( return DEFAULT_NUDGES; } catch (error) { + // Ignore abort errors - these are expected when requests are cancelled + if (error instanceof Error && error.name === 'AbortError') { + return DEFAULT_NUDGES; + } console.error("Error getting nudges", error); return DEFAULT_NUDGES; } @@ -76,6 +81,10 @@ export const useGetNudgesQuery = ( { queryKey: ["nudges", chatId, filters, limit, scoreThreshold], queryFn: getNudges, + staleTime: 10000, // Consider data fresh for 10 seconds to prevent rapid refetching + networkMode: 'always', // Ensure requests can be cancelled + refetchOnMount: false, // Don't refetch on every mount + refetchOnWindowFocus: false, // Don't refetch when window regains focus refetchInterval: (query) => { // If data is empty, refetch every 5 seconds const data = query.state.data; diff --git a/frontend/app/api/queries/useGetSearchQuery.ts b/frontend/app/api/queries/useGetSearchQuery.ts index 1f2cceb2..972f8f16 100644 --- a/frontend/app/api/queries/useGetSearchQuery.ts +++ b/frontend/app/api/queries/useGetSearchQuery.ts @@ -127,6 +127,12 @@ export const useGetSearchQuery = ( }, body: JSON.stringify(searchPayload), }); + + if (!response.ok) { + const errorData = await response.json().catch(() => ({ error: "Unknown error" })); + throw new Error(errorData.error || `Search failed with status ${response.status}`); + } + const data = await response.json(); // Group chunks by filename to create file results similar to page.tsx const fileMap = new Map< @@ -198,7 +204,8 @@ export const useGetSearchQuery = ( return files; } catch (error) { console.error("Error getting files", error); - return []; + // Re-throw the error so React Query can handle it and trigger onError callbacks + throw error; } } @@ -207,6 +214,7 @@ export const useGetSearchQuery = ( queryKey: ["search", queryData, query], placeholderData: (prev) => prev, queryFn: getFiles, + retry: false, // Don't retry on errors - show them immediately ...options, }, queryClient, diff --git a/frontend/app/api/queries/useProviderHealthQuery.ts b/frontend/app/api/queries/useProviderHealthQuery.ts index 82ca2db2..5cd86450 100644 --- a/frontend/app/api/queries/useProviderHealthQuery.ts +++ b/frontend/app/api/queries/useProviderHealthQuery.ts @@ -96,9 +96,9 @@ export const useProviderHealthQuery = ( // If healthy, check every 30 seconds; otherwise check every 3 seconds return query.state.data?.status === "healthy" ? 
30000 : 3000; }, - refetchOnWindowFocus: true, + refetchOnWindowFocus: false, // Disabled to reduce unnecessary calls on tab switches refetchOnMount: true, - staleTime: 30000, // Consider data stale after 25 seconds + staleTime: 30000, // Consider data fresh for 30 seconds enabled: !!settings?.edited && options?.enabled !== false, // Only run after onboarding is complete ...options, }, diff --git a/frontend/app/chat/page.tsx b/frontend/app/chat/page.tsx index 9594a0ea..358424f3 100644 --- a/frontend/app/chat/page.tsx +++ b/frontend/app/chat/page.tsx @@ -110,6 +110,13 @@ function ChatPage() { } else { refreshConversationsSilent(); } + + // Save filter association for this response + if (conversationFilter && typeof window !== "undefined") { + const newKey = `conversation_filter_${responseId}`; + localStorage.setItem(newKey, conversationFilter.id); + console.log("[CHAT] Saved filter association:", newKey, "=", conversationFilter.id); + } } }, onError: (error) => { @@ -696,11 +703,18 @@ function ChatPage() { // Use passed previousResponseId if available, otherwise fall back to state const responseIdToUse = previousResponseId || previousResponseIds[endpoint]; + console.log("[CHAT] Sending streaming message:", { + conversationFilter: conversationFilter?.id, + currentConversationId, + responseIdToUse, + }); + // Use the hook to send the message await sendStreamingMessage({ prompt: userMessage.content, previousResponseId: responseIdToUse || undefined, filters: processedFilters, + filter_id: conversationFilter?.id, // ✅ Add filter_id for this conversation limit: parsedFilterData?.limit ?? 10, scoreThreshold: parsedFilterData?.scoreThreshold ?? 0, }); @@ -781,6 +795,19 @@ function ChatPage() { requestBody.previous_response_id = currentResponseId; } + // Add filter_id if a filter is selected for this conversation + if (conversationFilter) { + requestBody.filter_id = conversationFilter.id; + } + + // Debug logging + console.log("[DEBUG] Sending message with:", { + previous_response_id: requestBody.previous_response_id, + filter_id: requestBody.filter_id, + currentConversationId, + previousResponseIds, + }); + const response = await fetch(apiEndpoint, { method: "POST", headers: { @@ -804,6 +831,8 @@ function ChatPage() { // Store the response ID if present for this endpoint if (result.response_id) { + console.log("[DEBUG] Received response_id:", result.response_id, "currentConversationId:", currentConversationId); + setPreviousResponseIds((prev) => ({ ...prev, [endpoint]: result.response_id, @@ -811,12 +840,21 @@ function ChatPage() { // If this is a new conversation (no currentConversationId), set it now if (!currentConversationId) { + console.log("[DEBUG] Setting currentConversationId to:", result.response_id); setCurrentConversationId(result.response_id); refreshConversations(true); } else { + console.log("[DEBUG] Existing conversation, doing silent refresh"); // For existing conversations, do a silent refresh to keep backend in sync refreshConversationsSilent(); } + + // Carry forward the filter association to the new response_id + if (conversationFilter && typeof window !== "undefined") { + const newKey = `conversation_filter_${result.response_id}`; + localStorage.setItem(newKey, conversationFilter.id); + console.log("[DEBUG] Saved filter association:", newKey, "=", conversationFilter.id); + } } } else { console.error("Chat failed:", result.error); diff --git a/frontend/app/knowledge/page.tsx b/frontend/app/knowledge/page.tsx index 9a526159..4a26b38f 100644 --- a/frontend/app/knowledge/page.tsx 
+++ b/frontend/app/knowledge/page.tsx
@@ -75,6 +75,7 @@
   const { parsedFilterData, queryOverride } = useKnowledgeFilter();
   const [selectedRows, setSelectedRows] = useState([]);
   const [showBulkDeleteDialog, setShowBulkDeleteDialog] = useState(false);
+  const lastErrorRef = useRef<string | null>(null);

   const deleteDocumentMutation = useDeleteDocument();
@@ -82,10 +83,28 @@
     refreshTasks();
   }, [refreshTasks]);

-  const { data: searchData = [], isFetching } = useGetSearchQuery(
+  const { data: searchData = [], isFetching, error, isError } = useGetSearchQuery(
     queryOverride,
     parsedFilterData,
   );
+
+  // Show toast notification for search errors
+  useEffect(() => {
+    if (isError && error) {
+      const errorMessage = error instanceof Error ? error.message : "Search failed";
+      // Avoid showing duplicate toasts for the same error
+      if (lastErrorRef.current !== errorMessage) {
+        lastErrorRef.current = errorMessage;
+        toast.error("Search error", {
+          description: errorMessage,
+          duration: 5000,
+        });
+      }
+    } else if (!isError) {
+      // Reset when query succeeds
+      lastErrorRef.current = null;
+    }
+  }, [isError, error]);

   // Convert TaskFiles to File format and merge with backend results
   const taskFilesAsFiles: File[] = taskFiles.map((taskFile) => {
     return {
diff --git a/frontend/app/onboarding/_components/onboarding-card.tsx b/frontend/app/onboarding/_components/onboarding-card.tsx
index 7ac2e85c..7c257088 100644
--- a/frontend/app/onboarding/_components/onboarding-card.tsx
+++ b/frontend/app/onboarding/_components/onboarding-card.tsx
@@ -209,6 +209,16 @@
   const onboardingMutation = useOnboardingMutation({
     onSuccess: (data) => {
       console.log("Onboarding completed successfully", data);
+
+      // Save OpenRAG docs filter ID if sample data was ingested
+      if (data.openrag_docs_filter_id && typeof window !== "undefined") {
+        localStorage.setItem(
+          "onboarding_openrag_docs_filter_id",
+          data.openrag_docs_filter_id
+        );
+        console.log("Saved OpenRAG docs filter ID:", data.openrag_docs_filter_id);
+      }
+
       // Update provider health cache to healthy since backend just validated
       const provider =
         (isEmbedding ? settings.embedding_provider : settings.llm_provider) ||
diff --git a/frontend/app/onboarding/_components/onboarding-content.tsx b/frontend/app/onboarding/_components/onboarding-content.tsx
index ee47f347..7473a916 100644
--- a/frontend/app/onboarding/_components/onboarding-content.tsx
+++ b/frontend/app/onboarding/_components/onboarding-content.tsx
@@ -2,20 +2,30 @@
 import { useEffect, useRef, useState } from "react";
 import { StickToBottom } from "use-stick-to-bottom";
+import { getFilterById } from "@/app/api/queries/useGetFilterByIdQuery";
 import { AssistantMessage } from "@/app/chat/_components/assistant-message";
 import Nudges from "@/app/chat/_components/nudges";
 import { UserMessage } from "@/app/chat/_components/user-message";
-import type { Message } from "@/app/chat/_types/types";
+import type { Message, SelectedFilters } from "@/app/chat/_types/types";
 import OnboardingCard from "@/app/onboarding/_components/onboarding-card";
+import { useChat } from "@/contexts/chat-context";
 import { useChatStreaming } from "@/hooks/useChatStreaming";
 import {
   ONBOARDING_ASSISTANT_MESSAGE_KEY,
+  ONBOARDING_OPENRAG_DOCS_FILTER_ID_KEY,
   ONBOARDING_SELECTED_NUDGE_KEY,
 } from "@/lib/constants";
 import { OnboardingStep } from "./onboarding-step";
 import OnboardingUpload from "./onboarding-upload";

+// Filters for OpenRAG documentation
+const OPENRAG_DOCS_FILTERS: SelectedFilters = {
+  data_sources: ["openrag-documentation.pdf"],
+  document_types: [],
+  owners: [],
+};
+
 export function OnboardingContent({
   handleStepComplete,
   handleStepBack,
@@ -25,6 +35,7 @@
   handleStepBack: () => void;
   currentStep: number;
 }) {
+  const { setConversationFilter, setCurrentConversationId } = useChat();
   const parseFailedRef = useRef(false);
   const [responseId, setResponseId] = useState(null);
   const [selectedNudge, setSelectedNudge] = useState(() => {
@@ -70,7 +81,7 @@
   }, [handleStepBack, currentStep]);

   const { streamingMessage, isLoading, sendMessage } = useChatStreaming({
-    onComplete: (message, newResponseId) => {
+    onComplete: async (message, newResponseId) => {
       setAssistantMessage(message);
       // Save assistant message to localStorage when complete
       if (typeof window !== "undefined") {
@@ -88,6 +99,26 @@
       }
       if (newResponseId) {
         setResponseId(newResponseId);
+
+        // Set the current conversation ID
+        setCurrentConversationId(newResponseId);
+
+        // Save the filter association for this conversation
+        const openragDocsFilterId = localStorage.getItem(ONBOARDING_OPENRAG_DOCS_FILTER_ID_KEY);
+        if (openragDocsFilterId) {
+          try {
+            // Load the filter and set it in the context with explicit responseId
+            // This ensures the filter is saved to localStorage with the correct conversation ID
+            const filter = await getFilterById(openragDocsFilterId);
+            if (filter) {
+              // Pass explicit newResponseId to ensure correct localStorage association
+              setConversationFilter(filter, newResponseId);
+              console.log("[ONBOARDING] Saved filter association:", `conversation_filter_${newResponseId}`, "=", openragDocsFilterId);
+            }
+          } catch (error) {
+            console.error("Failed to associate filter with conversation:", error);
+          }
+        }
       }
     },
     onError: (error) => {
@@ -115,9 +146,36 @@
       localStorage.removeItem(ONBOARDING_ASSISTANT_MESSAGE_KEY);
     }
     setTimeout(async () => {
+      // Check if we have the OpenRAG docs filter ID (sample data was ingested)
+      const openragDocsFilterId =
+        typeof window !== "undefined"
+          ? localStorage.getItem(ONBOARDING_OPENRAG_DOCS_FILTER_ID_KEY)
+          : null;
+
+      // Load and set the OpenRAG docs filter if available
+      let filterToUse = null;
+      console.log("[ONBOARDING] openragDocsFilterId:", openragDocsFilterId);
+      if (openragDocsFilterId) {
+        try {
+          const filter = await getFilterById(openragDocsFilterId);
+          console.log("[ONBOARDING] Loaded filter:", filter);
+          if (filter) {
+            // Pass null to skip localStorage save - no conversation exists yet
+            setConversationFilter(filter, null);
+            filterToUse = filter;
+          }
+        } catch (error) {
+          console.error("Failed to load OpenRAG docs filter:", error);
+        }
+      }
+
+      console.log("[ONBOARDING] Sending message with filter_id:", filterToUse?.id);
       await sendMessage({
         prompt: nudge,
         previousResponseId: responseId || undefined,
+        // Send both filter_id and filters (selections)
+        filter_id: filterToUse?.id,
+        filters: openragDocsFilterId ? OPENRAG_DOCS_FILTERS : undefined,
       });
     }, 1500);
   };
diff --git a/frontend/app/onboarding/_components/onboarding-upload.tsx b/frontend/app/onboarding/_components/onboarding-upload.tsx
index 3855ff83..263af7b7 100644
--- a/frontend/app/onboarding/_components/onboarding-upload.tsx
+++ b/frontend/app/onboarding/_components/onboarding-upload.tsx
@@ -1,10 +1,15 @@
 import { AnimatePresence, motion } from "motion/react";
 import { type ChangeEvent, useEffect, useRef, useState } from "react";
+import { toast } from "sonner";
+import { useCreateFilter } from "@/app/api/mutations/useCreateFilter";
 import { useGetNudgesQuery } from "@/app/api/queries/useGetNudgesQuery";
 import { useGetTasksQuery } from "@/app/api/queries/useGetTasksQuery";
 import { AnimatedProviderSteps } from "@/app/onboarding/_components/animated-provider-steps";
 import { Button } from "@/components/ui/button";
-import { ONBOARDING_UPLOAD_STEPS_KEY } from "@/lib/constants";
+import {
+  ONBOARDING_UPLOAD_STEPS_KEY,
+  ONBOARDING_USER_DOC_FILTER_ID_KEY,
+} from "@/lib/constants";
 import { uploadFile } from "@/lib/upload-utils";

 interface OnboardingUploadProps {
@@ -15,6 +20,11 @@
 const OnboardingUpload = ({ onComplete }: OnboardingUploadProps) => {
   const fileInputRef = useRef(null);
   const [isUploading, setIsUploading] = useState(false);
   const [currentStep, setCurrentStep] = useState(null);
+  const [uploadedFilename, setUploadedFilename] = useState<string | null>(null);
+  const [shouldCreateFilter, setShouldCreateFilter] = useState(false);
+  const [isCreatingFilter, setIsCreatingFilter] = useState(false);
+
+  const createFilterMutation = useCreateFilter();

   const STEP_LIST = [
     "Uploading your document",
@@ -53,6 +63,60 @@
       // Set to final step to show "Done"
       setCurrentStep(STEP_LIST.length);

+      // Create knowledge filter for uploaded document if requested
+      // Guard against race condition: only create if not already creating
+      if (shouldCreateFilter && uploadedFilename && !isCreatingFilter) {
+        // Reset flags immediately (synchronously) to prevent duplicate creation
+        setShouldCreateFilter(false);
+        const filename = uploadedFilename;
+        setUploadedFilename(null);
+        setIsCreatingFilter(true);
+
+        // Get display name from filename (remove extension for cleaner name)
+        const displayName = filename.includes(".")
+          ? filename.substring(0, filename.lastIndexOf("."))
+          : filename;
+
+        const queryData = JSON.stringify({
+          query: "",
+          filters: {
+            data_sources: [filename],
+            document_types: ["*"],
+            owners: ["*"],
+            connector_types: ["*"],
+          },
+          limit: 10,
+          scoreThreshold: 0,
+          color: "green",
+          icon: "file",
+        });
+
+        createFilterMutation
+          .mutateAsync({
+            name: displayName,
+            description: `Filter for ${filename}`,
+            queryData: queryData,
+          })
+          .then((result) => {
+            if (result.filter?.id && typeof window !== "undefined") {
+              localStorage.setItem(
+                ONBOARDING_USER_DOC_FILTER_ID_KEY,
+                result.filter.id,
+              );
+              console.log(
+                "Created knowledge filter for uploaded document",
+                result.filter.id,
+              );
+            }
+          })
+          .catch((error) => {
+            console.error("Failed to create knowledge filter:", error);
+          })
+          .finally(() => {
+            setIsCreatingFilter(false);
+          });
+      }
+
       // Refetch nudges to get new ones
       refetchNudges();
@@ -61,7 +125,7 @@
         onComplete();
       }, 1000);
     }
-  }, [tasks, currentStep, onComplete, refetchNudges]);
+  }, [tasks, currentStep, onComplete, refetchNudges, shouldCreateFilter, uploadedFilename]);

   const resetFileInput = () => {
     if (fileInputRef.current) {
@@ -77,14 +141,29 @@
     setIsUploading(true);
     try {
       setCurrentStep(0);
-      await uploadFile(file, true);
+      const result = await uploadFile(file, true, true); // Pass createFilter=true
       console.log("Document upload task started successfully");
+
+      // Store filename and createFilter flag in state to create filter after ingestion succeeds
+      if (result.createFilter && result.filename) {
+        setUploadedFilename(result.filename);
+        setShouldCreateFilter(true);
+      }
+
       // Move to processing step - task monitoring will handle completion
       setTimeout(() => {
         setCurrentStep(1);
       }, 1500);
     } catch (error) {
-      console.error("Upload failed", (error as Error).message);
+      const errorMessage = error instanceof Error ? error.message : "Upload failed";
+      console.error("Upload failed", errorMessage);
+
+      // Show error toast notification
+      toast.error("Document upload failed", {
+        description: errorMessage,
+        duration: 5000,
+      });
+
+      // Reset on error
+      setCurrentStep(null);
     } finally {
diff --git a/frontend/app/onboarding/_components/openai-onboarding.tsx b/frontend/app/onboarding/_components/openai-onboarding.tsx
index db676553..d01cb64a 100644
--- a/frontend/app/onboarding/_components/openai-onboarding.tsx
+++ b/frontend/app/onboarding/_components/openai-onboarding.tsx
@@ -50,7 +50,12 @@
       : debouncedApiKey
         ? { apiKey: debouncedApiKey }
         : undefined,
-    { enabled: debouncedApiKey !== "" || getFromEnv || alreadyConfigured },
+    {
+      // Only validate when the user opts in (env) or provides a key.
+      // If a key was previously configured, let the user decide to reuse or replace it
+      // without triggering an immediate validation error.
+      enabled: debouncedApiKey !== "" || getFromEnv,
+    },
   );
   // Use custom hook for model selection logic
   const {
@@ -134,11 +139,12 @@
           }
           value={apiKey}
           onChange={(e) => setApiKey(e.target.value)}
-          disabled={alreadyConfigured}
+          // Even if a key exists, allow replacing it to avoid getting stuck on stale creds.
+          disabled={false}
         />
         {alreadyConfigured && (
-            Reusing key from model provider selection.
+            Existing OpenAI key detected. You can reuse it or enter a new one.
         )}
         {isLoadingModels && (
diff --git a/frontend/components/chat-renderer.tsx b/frontend/components/chat-renderer.tsx
index 01a5ca75..6804b065 100644
--- a/frontend/components/chat-renderer.tsx
+++ b/frontend/components/chat-renderer.tsx
@@ -1,12 +1,13 @@
 "use client";

 import { motion } from "framer-motion";
-import { usePathname } from "next/navigation";
-import { useEffect, useState } from "react";
+import { usePathname, useRouter } from "next/navigation";
+import { useCallback, useEffect, useState } from "react";
 import {
   type ChatConversation,
   useGetConversationsQuery,
 } from "@/app/api/queries/useGetConversationsQuery";
+import { getFilterById } from "@/app/api/queries/useGetFilterByIdQuery";
 import type { Settings } from "@/app/api/queries/useGetSettingsQuery";
 import { OnboardingContent } from "@/app/onboarding/_components/onboarding-content";
 import { ProgressBar } from "@/app/onboarding/_components/progress-bar";
@@ -20,9 +21,11 @@
   HEADER_HEIGHT,
   ONBOARDING_ASSISTANT_MESSAGE_KEY,
   ONBOARDING_CARD_STEPS_KEY,
+  ONBOARDING_OPENRAG_DOCS_FILTER_ID_KEY,
   ONBOARDING_SELECTED_NUDGE_KEY,
   ONBOARDING_STEP_KEY,
   ONBOARDING_UPLOAD_STEPS_KEY,
+  ONBOARDING_USER_DOC_FILTER_ID_KEY,
   SIDEBAR_WIDTH,
   TOTAL_ONBOARDING_STEPS,
 } from "@/lib/constants";
@@ -36,12 +39,16 @@
   children: React.ReactNode;
 }) {
   const pathname = usePathname();
+  const router = useRouter();
   const { isAuthenticated, isNoAuthMode } = useAuth();
   const {
     endpoint,
     refreshTrigger,
     refreshConversations,
     startNewConversation,
+    setConversationFilter,
+    setCurrentConversationId,
+    setPreviousResponseIds,
   } = useChat();

   // Initialize onboarding state based on local storage and settings
@@ -71,6 +78,78 @@
     startNewConversation();
   };

+  // Navigate to /chat when onboarding is active so animation reveals chat underneath
+  useEffect(() => {
+    if (!showLayout && pathname !== "/chat" && pathname !== "/") {
+      router.push("/chat");
+    }
+  }, [showLayout, pathname, router]);
+
+  // Helper to store default filter ID for new conversations after onboarding
+  const storeDefaultFilterForNewConversations = useCallback(
+    async (preferUserDoc: boolean) => {
+      if (typeof window === "undefined") return;
+
+      // Check if we already have a default filter set
+      const existingDefault = localStorage.getItem("default_conversation_filter_id");
+      if (existingDefault) {
+        console.log("[FILTER] Default filter already set:", existingDefault);
+        // Try to apply it to context state (don't save to localStorage to avoid overwriting)
+        try {
+          const filter = await getFilterById(existingDefault);
+          if (filter) {
+            // Pass null to skip localStorage save
+            setConversationFilter(filter, null);
+            return; // Successfully loaded and set, we're done
+          }
+        } catch (error) {
+          console.error("Failed to load existing default filter, will set new one:", error);
+          // Filter doesn't exist anymore, clear it and continue to set a new one
+          localStorage.removeItem("default_conversation_filter_id");
+        }
+      }
+
+      // Try to get the appropriate filter ID
+      let filterId: string | null = null;
+
+      if (preferUserDoc) {
+        // Completed full onboarding - prefer user document filter
+        filterId = localStorage.getItem(ONBOARDING_USER_DOC_FILTER_ID_KEY);
+        console.log("[FILTER] User doc filter ID:", filterId);
+      }
+
+      // Fall back to OpenRAG docs filter
+      if (!filterId) {
+        filterId = localStorage.getItem(ONBOARDING_OPENRAG_DOCS_FILTER_ID_KEY);
+        console.log("[FILTER] OpenRAG docs filter ID:", filterId);
+      }
+
+      console.log("[FILTER] Final filter ID to use:", filterId);
+
+      if (filterId) {
+        // Store this as the default filter for new conversations
+        localStorage.setItem("default_conversation_filter_id", filterId);
+
+        // Apply filter to context state only (don't save to localStorage since there's no conversation yet)
+        // The default_conversation_filter_id will be used when a new conversation is started
+        try {
+          const filter = await getFilterById(filterId);
+          console.log("[FILTER] Loaded filter:", filter);
+          if (filter) {
+            // Pass null to skip localStorage save - this prevents overwriting existing conversation filters
+            setConversationFilter(filter, null);
+            console.log("[FILTER] Set conversation filter (no save):", filter.id);
+          }
+        } catch (error) {
+          console.error("Failed to set onboarding filter:", error);
+        }
+      } else {
+        console.log("[FILTER] No filter ID found, not setting default");
+      }
+    },
+    [setConversationFilter]
+  );
+
   // Save current step to local storage whenever it changes
   useEffect(() => {
     if (typeof window !== "undefined" && !showLayout) {
@@ -78,7 +157,7 @@
     }
   }, [currentStep, showLayout]);

-  const handleStepComplete = () => {
+  const handleStepComplete = async () => {
     if (currentStep < TOTAL_ONBOARDING_STEPS - 1) {
       setCurrentStep(currentStep + 1);
     } else {
@@ -90,6 +169,20 @@
         localStorage.removeItem(ONBOARDING_CARD_STEPS_KEY);
         localStorage.removeItem(ONBOARDING_UPLOAD_STEPS_KEY);
       }
+
+      // Clear ALL conversation state so next message starts fresh
+      await startNewConversation();
+
+      // Store the user document filter as default for new conversations and load it
+      await storeDefaultFilterForNewConversations(true);
+
+      // Clean up onboarding filter IDs now that we've set the default
+      if (typeof window !== "undefined") {
+        localStorage.removeItem(ONBOARDING_OPENRAG_DOCS_FILTER_ID_KEY);
+        localStorage.removeItem(ONBOARDING_USER_DOC_FILTER_ID_KEY);
+        console.log("[FILTER] Cleaned up onboarding filter IDs");
+      }
+
       setShowLayout(true);
     }
   };
@@ -109,6 +202,8 @@
       localStorage.removeItem(ONBOARDING_CARD_STEPS_KEY);
       localStorage.removeItem(ONBOARDING_UPLOAD_STEPS_KEY);
     }
+    // Store the OpenRAG docs filter as default for new conversations
+    storeDefaultFilterForNewConversations(false);
     setShowLayout(true);
   };
diff --git a/frontend/components/knowledge-filter-panel.tsx b/frontend/components/knowledge-filter-panel.tsx
index e30f13a7..4e044442 100644
--- a/frontend/components/knowledge-filter-panel.tsx
+++ b/frontend/components/knowledge-filter-panel.tsx
@@ -465,6 +465,7 @@
             disabled={isSaving}
             variant="outline"
             size="sm"
+            className="relative z-10"
           >
             Cancel
@@ -475,6 +476,7 @@
             size="sm"
             onClick={handleDeleteFilter}
             disabled={isSaving}
+            className="relative z-10"
           >
             Delete Filter
@@ -483,7 +485,7 @@
             onClick={handleSaveConfiguration}
             disabled={isSaving}
             size="sm"
-            className="relative"
+            className="relative z-10"
           >
             {isSaving && (
               <>
diff --git a/frontend/components/navigation.tsx b/frontend/components/navigation.tsx
index b172779e..68ffa4e6 100644
--- a/frontend/components/navigation.tsx
+++ b/frontend/components/navigation.tsx
@@ -289,7 +289,7 @@
       handleNewConversation();
     } else if (activeConvo) {
       loadConversation(activeConvo);
-      refreshConversations();
+      // Don't call refreshConversations here - it causes unnecessary refetches
     } else if (
       conversations.length > 0 &&
diff --git a/frontend/components/knowledge-filter-panel.tsx b/frontend/components/knowledge-filter-panel.tsx
index e30f13a7..4e044442 100644
--- a/frontend/components/knowledge-filter-panel.tsx
+++ b/frontend/components/knowledge-filter-panel.tsx
@@ -465,6 +465,7 @@ export function KnowledgeFilterPanel() {
             disabled={isSaving}
             variant="outline"
             size="sm"
+            className="relative z-10"
           >
             Cancel
@@ -475,6 +476,7 @@ export function KnowledgeFilterPanel() {
             size="sm"
             onClick={handleDeleteFilter}
             disabled={isSaving}
+            className="relative z-10"
           >
             Delete Filter
@@ -483,7 +485,7 @@ export function KnowledgeFilterPanel() {
             onClick={handleSaveConfiguration}
             disabled={isSaving}
             size="sm"
-            className="relative"
+            className="relative z-10"
           >
             {isSaving && (
               <>
diff --git a/frontend/components/navigation.tsx b/frontend/components/navigation.tsx
index b172779e..68ffa4e6 100644
--- a/frontend/components/navigation.tsx
+++ b/frontend/components/navigation.tsx
@@ -289,7 +289,7 @@ export function Navigation({
       handleNewConversation();
     } else if (activeConvo) {
       loadConversation(activeConvo);
-      refreshConversations();
+      // Don't call refreshConversations here - it causes unnecessary refetches
     } else if (
       conversations.length > 0 &&
       currentConversationId === null &&
@@ -473,7 +473,7 @@ export function Navigation({
         onClick={() => {
           if (loading || isConversationsLoading) return;
           loadConversation(conversation);
-          refreshConversations();
+          // Don't refresh - just loading an existing conversation
         }}
         disabled={loading || isConversationsLoading}
       >
diff --git a/frontend/contexts/chat-context.tsx b/frontend/contexts/chat-context.tsx
index 8c203003..bee05b98 100644
--- a/frontend/contexts/chat-context.tsx
+++ b/frontend/contexts/chat-context.tsx
@@ -65,8 +65,8 @@ interface ChatContextType {
   refreshConversationsSilent: () => Promise<void>;
   refreshTrigger: number;
   refreshTriggerSilent: number;
-  loadConversation: (conversation: ConversationData) => void;
-  startNewConversation: () => void;
+  loadConversation: (conversation: ConversationData) => Promise<void>;
+  startNewConversation: () => Promise<void>;
   conversationData: ConversationData | null;
   forkFromResponse: (responseId: string) => void;
@@ -77,7 +77,8 @@ interface ChatContextType {
   conversationLoaded: boolean;
   setConversationLoaded: (loaded: boolean) => void;
   conversationFilter: KnowledgeFilter | null;
-  setConversationFilter: (filter: KnowledgeFilter | null) => void;
+  // responseId: undefined = use currentConversationId, null = don't save to localStorage
+  setConversationFilter: (filter: KnowledgeFilter | null, responseId?: string | null) => void;
 }
 
 const ChatContext = createContext<ChatContextType | undefined>(undefined);
@@ -112,6 +113,8 @@ export function ChatProvider({ children }: ChatProviderProps) {
   const refreshTimeoutRef = useRef(null);
 
   const refreshConversations = useCallback((force = false) => {
+    console.log("[REFRESH] refreshConversations called, force:", force);
+
     if (force) {
       // Immediate refresh for important updates like new conversations
       setRefreshTrigger((prev) => prev + 1);
@@ -145,22 +148,59 @@ export function ChatProvider({ children }: ChatProviderProps) {
   }, []);
 
   const loadConversation = useCallback(
-    (conversation: ConversationData) => {
+    async (conversation: ConversationData) => {
+      console.log("[CONVERSATION] Loading conversation:", {
+        conversationId: conversation.response_id,
+        title: conversation.title,
+        endpoint: conversation.endpoint,
+      });
+
       setCurrentConversationId(conversation.response_id);
       setEndpoint(conversation.endpoint);
 
       // Store the full conversation data for the chat page to use
       setConversationData(conversation);
+
       // Load the filter if one exists for this conversation
-      // Only update the filter if this is a different conversation (to preserve user's filter selection)
-      setConversationFilterState((currentFilter) => {
-        // If we're loading a different conversation, load its filter
-        // Otherwise keep the current filter (don't reset it when conversation refreshes)
-        const isDifferentConversation =
-          conversation.response_id !== conversationData?.response_id;
-        return isDifferentConversation
-          ? conversation.filter || null
-          : currentFilter;
-      });
+      // Always update the filter to match the conversation being loaded
+      const isDifferentConversation =
+        conversation.response_id !== conversationData?.response_id;
+
+      if (isDifferentConversation && typeof window !== "undefined") {
+        // Try to load the saved filter from localStorage
+        const savedFilterId = localStorage.getItem(`conversation_filter_${conversation.response_id}`);
+        console.log("[CONVERSATION] Looking for filter:", {
+          conversationId: conversation.response_id,
+          savedFilterId,
+        });
+
+        if (savedFilterId) {
+          // Import getFilterById dynamically to avoid circular dependency
+          const { getFilterById } = await import("@/app/api/queries/useGetFilterByIdQuery");
+          try {
+            const filter = await getFilterById(savedFilterId);
+
+            if (filter) {
+              console.log("[CONVERSATION] Loaded filter:", filter.name, filter.id);
+              setConversationFilterState(filter);
+              // Update conversation data with the loaded filter
+              setConversationData((prev) => {
+                if (!prev) return prev;
+                return { ...prev, filter };
+              });
+            }
+          } catch (error) {
+            console.error("[CONVERSATION] Failed to load filter:", error);
+            // Filter was deleted, clean up localStorage
+            localStorage.removeItem(`conversation_filter_${conversation.response_id}`);
+            setConversationFilterState(null);
+          }
+        } else {
+          // No saved filter in localStorage, clear the current filter
+          console.log("[CONVERSATION] No filter found for this conversation");
+          setConversationFilterState(null);
+        }
+      }
+
       // Clear placeholder when loading a real conversation
       setPlaceholderConversation(null);
       setConversationLoaded(true);
@@ -170,15 +210,48 @@ export function ChatProvider({ children }: ChatProviderProps) {
     [conversationData?.response_id],
   );
 
-  const startNewConversation = useCallback(() => {
+  const startNewConversation = useCallback(async () => {
+    console.log("[CONVERSATION] Starting new conversation");
+
     // Clear current conversation data and reset state
     setCurrentConversationId(null);
     setPreviousResponseIds({ chat: null, langflow: null });
     setConversationData(null);
     setConversationDocs([]);
     setConversationLoaded(false);
-    // Clear the filter when starting a new conversation
-    setConversationFilterState(null);
+
+    // Load default filter if available (and clear it after first use)
+    if (typeof window !== "undefined") {
+      const defaultFilterId = localStorage.getItem("default_conversation_filter_id");
+      console.log("[CONVERSATION] Default filter ID:", defaultFilterId);
+
+      if (defaultFilterId) {
+        // Clear the default filter now so it's only used once
+        localStorage.removeItem("default_conversation_filter_id");
+        console.log("[CONVERSATION] Cleared default filter (used once)");
+
+        try {
+          const { getFilterById } = await import("@/app/api/queries/useGetFilterByIdQuery");
+          const filter = await getFilterById(defaultFilterId);
+
+          if (filter) {
+            console.log("[CONVERSATION] Loaded default filter:", filter.name, filter.id);
+            setConversationFilterState(filter);
+          } else {
+            // Default filter was deleted
+            setConversationFilterState(null);
+          }
+        } catch (error) {
+          console.error("[CONVERSATION] Failed to load default filter:", error);
+          setConversationFilterState(null);
+        }
+      } else {
+        console.log("[CONVERSATION] No default filter set");
+        setConversationFilterState(null);
+      }
+    } else {
+      setConversationFilterState(null);
+    }
 
     // Create a temporary placeholder conversation to show in sidebar
     const placeholderConversation: ConversationData = {
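// ── Editor's sketch (illustrative, not part of the patch) ─────────────────────
// The persistence scheme loadConversation relies on, reduced to two helpers:
// each conversation's filter association lives under its own localStorage key,
// so rehydration needs no backend round trip. The key format is from the patch.

const filterKeyFor = (responseId: string) => `conversation_filter_${responseId}`;

function saveConversationFilter(responseId: string, filterId: string | null): void {
  const key = filterKeyFor(responseId);
  if (filterId) localStorage.setItem(key, filterId);
  else localStorage.removeItem(key); // clearing the filter drops the association
}

function savedFilterIdFor(responseId: string): string | null {
  return localStorage.getItem(filterKeyFor(responseId));
}

// Usage: saveConversationFilter("resp_123", "filter_abc");
// savedFilterIdFor("resp_123") then yields "filter_abc" after a reload.
// ──────────────────────────────────────────────────────────────────────────────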
@@ -230,7 +303,7 @@ export function ChatProvider({ children }: ChatProviderProps) {
   );
 
   const setConversationFilter = useCallback(
-    (filter: KnowledgeFilter | null) => {
+    (filter: KnowledgeFilter | null, responseId?: string | null) => {
       setConversationFilterState(filter);
       // Update the conversation data to include the filter
       setConversationData((prev) => {
@@ -240,8 +313,24 @@ export function ChatProvider({ children }: ChatProviderProps) {
           filter,
         };
       });
+
+      // Determine which conversation ID to use for saving
+      // - undefined: use currentConversationId (default behavior)
+      // - null: explicitly skip saving to localStorage
+      // - string: use the provided responseId
+      const targetId = responseId === undefined ? currentConversationId : responseId;
+
+      // Save filter association for the target conversation
+      if (typeof window !== "undefined" && targetId) {
+        const key = `conversation_filter_${targetId}`;
+        if (filter) {
+          localStorage.setItem(key, filter.id);
+        } else {
+          localStorage.removeItem(key);
+        }
+      }
     },
-    [],
+    [currentConversationId],
   );
 
   const value = useMemo(
diff --git a/frontend/hooks/useChatStreaming.ts b/frontend/hooks/useChatStreaming.ts
index b2877fd0..c67a0ca6 100644
--- a/frontend/hooks/useChatStreaming.ts
+++ b/frontend/hooks/useChatStreaming.ts
@@ -4,6 +4,7 @@ import type {
   Message,
   SelectedFilters,
 } from "@/app/chat/_types/types";
+import { useChat } from "@/contexts/chat-context";
 
 interface UseChatStreamingOptions {
   endpoint?: string;
@@ -15,6 +16,7 @@ interface SendMessageOptions {
   prompt: string;
   previousResponseId?: string;
   filters?: SelectedFilters;
+  filter_id?: string;
   limit?: number;
   scoreThreshold?: number;
 }
@@ -31,10 +33,13 @@ export function useChatStreaming({
   const streamAbortRef = useRef(null);
   const streamIdRef = useRef(0);
 
+  const { refreshConversations } = useChat();
+
   const sendMessage = async ({
     prompt,
     previousResponseId,
     filters,
+    filter_id,
     limit = 10,
     scoreThreshold = 0,
   }: SendMessageOptions) => {
@@ -73,6 +78,7 @@ export function useChatStreaming({
       stream: boolean;
       previous_response_id?: string;
       filters?: SelectedFilters;
+      filter_id?: string;
       limit?: number;
       scoreThreshold?: number;
     } = {
@@ -90,6 +96,12 @@ export function useChatStreaming({
       requestBody.filters = filters;
     }
 
+    if (filter_id) {
+      requestBody.filter_id = filter_id;
+    }
+
+    console.log("[useChatStreaming] Sending request:", { filter_id, requestBody });
+
     const response = await fetch(endpoint, {
       method: "POST",
       headers: {
@@ -489,6 +501,7 @@ export function useChatStreaming({
       // Clear streaming message and call onComplete with final message
       setStreamingMessage(null);
       onComplete?.(finalMessage, newResponseId);
+      refreshConversations(true);
 
       return finalMessage;
     }
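// ── Editor's sketch (illustrative, not part of the patch) ─────────────────────
// What the hook now puts on the wire when a filter is active. Field names match
// the patch; the endpoint value and response handling are elided for brevity.

interface ChatRequestBody {
  prompt: string;
  stream: boolean;
  previous_response_id?: string;
  filter_id?: string;
}

async function postChat(endpoint: string, prompt: string, filterId?: string): Promise<Response> {
  const body: ChatRequestBody = { prompt, stream: true };
  if (filterId) body.filter_id = filterId; // omitted entirely when no filter is set
  return fetch(endpoint, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(body),
  });
}
// ──────────────────────────────────────────────────────────────────────────────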
diff --git a/frontend/lib/constants.ts b/frontend/lib/constants.ts
index cc5d2bdb..88baf8d0 100644
--- a/frontend/lib/constants.ts
+++ b/frontend/lib/constants.ts
@@ -45,6 +45,8 @@ export const ONBOARDING_ASSISTANT_MESSAGE_KEY = "onboarding_assistant_message";
 export const ONBOARDING_SELECTED_NUDGE_KEY = "onboarding_selected_nudge";
 export const ONBOARDING_CARD_STEPS_KEY = "onboarding_card_steps";
 export const ONBOARDING_UPLOAD_STEPS_KEY = "onboarding_upload_steps";
+export const ONBOARDING_OPENRAG_DOCS_FILTER_ID_KEY = "onboarding_openrag_docs_filter_id";
+export const ONBOARDING_USER_DOC_FILTER_ID_KEY = "onboarding_user_doc_filter_id";
 
 export const FILES_REGEX =
   /(?<=I'm uploading a document called ['"])[^'"]+\.[^.]+(?=['"]\. Here is its content:)/;
diff --git a/frontend/lib/upload-utils.ts b/frontend/lib/upload-utils.ts
index 8a16f3b9..9892bde7 100644
--- a/frontend/lib/upload-utils.ts
+++ b/frontend/lib/upload-utils.ts
@@ -10,6 +10,8 @@ export interface UploadFileResult {
   deletion: unknown;
   unified: boolean;
   raw: unknown;
+  createFilter?: boolean;
+  filename?: string;
 }
 
 export async function duplicateCheck(
@@ -120,11 +122,15 @@ export async function uploadFileForContext(
 export async function uploadFile(
   file: File,
   replace = false,
+  createFilter = false,
 ): Promise<UploadFileResult> {
   try {
     const formData = new FormData();
     formData.append("file", file);
     formData.append("replace_duplicates", replace.toString());
+    if (createFilter) {
+      formData.append("create_filter", "true");
+    }
 
     const uploadResponse = await fetch("/api/router/upload_ingest", {
       method: "POST",
@@ -177,6 +183,11 @@ export async function uploadFile(
     );
   }
 
+  const shouldCreateFilter = (uploadIngestJson as { create_filter?: boolean })
+    .create_filter;
+  const filename = (uploadIngestJson as { filename?: string })
+    .filename;
+
   const result: UploadFileResult = {
     fileId,
     filePath,
@@ -184,6 +195,8 @@ export async function uploadFile(
     deletion: deletionJson,
     unified: true,
     raw: uploadIngestJson,
+    createFilter: shouldCreateFilter,
+    filename,
   };
 
   return result;
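// ── Editor's sketch (illustrative, not part of the patch) ─────────────────────
// Round trip of the new create_filter flag: the caller opts in, the router
// echoes the flag and filename back with its 202 response, and the caller can
// then create a filter scoped to that document. uploadFile is the helper above;
// the follow-up filter-creation call is assumed and left abstract here.

async function uploadWithFilter(file: File): Promise<UploadFileResult> {
  const result = await uploadFile(file, /* replace */ false, /* createFilter */ true);
  if (result.createFilter && result.filename) {
    // e.g. hand result.filename to a filter-creation request for this document
    console.log(`Upload accepted; create a filter for ${result.filename}`);
  }
  return result;
}
// ──────────────────────────────────────────────────────────────────────────────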
diff --git a/src/agent.py b/src/agent.py
index 74da24e7..bd4d257f 100644
--- a/src/agent.py
+++ b/src/agent.py
@@ -67,6 +69,7 @@ def store_conversation_thread(user_id: str, response_id: str, conversation_state
         "created_at": conversation_state.get("created_at"),
         "last_activity": conversation_state.get("last_activity"),
         "previous_response_id": conversation_state.get("previous_response_id"),
+        "filter_id": conversation_state.get("filter_id"),
         "total_messages": len(
             [msg for msg in messages if msg.get("role") in ["user", "assistant"]]
         ),
@@ -219,15 +222,26 @@ async def async_response(
 
         response = await client.responses.create(**request_params)
 
-        response_text = response.output_text
-        logger.info("Response generated", log_prefix=log_prefix, response=response_text)
+        # Check if response has output_text using getattr to avoid issues with special objects
+        output_text = getattr(response, "output_text", None)
+        if output_text is not None:
+            response_text = output_text
+            logger.info("Response generated", log_prefix=log_prefix, response=response_text)
 
-        # Extract and store response_id if available
-        response_id = getattr(response, "id", None) or getattr(
-            response, "response_id", None
-        )
+            # Extract and store response_id if available
+            response_id = getattr(response, "id", None) or getattr(
+                response, "response_id", None
+            )
 
-        return response_text, response_id, response
+            return response_text, response_id, response
+        else:
+            msg = "Nudge response missing output_text"
+            error = getattr(response, "error", None)
+            if error:
+                error_msg = getattr(error, "message", None)
+                if error_msg:
+                    msg = error_msg
+            raise ValueError(msg)
     except Exception as e:
         logger.error("Exception in non-streaming response", error=str(e))
         import traceback
@@ -314,6 +328,7 @@ async def async_chat(
     user_id: str,
     model: str = "gpt-4.1-mini",
     previous_response_id: str = None,
+    filter_id: str = None,
 ):
     logger.debug(
         "async_chat called", user_id=user_id, previous_response_id=previous_response_id
@@ -334,6 +349,10 @@ async def async_chat(
             "Added user message", message_count=len(conversation_state["messages"])
         )
 
+    # Store filter_id in conversation state if provided
+    if filter_id:
+        conversation_state["filter_id"] = filter_id
+
     response_text, response_id, response_obj = await async_response(
         async_client,
         prompt,
@@ -389,6 +408,7 @@ async def async_chat_stream(
     user_id: str,
     model: str = "gpt-4.1-mini",
     previous_response_id: str = None,
+    filter_id: str = None,
 ):
     # Get the specific conversation thread (or create new one)
     conversation_state = get_conversation_thread(user_id, previous_response_id)
@@ -399,6 +419,10 @@ async def async_chat_stream(
     user_message = {"role": "user", "content": prompt, "timestamp": datetime.now()}
     conversation_state["messages"].append(user_message)
 
+    # Store filter_id in conversation state if provided
+    if filter_id:
+        conversation_state["filter_id"] = filter_id
+
     full_response = ""
     response_id = None
     async for chunk in async_stream(
@@ -452,6 +476,7 @@ async def async_langflow_chat(
     extra_headers: dict = None,
     previous_response_id: str = None,
     store_conversation: bool = True,
+    filter_id: str = None,
 ):
     logger.debug(
         "async_langflow_chat called",
@@ -478,6 +503,10 @@ async def async_langflow_chat(
             message_count=len(conversation_state["messages"]),
         )
 
+    # Store filter_id in conversation state if provided
+    if filter_id:
+        conversation_state["filter_id"] = filter_id
+
     response_text, response_id, response_obj = await async_response(
         langflow_client,
         prompt,
@@ -562,6 +591,7 @@ async def async_langflow_chat_stream(
     user_id: str,
     extra_headers: dict = None,
     previous_response_id: str = None,
+    filter_id: str = None,
 ):
     logger.debug(
         "async_langflow_chat_stream called",
@@ -578,6 +608,10 @@ async def async_langflow_chat_stream(
     user_message = {"role": "user", "content": prompt, "timestamp": datetime.now()}
     conversation_state["messages"].append(user_message)
 
+    # Store filter_id in conversation state if provided
+    if filter_id:
+        conversation_state["filter_id"] = filter_id
+
     full_response = ""
     response_id = None
     collected_chunks = []  # Store all chunks for function call data
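# ── Editor's sketch (illustrative, not part of the patch) ──────────────────────
# The filter_id lifecycle on the backend, reduced to plain dicts: each chat turn
# may write it into the per-thread conversation state, and the stored metadata
# surfaces it again so the frontend can restore the filter later. Hypothetical
# in-memory store; the real code goes through get/store_conversation_thread.

conversation_threads: dict = {}

def record_chat_turn(response_id, prompt, filter_id=None):
    state = conversation_threads.setdefault(response_id, {"messages": []})
    state["messages"].append({"role": "user", "content": prompt})
    if filter_id:  # only overwritten when a filter accompanies this turn
        state["filter_id"] = filter_id
    return state

def thread_metadata(response_id):
    state = conversation_threads.get(response_id, {})
    return {
        "response_id": response_id,
        "filter_id": state.get("filter_id"),  # None when no filter was ever set
        "total_messages": len(state.get("messages", [])),
    }
# ───────────────────────────────────────────────────────────────────────────────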
diff --git a/src/api/chat.py b/src/api/chat.py
index 58492118..56da5b2d 100644
--- a/src/api/chat.py
+++ b/src/api/chat.py
@@ -14,6 +14,7 @@ async def chat_endpoint(request: Request, chat_service, session_manager):
     filters = data.get("filters")
     limit = data.get("limit", 10)
     score_threshold = data.get("scoreThreshold", 0)
+    filter_id = data.get("filter_id")
 
     user = request.state.user
     user_id = user.user_id
@@ -42,6 +43,7 @@ async def chat_endpoint(request: Request, chat_service, session_manager):
                 jwt_token,
                 previous_response_id=previous_response_id,
                 stream=True,
+                filter_id=filter_id,
             ),
             media_type="text/event-stream",
             headers={
@@ -58,6 +60,7 @@ async def chat_endpoint(request: Request, chat_service, session_manager):
             jwt_token,
             previous_response_id=previous_response_id,
             stream=False,
+            filter_id=filter_id,
         )
         return JSONResponse(result)
 
@@ -71,6 +74,7 @@ async def langflow_endpoint(request: Request, chat_service, session_manager):
     filters = data.get("filters")
     limit = data.get("limit", 10)
     score_threshold = data.get("scoreThreshold", 0)
+    filter_id = data.get("filter_id")
 
     user = request.state.user
     user_id = user.user_id
@@ -100,6 +104,7 @@ async def langflow_endpoint(request: Request, chat_service, session_manager):
                 jwt_token,
                 previous_response_id=previous_response_id,
                 stream=True,
+                filter_id=filter_id,
             ),
             media_type="text/event-stream",
             headers={
@@ -116,6 +121,7 @@ async def langflow_endpoint(request: Request, chat_service, session_manager):
             jwt_token,
             previous_response_id=previous_response_id,
             stream=False,
+            filter_id=filter_id,
         )
         return JSONResponse(result)
diff --git a/src/api/router.py b/src/api/router.py
index 15a9b116..79d03df5 100644
--- a/src/api/router.py
+++ b/src/api/router.py
@@ -37,6 +37,7 @@ async def upload_ingest_router(
     # Route based on configuration
     if DISABLE_INGEST_WITH_LANGFLOW:
         # Route to traditional OpenRAG upload
+        # Note: onboarding filter creation is only supported in Langflow path
         logger.debug("Routing to traditional OpenRAG upload")
         return await traditional_upload(request, document_service, session_manager)
     else:
@@ -77,6 +78,7 @@ async def langflow_upload_ingest_task(
     tweaks_json = form.get("tweaks")
     delete_after_ingest = form.get("delete_after_ingest", "true").lower() == "true"
     replace_duplicates = form.get("replace_duplicates", "false").lower() == "true"
+    create_filter = form.get("create_filter", "false").lower() == "true"
 
     # Parse JSON fields if provided
     settings = None
@@ -177,14 +179,15 @@ async def langflow_upload_ingest_task(
 
         logger.debug("Langflow upload task created successfully", task_id=task_id)
 
-        return JSONResponse(
-            {
-                "task_id": task_id,
-                "message": f"Langflow upload task created for {len(upload_files)} file(s)",
-                "file_count": len(upload_files),
-            },
-            status_code=202,
-        )  # 202 Accepted for async processing
+        response_data = {
+            "task_id": task_id,
+            "message": f"Langflow upload task created for {len(upload_files)} file(s)",
+            "file_count": len(upload_files),
+            "create_filter": create_filter,  # Pass flag back to frontend
+            "filename": original_filenames[0] if len(original_filenames) == 1 else None,  # Pass filename for filter creation
+        }
+
+        return JSONResponse(response_data, status_code=202)  # 202 Accepted for async processing
 
     except Exception:
         # Clean up temp files on error
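# ── Editor's sketch (illustrative, not part of the patch) ──────────────────────
# Shape of the router's 202 body after this change, for a single-file upload
# with create_filter=true. All values are invented placeholders.

example_202_body = {
    "task_id": "3f6c0a",                                   # placeholder id
    "message": "Langflow upload task created for 1 file(s)",
    "file_count": 1,
    "create_filter": True,                                 # echoed from the form field
    "filename": "quarterly-report.pdf",                    # only set for single-file uploads
}
# ───────────────────────────────────────────────────────────────────────────────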
diff --git a/src/api/settings.py b/src/api/settings.py
index bfa493d9..54ad477e 100644
--- a/src/api/settings.py
+++ b/src/api/settings.py
@@ -558,7 +558,7 @@ async def update_settings(request, session_manager):
     # Update provider-specific settings
     provider_updated = False
     if "openai_api_key" in body and body["openai_api_key"].strip():
-        current_config.providers.openai.api_key = body["openai_api_key"]
+        current_config.providers.openai.api_key = body["openai_api_key"].strip()
         current_config.providers.openai.configured = True
         config_updated = True
         provider_updated = True
@@ -617,6 +617,9 @@ async def update_settings(request, session_manager):
         "watsonx_api_key", "watsonx_endpoint", "watsonx_project_id", "ollama_endpoint"
     ]
+
+    await clients.refresh_patched_client()
+
     if any(key in body for key in provider_fields_to_check):
         try:
             flows_service = _get_flows_service()
@@ -624,8 +627,11 @@ async def update_settings(request, session_manager):
             # Update global variables
             await _update_langflow_global_variables(current_config)
 
+            # Update LLM client credentials when embedding selection changes
             if "embedding_provider" in body or "embedding_model" in body:
-                await _update_mcp_servers_with_provider_credentials(current_config)
+                await _update_mcp_servers_with_provider_credentials(
+                    current_config, session_manager
+                )
 
             # Update model values if provider or model changed
             if "llm_provider" in body or "llm_model" in body or "embedding_provider" in body or "embedding_model" in body:
@@ -636,6 +642,7 @@ async def update_settings(request, session_manager):
             # Don't fail the entire settings update if Langflow update fails
             # The config was still saved
 
+
     logger.info(
         "Configuration updated successfully", updated_fields=list(body.keys())
     )
@@ -795,7 +802,7 @@ async def onboarding(request, flows_service, session_manager=None):
 
     # Update provider-specific credentials
     if "openai_api_key" in body and body["openai_api_key"].strip():
-        current_config.providers.openai.api_key = body["openai_api_key"]
+        current_config.providers.openai.api_key = body["openai_api_key"].strip()
         current_config.providers.openai.configured = True
         config_updated = True
 
@@ -1061,11 +1068,34 @@ async def onboarding(request, flows_service, session_manager=None):
                 {"error": "Failed to save configuration"}, status_code=500
             )
 
+        # Refresh cached patched client so latest credentials take effect immediately
+        await clients.refresh_patched_client()
+
+        # Create OpenRAG Docs knowledge filter if sample data was ingested
+        # Only create on embedding step to avoid duplicates (both LLM and embedding cards submit with sample_data)
+        openrag_docs_filter_id = None
+        if should_ingest_sample_data and ("embedding_provider" in body or "embedding_model" in body):
+            try:
+                openrag_docs_filter_id = await _create_openrag_docs_filter(
+                    request, session_manager
+                )
+                if openrag_docs_filter_id:
+                    logger.info(
+                        "Created OpenRAG Docs knowledge filter",
+                        filter_id=openrag_docs_filter_id,
+                    )
+            except Exception as e:
+                logger.error(
+                    "Failed to create OpenRAG Docs knowledge filter", error=str(e)
+                )
+                # Don't fail onboarding if filter creation fails
+
         return JSONResponse(
             {
                 "message": "Onboarding configuration updated successfully",
                 "edited": True,  # Confirm that config is now marked as edited
                 "sample_data_ingested": should_ingest_sample_data,
+                "openrag_docs_filter_id": openrag_docs_filter_id,
             }
         )
@@ -1081,6 +1111,73 @@ async def onboarding(request, flows_service, session_manager=None):
     )
 
 
+async def _create_openrag_docs_filter(request, session_manager):
+    """Create the OpenRAG Docs knowledge filter for onboarding"""
+    import uuid
+    import json
+    from datetime import datetime
+
+    # Get knowledge filter service from app state
+    app = request.scope.get("app")
+    if not app or not hasattr(app.state, "services"):
+        logger.error("Could not access services for knowledge filter creation")
+        return None
+
+    knowledge_filter_service = app.state.services.get("knowledge_filter_service")
+    if not knowledge_filter_service:
+        logger.error("Knowledge filter service not available")
+        return None
+
+    # Get user and JWT token from request
+    user = request.state.user
+    jwt_token = session_manager.get_effective_jwt_token(user.user_id, request.state.jwt_token)
+
+    # In no-auth mode, set owner to None so filter is visible to all users
+    # In auth mode, use the actual user as owner
+    if is_no_auth_mode():
+        owner_user_id = None
+    else:
+        owner_user_id = user.user_id
+
+    # Create the filter document
+    filter_id = str(uuid.uuid4())
+    query_data = json.dumps({
+        "query": "",
+        "filters": {
+            "data_sources": ["openrag-documentation.pdf"],
+            "document_types": ["*"],
+            "owners": ["*"],
+            "connector_types": ["*"],
+        },
+        "limit": 10,
+        "scoreThreshold": 0,
+        "color": "blue",
+        "icon": "book",
+    })
+
+    filter_doc = {
+        "id": filter_id,
+        "name": "OpenRAG Docs",
+        "description": "Filter for OpenRAG documentation",
+        "query_data": query_data,
+        "owner": owner_user_id,
+        "allowed_users": [],
+        "allowed_groups": [],
+        "created_at": datetime.utcnow().isoformat(),
+        "updated_at": datetime.utcnow().isoformat(),
+    }
+
+    result = await knowledge_filter_service.create_knowledge_filter(
+        filter_doc, user_id=user.user_id, jwt_token=jwt_token
+    )
+
+    if result.get("success"):
+        return filter_id
+    else:
+        logger.error("Failed to create OpenRAG Docs filter", error=result.get("error"))
+        return None
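# ── Editor's sketch (illustrative, not part of the patch) ──────────────────────
# The ownership rule used above, isolated: in no-auth mode the filter has no
# owner and is visible to every user; with auth enabled it belongs to the
# onboarding user. Standalone stand-in for the is_no_auth_mode() branch.

def filter_owner(user_id, no_auth_mode):
    # None means "shared with all users" in the knowledge-filter index
    return None if no_auth_mode else user_id

assert filter_owner("u-123", no_auth_mode=True) is None
assert filter_owner("u-123", no_auth_mode=False) == "u-123"
# ───────────────────────────────────────────────────────────────────────────────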
+
+
 def _get_flows_service():
     """Helper function to get flows service instance"""
     from services.flows_service import FlowsService
diff --git a/src/config/settings.py b/src/config/settings.py
index df221986..75b09f09 100644
--- a/src/config/settings.py
+++ b/src/config/settings.py
@@ -165,18 +165,36 @@ async def generate_langflow_api_key(modify: bool = False):
             if validation_response.status_code == 200:
                 logger.debug("Cached API key is valid", key_prefix=LANGFLOW_KEY[:8])
                 return LANGFLOW_KEY
-            else:
+            elif validation_response.status_code in (401, 403):
                 logger.warning(
-                    "Cached API key is invalid, generating fresh key",
+                    "Cached API key is unauthorized, generating fresh key",
                     status_code=validation_response.status_code,
                 )
                 LANGFLOW_KEY = None  # Clear invalid key
-        except Exception as e:
+            else:
+                logger.warning(
+                    "Cached API key validation returned non-access error; keeping existing key",
+                    status_code=validation_response.status_code,
+                )
+                return LANGFLOW_KEY
+        except requests.exceptions.Timeout as e:
             logger.warning(
-                "Cached API key validation failed, generating fresh key",
+                "Cached API key validation timed out; keeping existing key",
                 error=str(e),
             )
-            LANGFLOW_KEY = None  # Clear invalid key
+            return LANGFLOW_KEY
+        except requests.exceptions.RequestException as e:
+            logger.warning(
+                "Cached API key validation failed due to request error; keeping existing key",
+                error=str(e),
+            )
+            return LANGFLOW_KEY
+        except Exception as e:
+            logger.warning(
+                "Unexpected error during cached API key validation; keeping existing key",
+                error=str(e),
+            )
+            return LANGFLOW_KEY
 
     # Use default langflow/langflow credentials if auto-login is enabled and credentials not set
     username = LANGFLOW_SUPERUSER
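# ── Editor's sketch (illustrative, not part of the patch) ──────────────────────
# The retry policy above as a pure function: only a definite auth failure
# (401/403) discards the cached key; timeouts, transport errors, and other
# status codes keep it, since the key may still be valid.

def should_discard_cached_key(status_code=None, request_failed=False):
    if request_failed:  # timeout or transport error: can't tell, keep the key
        return False
    return status_code in (401, 403)

assert should_discard_cached_key(status_code=401) is True
assert should_discard_cached_key(status_code=500) is False
assert should_discard_cached_key(request_failed=True) is False
# ───────────────────────────────────────────────────────────────────────────────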
@@ -279,7 +297,7 @@ class AppClients:
         self.opensearch = None
         self.langflow_client = None
         self.langflow_http_client = None
-        self._patched_async_client = None  # Private attribute
+        self._patched_async_client = None  # Private attribute - single client for all providers
         self._client_init_lock = __import__('threading').Lock()  # Lock for thread-safe initialization
         self.converter = None
@@ -364,6 +382,9 @@ class AppClients:
         Property that ensures OpenAI client is initialized on first access.
         This allows lazy initialization so the app can start without an API key.
 
+        The client is patched with LiteLLM support to handle multiple providers.
+        All provider credentials are loaded into environment for LiteLLM routing.
+
         Note: The client is a long-lived singleton that should be closed via cleanup().
         Thread-safe via lock to prevent concurrent initialization attempts.
         """
@@ -377,21 +398,40 @@ class AppClients:
         if self._patched_async_client is not None:
             return self._patched_async_client
 
-        # Try to initialize the client on-demand
-        # First check if OPENAI_API_KEY is in environment
-        openai_key = os.getenv("OPENAI_API_KEY")
-
-        if not openai_key:
-            # Try to get from config (in case it was set during onboarding)
-            try:
-                config = get_openrag_config()
-                if config and config.provider and config.provider.api_key:
-                    openai_key = config.provider.api_key
-                    # Set it in environment so AsyncOpenAI can pick it up
-                    os.environ["OPENAI_API_KEY"] = openai_key
-                    logger.info("Loaded OpenAI API key from config file")
-            except Exception as e:
-                logger.debug("Could not load OpenAI key from config", error=str(e))
+        # Load all provider credentials into environment for LiteLLM
+        # LiteLLM routes based on model name prefixes (openai/, ollama/, watsonx/, etc.)
+        try:
+            config = get_openrag_config()
+
+            # Set OpenAI credentials
+            if config.providers.openai.api_key:
+                os.environ["OPENAI_API_KEY"] = config.providers.openai.api_key
+                logger.debug("Loaded OpenAI API key from config")
+
+            # Set Anthropic credentials
+            if config.providers.anthropic.api_key:
+                os.environ["ANTHROPIC_API_KEY"] = config.providers.anthropic.api_key
+                logger.debug("Loaded Anthropic API key from config")
+
+            # Set WatsonX credentials
+            if config.providers.watsonx.api_key:
+                os.environ["WATSONX_API_KEY"] = config.providers.watsonx.api_key
+            if config.providers.watsonx.endpoint:
+                os.environ["WATSONX_ENDPOINT"] = config.providers.watsonx.endpoint
+                os.environ["WATSONX_API_BASE"] = config.providers.watsonx.endpoint  # LiteLLM expects this name
+            if config.providers.watsonx.project_id:
+                os.environ["WATSONX_PROJECT_ID"] = config.providers.watsonx.project_id
+            if config.providers.watsonx.api_key:
+                logger.debug("Loaded WatsonX credentials from config")
+
+            # Set Ollama endpoint
+            if config.providers.ollama.endpoint:
+                os.environ["OLLAMA_BASE_URL"] = config.providers.ollama.endpoint
+                os.environ["OLLAMA_ENDPOINT"] = config.providers.ollama.endpoint
+                logger.debug("Loaded Ollama endpoint from config")
+
+        except Exception as e:
+            logger.debug("Could not load provider credentials from config", error=str(e))
 
         # Try to initialize the client - AsyncOpenAI() will read from environment
         # We'll try HTTP/2 first with a probe, then fall back to HTTP/1.1 if it times out
@@ -455,6 +495,27 @@ class AppClients:
 
         return self._patched_async_client
 
+    @property
+    def patched_llm_client(self):
+        """Alias for patched_async_client - for backward compatibility with code expecting separate clients."""
+        return self.patched_async_client
+
+    @property
+    def patched_embedding_client(self):
+        """Alias for patched_async_client - for backward compatibility with code expecting separate clients."""
+        return self.patched_async_client
+
+    async def refresh_patched_client(self):
+        """Reset patched client so next use picks up updated provider credentials."""
+        if self._patched_async_client is not None:
+            try:
+                await self._patched_async_client.close()
+                logger.info("Closed patched client for refresh")
+            except Exception as e:
+                logger.warning("Failed to close patched client during refresh", error=str(e))
+            finally:
+                self._patched_async_client = None
+
     async def cleanup(self):
         """Cleanup resources - should be called on application shutdown"""
         # Close AsyncOpenAI client if it was created
@@ -750,4 +811,4 @@ def get_agent_config():
 
 def get_embedding_model() -> str:
     """Return the currently configured embedding model."""
-    return get_openrag_config().knowledge.embedding_model or EMBED_MODEL if DISABLE_INGEST_WITH_LANGFLOW else ""
\ No newline at end of file
+    return get_openrag_config().knowledge.embedding_model or EMBED_MODEL if DISABLE_INGEST_WITH_LANGFLOW else ""
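# ── Editor's sketch (illustrative, not part of the patch) ──────────────────────
# How the settings handlers are expected to use refresh_patched_client: drop the
# cached client, and the lazy property rebuilds it with fresh credentials on the
# next access. `clients` is the module-level AppClients instance; the key update
# below is a stand-in for the real settings endpoint.

import os

async def apply_new_openai_key(clients, new_key):
    os.environ["OPENAI_API_KEY"] = new_key.strip()  # mirrors the .strip() fix above
    await clients.refresh_patched_client()          # close and drop the cached client
    return clients.patched_async_client             # next access re-reads credentials
# ───────────────────────────────────────────────────────────────────────────────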
diff --git a/src/models/processors.py b/src/models/processors.py
index 7edbc475..9731adb7 100644
--- a/src/models/processors.py
+++ b/src/models/processors.py
@@ -209,7 +209,7 @@ class TaskProcessor:
         embeddings = []
         for batch in text_batches:
-            resp = await clients.patched_async_client.embeddings.create(
+            resp = await clients.patched_embedding_client.embeddings.create(
                 model=embedding_model, input=batch
             )
             embeddings.extend([d.embedding for d in resp.data])
diff --git a/src/services/chat_service.py b/src/services/chat_service.py
index 040f03d8..e965623c 100644
--- a/src/services/chat_service.py
+++ b/src/services/chat_service.py
@@ -15,6 +15,7 @@ class ChatService:
         jwt_token: str = None,
         previous_response_id: str = None,
         stream: bool = False,
+        filter_id: str = None,
     ):
         """Handle chat requests using the patched OpenAI client"""
         if not prompt:
@@ -26,17 +27,19 @@ class ChatService:
 
         if stream:
             return async_chat_stream(
-                clients.patched_async_client,
+                clients.patched_llm_client,
                 prompt,
                 user_id,
                 previous_response_id=previous_response_id,
+                filter_id=filter_id,
             )
         else:
             response_text, response_id = await async_chat(
-                clients.patched_async_client,
+                clients.patched_llm_client,
                 prompt,
                 user_id,
                 previous_response_id=previous_response_id,
+                filter_id=filter_id,
            )
             response_data = {"response": response_text}
             if response_id:
@@ -50,6 +53,7 @@ class ChatService:
         jwt_token: str = None,
         previous_response_id: str = None,
         stream: bool = False,
+        filter_id: str = None,
     ):
         """Handle Langflow chat requests"""
         if not prompt:
@@ -147,6 +151,7 @@ class ChatService:
                 user_id,
                 extra_headers=extra_headers,
                 previous_response_id=previous_response_id,
+                filter_id=filter_id,
             )
         else:
             from agent import async_langflow_chat
@@ -158,6 +163,7 @@ class ChatService:
                 user_id,
                 extra_headers=extra_headers,
                 previous_response_id=previous_response_id,
+                filter_id=filter_id,
             )
             response_data = {"response": response_text}
             if response_id:
@@ -344,7 +350,7 @@ class ChatService:
             if user_id and jwt_token:
                 set_auth_context(user_id, jwt_token)
             response_text, response_id = await async_chat(
-                clients.patched_async_client,
+                clients.patched_llm_client,
                 document_prompt,
                 user_id,
                 previous_response_id=previous_response_id,
@@ -429,6 +435,7 @@ class ChatService:
                     "previous_response_id": conversation_state.get(
                         "previous_response_id"
                     ),
+                    "filter_id": conversation_state.get("filter_id"),
                     "total_messages": len(messages),
                     "source": "in_memory",
                 }
@@ -447,6 +454,7 @@ class ChatService:
                     "created_at": metadata.get("created_at"),
                     "last_activity": metadata.get("last_activity"),
                     "previous_response_id": metadata.get("previous_response_id"),
+                    "filter_id": metadata.get("filter_id"),
                     "total_messages": metadata.get("total_messages", 0),
                     "source": "metadata_only",
                 }
@@ -545,6 +553,7 @@ class ChatService:
                     or conversation.get("created_at"),
                     "last_activity": metadata.get("last_activity")
                     or conversation.get("last_activity"),
+                    "filter_id": metadata.get("filter_id"),
                     "total_messages": len(messages),
                     "source": "langflow_enhanced",
                     "langflow_session_id": session_id,
@@ -632,4 +641,3 @@ class ChatService:
         except Exception as e:
             logger.error(f"Error deleting session {session_id} from Langflow: {e}")
             return False
-
diff --git a/src/services/models_service.py b/src/services/models_service.py
index f26d0594..979bcec2 100644
--- a/src/services/models_service.py
+++ b/src/services/models_service.py
@@ -108,7 +108,7 @@ class ModelsService:
             else:
                 logger.error(f"Failed to fetch OpenAI models: {response.status_code}")
                 raise Exception(
-                    f"OpenAI API returned status code {response.status_code}"
+                    f"OpenAI API returned status code {response.status_code}, {response.text}"
                 )
 
         except Exception as e:
diff --git a/src/services/search_service.py b/src/services/search_service.py
index 3261511d..b0927d0f 100644
--- a/src/services/search_service.py
+++ b/src/services/search_service.py
@@ -1,7 +1,7 @@
 import copy
 from typing import Any, Dict
 from agentd.tool_decorator import tool
-from config.settings import EMBED_MODEL, clients, INDEX_NAME, get_embedding_model
+from config.settings import EMBED_MODEL, clients, INDEX_NAME, get_embedding_model, WATSONX_EMBEDDING_DIMENSIONS
 from auth_context import get_auth_context
 from utils.logging_config import get_logger
@@ -147,13 +147,38 @@ class SearchService:
         attempts = 0
         last_exception = None
 
+        # Format model name for LiteLLM compatibility
+        # The patched client routes through LiteLLM for non-OpenAI providers
+        formatted_model = model_name
+
+        # Skip if already has a provider prefix
+        if not any(model_name.startswith(prefix + "/") for prefix in ["openai", "ollama", "watsonx", "anthropic"]):
+            # Detect provider from model name characteristics:
+            # - Ollama: contains ":" (e.g., "nomic-embed-text:latest")
+            # - WatsonX: check against known IBM embedding models
+            # - OpenAI: everything else (no prefix needed)
+
+            if ":" in model_name:
+                # Ollama models use tags with colons
+                formatted_model = f"ollama/{model_name}"
+                logger.debug(f"Formatted Ollama model: {model_name} -> {formatted_model}")
+            elif model_name in WATSONX_EMBEDDING_DIMENSIONS:
+                # WatsonX embedding models - use hardcoded list from settings
+                formatted_model = f"watsonx/{model_name}"
+                logger.debug(f"Formatted WatsonX model: {model_name} -> {formatted_model}")
+            # else: OpenAI models don't need a prefix
+
         while attempts < MAX_EMBED_RETRIES:
             attempts += 1
             try:
-                resp = await clients.patched_async_client.embeddings.create(
-                    model=model_name, input=[query]
+                resp = await clients.patched_embedding_client.embeddings.create(
+                    model=formatted_model, input=[query]
                 )
-                return model_name, resp.data[0].embedding
+                # Try to get embedding - some providers return .embedding, others return ['embedding']
+                embedding = getattr(resp.data[0], 'embedding', None)
+                if embedding is None:
+                    embedding = resp.data[0]['embedding']
+                return model_name, embedding
             except Exception as e:
                 last_exception = e
                 if attempts >= MAX_EMBED_RETRIES: