diff --git a/flows/components/opensearch.py b/flows/components/opensearch.py index f453d8be..7902161b 100644 --- a/flows/components/opensearch.py +++ b/flows/components/opensearch.py @@ -4,12 +4,11 @@ import copy import json import time import uuid -from typing import Any, List, Optional - from concurrent.futures import ThreadPoolExecutor, as_completed +from typing import Any from opensearchpy import OpenSearch, helpers -from opensearchpy.exceptions import RequestError +from opensearchpy.exceptions import OpenSearchException, RequestError from lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store from lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection @@ -50,11 +49,12 @@ def get_embedding_field_name(model_name: str) -> str: Returns: Field name in format: chunk_embedding_{normalized_model_name} """ + logger.debug(f"chunk_embedding_{normalize_model_name(model_name)}") return f"chunk_embedding_{normalize_model_name(model_name)}" @vector_store_connection -class OpenSearchVectorStoreComponent(LCVectorStoreComponent): +class OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent): """OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities. This component provides vector storage and retrieval using OpenSearch, combining semantic @@ -73,9 +73,15 @@ class OpenSearchVectorStoreComponent(LCVectorStoreComponent): - Parallel query embedding generation for all detected models - Vector storage with configurable engines (jvector, nmslib, faiss, lucene) - Flexible authentication (Basic auth, JWT tokens) + + Model Name Resolution: + - Priority: deployment > model > model_id > model_name attributes + - This ensures correct matching between embedding objects and index fields + - When multiple embeddings are provided, specify embedding_model_name to select which one to use + - During search, each detected model in the index is matched to its corresponding embedding object """ - display_name: str = "OpenSearch (Multi-Model)" + display_name: str = "OpenSearch (Multi-Model Multi-Embedding)" icon: str = "OpenSearch" description: str = ( "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search." ) @@ -130,7 +136,7 @@ class OpenSearchVectorStoreComponent(LCVectorStoreComponent): }, ], value=[], - input_types=["Data"] + input_types=["Data"], ), StrInput( name="opensearch_url", @@ -203,16 +209,19 @@ class OpenSearchVectorStoreComponent(LCVectorStoreComponent): advanced=True, ), *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc. - HandleInput(name="embedding", display_name="Embedding", input_types=["Embeddings"]), + HandleInput(name="embedding", display_name="Embedding", input_types=["Embeddings"], is_list=True), StrInput( name="embedding_model_name", display_name="Embedding Model Name", value="", info=( - "Name of the embedding model being used (e.g., 'text-embedding-3-small'). " - "Used to create dynamic vector field names and track which model embedded each document. " - "Auto-detected from embedding component if not specified." + "Name of the embedding model to use for ingestion. This selects which embedding from the list " + "will be used to embed documents. Matches on deployment, model, model_id, or model_name. " + "For duplicate deployments, use combined format: 'deployment:model' " + "(e.g., 'text-embedding-ada-002:text-embedding-3-large'). " + "Leave empty to use the first embedding. The error message will list all available identifiers."
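# Illustrative aside (a sketch, not part of the patch): given the
# normalize_model_name() rules, model names map to dynamic vector fields as:
#   "text-embedding-3-small"     -> "chunk_embedding_text_embedding_3_small"
#   "ibm/granite-embedding-107m" -> "chunk_embedding_ibm_granite_embedding_107m"
#   "cohere.embed-english-v3.0"  -> "chunk_embedding_cohere_embed_english_v3_0"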
), + advanced=False, ), StrInput( name="vector_field", @@ -265,20 +274,20 @@ class OpenSearchVectorStoreComponent(LCVectorStoreComponent): name="username", display_name="Username", value="admin", - show=False, + show=True, ), SecretStrInput( name="password", display_name="OpenSearch Password", value="admin", - show=False, + show=True, ), SecretStrInput( name="jwt_token", display_name="JWT Token", value="JWT", load_from_db=False, - show=True, + show=False, info=( "Valid JSON Web Token for authentication. " "Will be sent in the Authorization header (with optional 'Bearer ' prefix)." @@ -318,9 +327,16 @@ class OpenSearchVectorStoreComponent(LCVectorStoreComponent): ), ] - def _get_embedding_model_name(self) -> str: + def _get_embedding_model_name(self, embedding_obj=None) -> str: """Get the embedding model name from component config or embedding object. + Priority: deployment > model > model_id > model_name + This ensures we use the actual model being deployed, not just the configured model. + Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.) + + Args: + embedding_obj: Specific embedding object to get name from (optional) + Returns: Embedding model name @@ -331,17 +347,46 @@ class OpenSearchVectorStoreComponent(LCVectorStoreComponent): if hasattr(self, "embedding_model_name") and self.embedding_model_name: return self.embedding_model_name.strip() - # Try to get from embedding component + # Try to get from provided embedding object + if embedding_obj: + # Priority: deployment > model > model_id > model_name + if hasattr(embedding_obj, "deployment") and embedding_obj.deployment: + return str(embedding_obj.deployment) + if hasattr(embedding_obj, "model") and embedding_obj.model: + return str(embedding_obj.model) + if hasattr(embedding_obj, "model_id") and embedding_obj.model_id: + return str(embedding_obj.model_id) + if hasattr(embedding_obj, "model_name") and embedding_obj.model_name: + return str(embedding_obj.model_name) + + # Try to get from embedding component (legacy single embedding) if hasattr(self, "embedding") and self.embedding: - if hasattr(self.embedding, "model"): - return str(self.embedding.model) - if hasattr(self.embedding, "model_name"): - return str(self.embedding.model_name) + # Handle list of embeddings + if isinstance(self.embedding, list) and len(self.embedding) > 0: + first_emb = self.embedding[0] + if hasattr(first_emb, "deployment") and first_emb.deployment: + return str(first_emb.deployment) + if hasattr(first_emb, "model") and first_emb.model: + return str(first_emb.model) + if hasattr(first_emb, "model_id") and first_emb.model_id: + return str(first_emb.model_id) + if hasattr(first_emb, "model_name") and first_emb.model_name: + return str(first_emb.model_name) + # Handle single embedding + elif not isinstance(self.embedding, list): + if hasattr(self.embedding, "deployment") and self.embedding.deployment: + return str(self.embedding.deployment) + if hasattr(self.embedding, "model") and self.embedding.model: + return str(self.embedding.model) + if hasattr(self.embedding, "model_id") and self.embedding.model_id: + return str(self.embedding.model_id) + if hasattr(self.embedding, "model_name") and self.embedding.model_name: + return str(self.embedding.model_name) msg = ( "Could not determine embedding model name. " "Please set the 'embedding_model_name' field or ensure the embedding component " - "has a 'model' or 'model_name' attribute." + "has a 'deployment', 'model', 'model_id', or 'model_name' attribute." 
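# Minimal sketch of the resolution order implemented above (assumes a generic
# embeddings object `emb`; which attributes exist varies by provider):
#   for attr in ("deployment", "model", "model_id", "model_name"):
#       if (value := getattr(emb, attr, None)):
#           return str(value)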
) raise ValueError(msg) @@ -434,12 +479,8 @@ class OpenSearchVectorStoreComponent(LCVectorStoreComponent): }, }, # Also ensure the embedding_model tracking field exists as keyword - "embedding_model": { - "type": "keyword" - }, - "embedding_dimensions": { - "type": "integer" - } + "embedding_model": {"type": "keyword"}, + "embedding_dimensions": {"type": "integer"}, } } client.indices.put_mapping(index=index_name, body=mapping) @@ -450,9 +491,9 @@ class OpenSearchVectorStoreComponent(LCVectorStoreComponent): properties = self._get_index_properties(client) if not self._is_knn_vector_field(properties, field_name): - raise ValueError( - f"Field '{field_name}' is not mapped as knn_vector. Current mapping: {properties.get(field_name)}" - ) + msg = f"Field '{field_name}' is not mapped as knn_vector. Current mapping: {properties.get(field_name)}" + logger.error(msg) + raise ValueError(msg) def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None: """Validate engine compatibility with Amazon OpenSearch Serverless (AOSS). @@ -600,8 +641,15 @@ class OpenSearchVectorStoreComponent(LCVectorStoreComponent): @check_cached_vector_store def build_vector_store(self) -> OpenSearch: # Return raw OpenSearch client as our "vector store." - self.log(self.ingest_data) client = self.build_client() + + # Check if we're in ingestion-only mode (no search query) + has_search_query = bool((self.search_query or "").strip()) + if not has_search_query: + logger.debug("Ingestion-only mode activated: search operations will be skipped") + logger.debug("Starting ingestion mode...") + + logger.debug(f"Embedding: {self.embedding}") self._add_documents_to_vector_store(client=client) return client @@ -611,33 +659,185 @@ class OpenSearchVectorStoreComponent(LCVectorStoreComponent): This method handles the complete document ingestion pipeline: - Prepares document data and metadata - - Generates vector embeddings + - Generates vector embeddings using the selected model - Creates appropriate index mappings with dynamic field names - Bulk inserts documents with vectors and model tracking Args: client: OpenSearch client for performing operations """ + logger.debug("[INGESTION] _add_documents_to_vector_store called") # Convert DataFrame to Data if needed using parent's method self.ingest_data = self._prepare_ingest_data() + logger.debug( + f"[INGESTION] ingest_data type: " + f"{type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}" + ) + logger.debug( + f"[INGESTION] ingest_data content: " + f"{self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}" + ) + docs = self.ingest_data or [] if not docs: - self.log("No documents to ingest.") + logger.debug("Ingestion complete: No documents provided") return - # Get embedding model name - embedding_model = self._get_embedding_model_name() + if not self.embedding: + msg = "Embedding handle is required to embed documents." + raise ValueError(msg) + + # Normalize embedding to list first + embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding] + + # Filter out None values (fail-safe mode) - do this BEFORE checking if empty + embeddings_list = [e for e in embeddings_list if e is not None] + + # NOW check if we have any valid embeddings left after filtering + if not embeddings_list: + logger.warning("All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.") + self.log("Embedding returned None (fail-safe mode enabled).
Skipping document ingestion.") + return + + logger.debug(f"[INGESTION] Valid embeddings after filtering: {len(embeddings_list)}") + self.log(f"Available embedding models: {len(embeddings_list)}") + + # Select the embedding to use for ingestion + selected_embedding = None + embedding_model = None + + # If embedding_model_name is specified, find matching embedding + if hasattr(self, "embedding_model_name") and self.embedding_model_name and self.embedding_model_name.strip(): + target_model_name = self.embedding_model_name.strip() + self.log(f"Looking for embedding model: {target_model_name}") + + for emb_obj in embeddings_list: + # Check all possible model identifiers (deployment, model, model_id, model_name) + # Also check available_models list from EmbeddingsWithModels + possible_names = [] + deployment = getattr(emb_obj, "deployment", None) + model = getattr(emb_obj, "model", None) + model_id = getattr(emb_obj, "model_id", None) + model_name = getattr(emb_obj, "model_name", None) + available_models_attr = getattr(emb_obj, "available_models", None) + + if deployment: + possible_names.append(str(deployment)) + if model: + possible_names.append(str(model)) + if model_id: + possible_names.append(str(model_id)) + if model_name: + possible_names.append(str(model_name)) + + # Also add combined identifier + if deployment and model and deployment != model: + possible_names.append(f"{deployment}:{model}") + + # Add all models from available_models dict + if available_models_attr and isinstance(available_models_attr, dict): + possible_names.extend( + str(model_key).strip() + for model_key in available_models_attr + if model_key and str(model_key).strip() + ) + + # Match if target matches any of the possible names + if target_model_name in possible_names: + # Check if target is in available_models dict - use dedicated instance + if ( + available_models_attr + and isinstance(available_models_attr, dict) + and target_model_name in available_models_attr + ): + # Use the dedicated embedding instance from the dict + selected_embedding = available_models_attr[target_model_name] + embedding_model = target_model_name + self.log(f"Found dedicated embedding instance for '{embedding_model}' in available_models dict") + else: + # Traditional identifier match + selected_embedding = emb_obj + embedding_model = self._get_embedding_model_name(emb_obj) + self.log(f"Found matching embedding model: {embedding_model} (matched on: {target_model_name})") + break + + if not selected_embedding: + # Build detailed list of available embeddings with all their identifiers + available_info = [] + for idx, emb in enumerate(embeddings_list): + emb_type = type(emb).__name__ + identifiers = [] + deployment = getattr(emb, "deployment", None) + model = getattr(emb, "model", None) + model_id = getattr(emb, "model_id", None) + model_name = getattr(emb, "model_name", None) + available_models_attr = getattr(emb, "available_models", None) + + if deployment: + identifiers.append(f"deployment='{deployment}'") + if model: + identifiers.append(f"model='{model}'") + if model_id: + identifiers.append(f"model_id='{model_id}'") + if model_name: + identifiers.append(f"model_name='{model_name}'") + + # Add combined identifier as an option + if deployment and model and deployment != model: + identifiers.append(f"combined='{deployment}:{model}'") + + # Add available_models dict if present + if available_models_attr and isinstance(available_models_attr, dict): + identifiers.append(f"available_models={list(available_models_attr.keys())}") + + 
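# Example of one rendered entry (hypothetical identifiers, assuming an Azure
# OpenAI embeddings object):
#   [0] AzureOpenAIEmbeddings: deployment='text-embedding-ada-002',
#       model='text-embedding-3-large',
#       combined='text-embedding-ada-002:text-embedding-3-large'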
available_info.append( + f" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}" + ) + + msg = ( + f"Embedding model '{target_model_name}' not found in available embeddings.\n\n" + f"Available embeddings:\n" + "\n".join(available_info) + "\n\n" + "Please set 'embedding_model_name' to one of the identifier values shown above " + "(use the value after the '=' sign, without quotes).\n" + "For duplicate deployments, use the 'combined' format.\n" + "Or leave it empty to use the first embedding." + ) + raise ValueError(msg) + else: + # Use first embedding if no model name specified + selected_embedding = embeddings_list[0] + embedding_model = self._get_embedding_model_name(selected_embedding) + self.log(f"No embedding_model_name specified, using first embedding: {embedding_model}") + dynamic_field_name = get_embedding_field_name(embedding_model) - self.log(f"Using embedding model: {embedding_model}") + logger.info(f"Selected embedding model for ingestion: '{embedding_model}'") + self.log(f"Using embedding model for ingestion: {embedding_model}") self.log(f"Dynamic vector field: {dynamic_field_name}") + # Log embedding details for debugging + if hasattr(selected_embedding, "deployment"): + logger.info(f"Embedding deployment: {selected_embedding.deployment}") + if hasattr(selected_embedding, "model"): + logger.info(f"Embedding model: {selected_embedding.model}") + if hasattr(selected_embedding, "model_id"): + logger.info(f"Embedding model_id: {selected_embedding.model_id}") + if hasattr(selected_embedding, "dimensions"): + logger.info(f"Embedding dimensions: {selected_embedding.dimensions}") + if hasattr(selected_embedding, "available_models"): + logger.info(f"Embedding available_models: {selected_embedding.available_models}") + + # No model switching needed - each model in available_models has its own dedicated instance + # The selected_embedding is already configured correctly for the target model + logger.info(f"Using embedding instance for '{embedding_model}' - pre-configured and ready to use") + # Extract texts and metadata from documents texts = [] metadatas = [] # Process docs_metadata table input into a dict additional_metadata = {} + logger.debug(f"[LF] Docs metadata {self.docs_metadata}") if hasattr(self, "docs_metadata") and self.docs_metadata: logger.info(f"[LF] Docs metadata {self.docs_metadata}") if isinstance(self.docs_metadata[-1], Data): @@ -664,23 +864,27 @@ class OpenSearchVectorStoreComponent(LCVectorStoreComponent): metadatas.append(data_copy) self.log(metadatas) - if not self.embedding: - msg = "Embedding handle is required to embed documents." 
- raise ValueError(msg) # Generate embeddings (threaded for concurrency) with retries def embed_chunk(chunk_text: str) -> list[float]: - return self.embedding.embed_documents([chunk_text])[0] + return selected_embedding.embed_documents([chunk_text])[0] - vectors: Optional[List[List[float]]] = None - last_exception: Optional[Exception] = None + vectors: list[list[float]] | None = None + last_exception: Exception | None = None delay = 1.0 attempts = 0 + max_attempts = 3 - while attempts < 3: + while attempts < max_attempts: attempts += 1 try: - max_workers = min(max(len(texts), 1), 8) + # Restrict concurrency for IBM/Watsonx models to avoid rate limits + is_ibm = (embedding_model and "ibm" in str(embedding_model).lower()) or ( + selected_embedding and "watsonx" in type(selected_embedding).__name__.lower() + ) + logger.debug(f"Is IBM: {is_ibm}") + max_workers = 1 if is_ibm else min(max(len(texts), 1), 8) + with ThreadPoolExecutor(max_workers=max_workers) as executor: futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)} vectors = [None] * len(texts) @@ -690,16 +894,17 @@ class OpenSearchVectorStoreComponent(LCVectorStoreComponent): break except Exception as exc: last_exception = exc - if attempts >= 3: + if attempts >= max_attempts: logger.error( - "Embedding generation failed after retries", + f"Embedding generation failed for model {embedding_model} after retries", error=str(exc), ) raise logger.warning( - "Threaded embedding generation failed (attempt %s/%s), retrying in %.1fs", + "Threaded embedding generation failed for model %s (attempt %s/%s), retrying in %.1fs", + embedding_model, attempts, - 3, + max_attempts, delay, ) time.sleep(delay) @@ -707,11 +912,13 @@ class OpenSearchVectorStoreComponent(LCVectorStoreComponent): if vectors is None: raise RuntimeError( - f"Embedding generation failed: {last_exception}" if last_exception else "Embedding generation failed" + f"Embedding generation failed for {embedding_model}: {last_exception}" + if last_exception + else f"Embedding generation failed for {embedding_model}" ) if not vectors: - self.log("No vectors generated from documents.") + self.log(f"No vectors generated from documents for model {embedding_model}.") return # Get vector dimension for mapping @@ -746,9 +953,7 @@ class OpenSearchVectorStoreComponent(LCVectorStoreComponent): client.indices.create(index=self.index_name, body=mapping) except RequestError as creation_error: if creation_error.error != "resource_already_exists_exception": - logger.warning( - f"Failed to create index '{self.index_name}': {creation_error}" - ) + logger.warning(f"Failed to create index '{self.index_name}': {creation_error}") # Ensure the dynamic field exists in the index self._ensure_embedding_field_mapping( @@ -763,6 +968,8 @@ class OpenSearchVectorStoreComponent(LCVectorStoreComponent): ) self.log(f"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...") + logger.info(f"Will store embeddings in field: {dynamic_field_name}") + logger.info(f"Will tag documents with embedding_model: {embedding_model}") # Use the bulk ingestion with model tracking return_ids = self._bulk_ingest_embeddings( @@ -779,6 +986,9 @@ class OpenSearchVectorStoreComponent(LCVectorStoreComponent): ) self.log(metadatas) + logger.info( + f"Ingestion complete: Successfully indexed {len(return_ids)} documents with model '{embedding_model}'" + ) self.log(f"Successfully indexed {len(return_ids)} documents with model {embedding_model}.") # ---------- helpers for filters 
---------- @@ -853,7 +1063,7 @@ class OpenSearchVectorStoreComponent(LCVectorStoreComponent): context_clauses.append({"terms": {field: values}}) return context_clauses - def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] = None) -> list[str]: + def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]: """Detect which embedding models have documents in the index. Uses aggregation to find all unique embedding_model values, optionally @@ -867,26 +1077,13 @@ class OpenSearchVectorStoreComponent(LCVectorStoreComponent): List of embedding model names found in the index """ try: - agg_query = { - "size": 0, - "aggs": { - "embedding_models": { - "terms": { - "field": "embedding_model", - "size": 10 - } - } - } - } + agg_query = {"size": 0, "aggs": {"embedding_models": {"terms": {"field": "embedding_model", "size": 10}}}} # Apply filters to model detection if any exist if filter_clauses: - agg_query["query"] = { - "bool": { - "filter": filter_clauses - } - } + agg_query["query"] = {"bool": {"filter": filter_clauses}} + logger.debug(f"Model detection query: {agg_query}") result = client.search( index=self.index_name, body=agg_query, @@ -895,21 +1092,33 @@ class OpenSearchVectorStoreComponent(LCVectorStoreComponent): buckets = result.get("aggregations", {}).get("embedding_models", {}).get("buckets", []) models = [b["key"] for b in buckets if b["key"]] + # Log detailed bucket info for debugging logger.info( f"Detected embedding models in corpus: {models}" + (f" (with {len(filter_clauses)} filters)" if filter_clauses else "") ) - return models - except Exception as e: + if not models: + total_hits = result.get("hits", {}).get("total", {}) + total_count = total_hits.get("value", 0) if isinstance(total_hits, dict) else total_hits + logger.warning( + f"No embedding_model values found in index '{self.index_name}'. " + f"Total docs in index: {total_count}. " + f"This may indicate documents were indexed without the embedding_model field." + ) + except (OpenSearchException, KeyError, ValueError) as e: logger.warning(f"Failed to detect embedding models: {e}") # Fallback to current model - return [self._get_embedding_model_name()] + fallback_model = self._get_embedding_model_name() + logger.info(f"Using fallback model: {fallback_model}") + return [fallback_model] + else: + return models def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None: """Retrieve flattened mapping properties for the current index.""" try: mapping = client.indices.get_mapping(index=self.index_name) - except Exception as e: + except OpenSearchException as e: logger.warning( f"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata." ) @@ -927,9 +1136,7 @@ class OpenSearchVectorStoreComponent(LCVectorStoreComponent): if not field_name: return False if properties is None: - logger.warning( - f"Mapping metadata unavailable; assuming field '{field_name}' is usable." 
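# Shape of the model-detection aggregation used above (a sketch; bucket keys
# are whatever raw model names documents were tagged with at ingestion):
#   request:  {"size": 0, "aggs": {"embedding_models":
#                  {"terms": {"field": "embedding_model", "size": 10}}}}
#   response: {"aggregations": {"embedding_models": {"buckets":
#                  [{"key": "text-embedding-3-small", "doc_count": 42}, ...]}}}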
- ) + logger.warning(f"Mapping metadata unavailable; assuming field '{field_name}' is usable.") return True field_def = properties.get(field_name) if not isinstance(field_def, dict): @@ -938,10 +1145,35 @@ class OpenSearchVectorStoreComponent(LCVectorStoreComponent): return True nested_props = field_def.get("properties") - if isinstance(nested_props, dict) and nested_props.get("type") == "knn_vector": - return True + return bool(isinstance(nested_props, dict) and nested_props.get("type") == "knn_vector") - return False + def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None: + """Get the dimension of a knn_vector field from the index mapping. + + Args: + properties: Index properties from mapping + field_name: Name of the vector field + + Returns: + Dimension of the field, or None if not found + """ + if not field_name or properties is None: + return None + + field_def = properties.get(field_name) + if not isinstance(field_def, dict): + return None + + # Check direct knn_vector field + if field_def.get("type") == "knn_vector": + return field_def.get("dimension") + + # Check nested properties + nested_props = field_def.get("properties") + if isinstance(nested_props, dict) and nested_props.get("type") == "knn_vector": + return nested_props.get("dimension") + + return None # ---------- search (multi-model hybrid) ---------- def search(self, query: str | None = None) -> list[dict[str, Any]]: @@ -985,6 +1217,11 @@ class OpenSearchVectorStoreComponent(LCVectorStoreComponent): msg = "Embedding is required to run hybrid search (KNN + keyword)." raise ValueError(msg) + # Check if embedding is None (fail-safe mode) + if self.embedding is None or (isinstance(self.embedding, list) and all(e is None for e in self.embedding)): + logger.error("Embedding returned None (fail-safe mode enabled). Cannot perform search.") + return [] + # Build filter clauses first so we can use them in model detection filter_clauses = self._coerce_filter_clauses(filter_obj) @@ -995,42 +1232,166 @@ class OpenSearchVectorStoreComponent(LCVectorStoreComponent): logger.warning("No embedding models found in index, using current model") available_models = [self._get_embedding_model_name()] - # Generate embeddings for ALL detected models in parallel + # Generate embeddings for ALL detected models query_embeddings = {} - # Note: Langflow is synchronous, so we can't use true async here - # But we log the intent for parallel processing - logger.info(f"Generating embeddings for {len(available_models)} models") + # Normalize embedding to list + embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding] + # Filter out None values (fail-safe mode) + embeddings_list = [e for e in embeddings_list if e is not None] - original_model_attr = getattr(self.embedding, "model", None) - original_deployment_attr = getattr(self.embedding, "deployment", None) - original_dimensions_attr = getattr(self.embedding, "dimensions", None) + if not embeddings_list: + logger.error( + "No valid embeddings available after filtering None values (fail-safe mode). Cannot perform search." 
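# Sketch of the lookup built below (hypothetical objects and identifiers):
#   embedding_by_model = {
#       "text-embedding-3-small": <OpenAIEmbeddings>,
#       "granite-embedding-107m": <WatsonxEmbeddings>,
#       "text-embedding-ada-002:text-embedding-3-large": <AzureOpenAIEmbeddings>,
#   }
# Each model detected in the index is then looked up here to pick the encoder
# that generates its query vector.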
+ ) + return [] + + # Create a comprehensive map of model names to embedding objects + # Check all possible identifiers (deployment, model, model_id, model_name) + # Also leverage available_models list from EmbeddingsWithModels + # Handle duplicate identifiers by creating combined keys + embedding_by_model = {} + identifier_conflicts = {} # Track which identifiers have conflicts + + for idx, emb_obj in enumerate(embeddings_list): + # Get all possible identifiers for this embedding + identifiers = [] + deployment = getattr(emb_obj, "deployment", None) + model = getattr(emb_obj, "model", None) + model_id = getattr(emb_obj, "model_id", None) + model_name = getattr(emb_obj, "model_name", None) + dimensions = getattr(emb_obj, "dimensions", None) + available_models_attr = getattr(emb_obj, "available_models", None) + + logger.info( + f"Embedding object {idx}: deployment={deployment}, model={model}, " + f"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, " + f"available_models={available_models_attr}" + ) + + # If this embedding has available_models dict, map all models to their dedicated instances + if available_models_attr and isinstance(available_models_attr, dict): + logger.info( + f"Embedding object {idx} provides {len(available_models_attr)} models via available_models dict" + ) + for model_name_key, dedicated_embedding in available_models_attr.items(): + if model_name_key and str(model_name_key).strip(): + model_str = str(model_name_key).strip() + if model_str not in embedding_by_model: + # Use the dedicated embedding instance from the dict + embedding_by_model[model_str] = dedicated_embedding + logger.info(f"Mapped available model '{model_str}' to dedicated embedding instance") + else: + # Conflict detected - track it + if model_str not in identifier_conflicts: + identifier_conflicts[model_str] = [embedding_by_model[model_str]] + identifier_conflicts[model_str].append(dedicated_embedding) + logger.warning(f"Available model '{model_str}' has conflict - used by multiple embeddings") + + # Also map traditional identifiers (for backward compatibility) + if deployment: + identifiers.append(str(deployment)) + if model: + identifiers.append(str(model)) + if model_id: + identifiers.append(str(model_id)) + if model_name: + identifiers.append(str(model_name)) + + # Map all identifiers to this embedding object + for identifier in identifiers: + if identifier not in embedding_by_model: + embedding_by_model[identifier] = emb_obj + logger.info(f"Mapped identifier '{identifier}' to embedding object {idx}") + else: + # Conflict detected - track it + if identifier not in identifier_conflicts: + identifier_conflicts[identifier] = [embedding_by_model[identifier]] + identifier_conflicts[identifier].append(emb_obj) + logger.warning(f"Identifier '{identifier}' has conflict - used by multiple embeddings") + + # For embeddings with model+deployment, create combined identifier + # This helps when deployment is the same but model differs + if deployment and model and deployment != model: + combined_id = f"{deployment}:{model}" + if combined_id not in embedding_by_model: + embedding_by_model[combined_id] = emb_obj + logger.info(f"Created combined identifier '{combined_id}' for embedding object {idx}") + + # Log conflicts + if identifier_conflicts: + logger.warning( + f"Found {len(identifier_conflicts)} conflicting identifiers. " + f"Consider using combined format 'deployment:model' or specifying unique model names." 
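# Conflict sketch (hypothetical): two embeddings sharing deployment
# 'embeddings-prod' collide on that identifier, so only their combined keys,
# e.g. 'embeddings-prod:text-embedding-3-small' and
# 'embeddings-prod:text-embedding-3-large', remain unambiguous.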
+ ) + for conflict_id, emb_list in identifier_conflicts.items(): + logger.warning(f" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier") + + logger.info(f"Generating embeddings for {len(available_models)} models in index") + logger.info(f"Available embedding identifiers: {list(embedding_by_model.keys())}") + self.log(f"[SEARCH] Models detected in index: {available_models}") + self.log(f"[SEARCH] Available embedding identifiers: {list(embedding_by_model.keys())}") + + # Track matching status for debugging + matched_models = [] + unmatched_models = [] for model_name in available_models: try: - # In a real async environment, these would run in parallel - # For now, they run sequentially - if hasattr(self.embedding, "model"): - setattr(self.embedding, "model", model_name) - if hasattr(self.embedding, "deployment"): - setattr(self.embedding, "deployment", model_name) - if hasattr(self.embedding, "dimensions"): - setattr(self.embedding, "dimensions", None) - vec = self.embedding.embed_query(q) - query_embeddings[model_name] = vec - logger.info(f"Generated embedding for model: {model_name}") - except Exception as e: - logger.error(f"Failed to generate embedding for {model_name}: {e}") + # Check if we have an embedding object for this model + if model_name in embedding_by_model: + # Use the matching embedding object directly + emb_obj = embedding_by_model[model_name] + emb_deployment = getattr(emb_obj, "deployment", None) + emb_model = getattr(emb_obj, "model", None) + emb_model_id = getattr(emb_obj, "model_id", None) + emb_dimensions = getattr(emb_obj, "dimensions", None) + emb_available_models = getattr(emb_obj, "available_models", None) - if hasattr(self.embedding, "model"): - setattr(self.embedding, "model", original_model_attr) - if hasattr(self.embedding, "deployment"): - setattr(self.embedding, "deployment", original_deployment_attr) - if hasattr(self.embedding, "dimensions"): - setattr(self.embedding, "dimensions", original_dimensions_attr) + logger.info( + f"Using embedding object for model '{model_name}': " + f"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, " + f"dimensions={emb_dimensions}" + ) + + # Check if this is a dedicated instance from available_models dict + if emb_available_models and isinstance(emb_available_models, dict): + logger.info( + f"Model '{model_name}' using dedicated instance from available_models dict " + f"(pre-configured with correct model and dimensions)" + ) + + # Use the embedding instance directly - no model switching needed! + vec = emb_obj.embed_query(q) + query_embeddings[model_name] = vec + matched_models.append(model_name) + logger.info(f"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})") + self.log(f"[MATCH] Model '{model_name}' - generated {len(vec)}-dim embedding") + else: + # No matching embedding found for this model + unmatched_models.append(model_name) + logger.warning( + f"No matching embedding found for model '{model_name}'. " + f"This model will be skipped. 
Available identifiers: {list(embedding_by_model.keys())}" + ) + self.log(f"[NO MATCH] Model '{model_name}' - available: {list(embedding_by_model.keys())}") + except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e: + logger.warning(f"Failed to generate embedding for {model_name}: {e}") + self.log(f"[ERROR] Embedding generation failed for '{model_name}': {e}") + + # Log summary of model matching + logger.info(f"Model matching summary: {len(matched_models)} matched, {len(unmatched_models)} unmatched") + self.log(f"[SUMMARY] Model matching: {len(matched_models)} matched, {len(unmatched_models)} unmatched") + if unmatched_models: + self.log(f"[WARN] Unmatched models in index: {unmatched_models}") if not query_embeddings: - msg = "Failed to generate embeddings for any model" + msg = ( + f"Failed to generate embeddings for any model. " + f"Index has models: {available_models}, but no matching embedding objects found. " + f"Available embedding identifiers: {list(embedding_by_model.keys())}" + ) + self.log(f"[FAIL] Search failed: {msg}") raise ValueError(msg) index_properties = self._get_index_properties(client) @@ -1051,6 +1412,7 @@ class OpenSearchVectorStoreComponent(LCVectorStoreComponent): for model_name, embedding_vector in query_embeddings.items(): field_name = get_embedding_field_name(model_name) selected_field = field_name + vector_dim = len(embedding_vector) # Only use the expected dynamic field - no legacy fallback # This prevents dimension mismatches between models @@ -1059,8 +1421,24 @@ class OpenSearchVectorStoreComponent(LCVectorStoreComponent): f"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. " f"Documents must be indexed with this embedding model before querying." ) + self.log(f"[SKIP] Field '{selected_field}' not a knn_vector - skipping model '{model_name}'") continue + # Validate vector dimensions match the field dimensions + field_dim = self._get_field_dimension(index_properties, selected_field) + if field_dim is not None and field_dim != vector_dim: + logger.error( + f"Dimension mismatch for model '{model_name}': " + f"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. " + f"Skipping this model to prevent search errors." + ) + self.log(f"[DIM MISMATCH] Model '{model_name}': query={vector_dim} vs field={field_dim} - skipping") + continue + + logger.info( + f"Adding KNN query for model '{model_name}': field='{selected_field}', " + f"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}" + ) embedding_fields.append(selected_field) base_query = { @@ -1091,14 +1469,16 @@ class OpenSearchVectorStoreComponent(LCVectorStoreComponent): "This may indicate an empty index or missing field mappings. " "Returning empty search results." ) + self.log( + f"[WARN] No valid KNN queries could be built. " + f"Query embeddings generated: {list(query_embeddings.keys())}, " + f"but no matching knn_vector fields found in index." 
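# Dimension-guard example (assumed values): a 1536-dim query vector aimed at a
# field mapped with dimension=768 is skipped by the check above rather than
# sent, since OpenSearch would reject the whole request with a kNN dimension
# error.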
+ ) return [] # Build exists filter - document must have at least one embedding field exists_any_embedding = { - "bool": { - "should": [{"exists": {"field": f}} for f in set(embedding_fields)], - "minimum_should_match": 1 - } + "bool": {"should": [{"exists": {"field": f}} for f in set(embedding_fields)], "minimum_should_match": 1} } # Combine user filters with exists filter @@ -1117,7 +1497,7 @@ class OpenSearchVectorStoreComponent(LCVectorStoreComponent): "dis_max": { "tie_breaker": 0.0, # Take only the best match, no blending "boost": 0.7, # 70% weight for semantic search - "queries": knn_queries_with_candidates + "queries": knn_queries_with_candidates, } }, { @@ -1158,13 +1538,15 @@ class OpenSearchVectorStoreComponent(LCVectorStoreComponent): body["min_score"] = score_threshold logger.info( - f"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models" + f"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models: " + f"{list(query_embeddings.keys())}" ) + self.log(f"[EXEC] Executing search with {len(knn_queries_with_candidates)} KNN queries, limit={limit}") + self.log(f"[EXEC] Embedding models used: {list(query_embeddings.keys())}") + self.log(f"[EXEC] KNN fields being queried: {embedding_fields}") try: - resp = client.search( - index=self.index_name, body=body, params={"terminate_after": 0} - ) + resp = client.search(index=self.index_name, body=body, params={"terminate_after": 0}) except RequestError as e: error_message = str(e) lowered = error_message.lower() @@ -1215,6 +1597,16 @@ class OpenSearchVectorStoreComponent(LCVectorStoreComponent): hits = resp.get("hits", {}).get("hits", []) logger.info(f"Found {len(hits)} results") + self.log(f"[RESULT] Search complete: {len(hits)} results found") + + if len(hits) == 0: + self.log( + f"[EMPTY] Debug info: " + f"models_in_index={available_models}, " + f"matched_models={matched_models}, " + f"knn_fields={embedding_fields}, " + f"filters={len(filter_clauses)} clauses" + ) return [ { @@ -1231,6 +1623,9 @@ class OpenSearchVectorStoreComponent(LCVectorStoreComponent): This is the main interface method that performs the multi-model search using the configured search_query and returns results in Langflow's Data format. + Always builds the vector store (triggering ingestion if needed), then performs + search only if a query is provided. 
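Sketch of the hybrid query body that search() assembles (abridged; assumes
two matched models):

    {"query": {"bool": {
        "should": [
            {"dis_max": {"tie_breaker": 0.0, "boost": 0.7,
                         "queries": [<one knn clause per matched model field>]}},
            <keyword clause carrying the remaining weight>],
        "filter": [<user filters> + <exists-any-embedding-field>]}}}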
+ Returns: List of Data objects containing search results with text and metadata @@ -1238,9 +1633,20 @@ class OpenSearchVectorStoreComponent(LCVectorStoreComponent): Exception: If search operation fails """ try: - raw = self.search(self.search_query or "") + # Always build/cache the vector store to ensure ingestion happens + logger.info(f"Search query: {self.search_query}") + if self._cached_vector_store is None: + self.build_vector_store() + + # Only perform search if query is provided + search_query = (self.search_query or "").strip() + if not search_query: + self.log("No search query provided - ingestion completed, returning empty results") + return [] + + # Perform search with the provided query + raw = self.search(search_query) return [Data(text=hit["page_content"], **hit["metadata"]) for hit in raw] - self.log(self.ingest_data) except Exception as e: self.log(f"search_documents error: {e}") raise @@ -1280,9 +1686,6 @@ class OpenSearchVectorStoreComponent(LCVectorStoreComponent): build_config["jwt_header"]["required"] = is_jwt build_config["bearer_prefix"]["required"] = False - if is_basic: - build_config["jwt_token"]["value"] = "" - return build_config except (KeyError, ValueError) as e: diff --git a/flows/ingestion_flow.json b/flows/ingestion_flow.json index 8d8de7c2..39c73c40 100644 --- a/flows/ingestion_flow.json +++ b/flows/ingestion_flow.json @@ -262,7 +262,7 @@ }, { "animated": false, - "className": "not-running", + "className": "", "data": { "sourceHandle": { "dataType": "AdvancedDynamicFormBuilder", @@ -667,7 +667,7 @@ ], "frozen": false, "icon": "braces", - "last_updated": "2025-11-26T04:50:14.309Z", + "last_updated": "2025-12-03T21:41:00.148Z", "legacy": false, "lf_version": "1.7.0.dev21", "metadata": {}, @@ -717,7 +717,7 @@ "value": "5488df7c-b93f-4f87-a446-b67028bc0813" }, "_frontend_node_folder_id": { - "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" + "value": "79455c62-cdb1-4f14-bf44-8e76acc020a6" }, "_type": "Component", "code": { @@ -1556,7 +1556,15 @@ "trace_as_metadata": true, "type": "NestedDict", "value": { - "do_ocr": false + "do_ocr": false, + "do_picture_classification": false, + "do_picture_description": false, + "do_table_structure": true, + "ocr_engine": "easyocr", + "picture_description_local": { + "prompt": "Describe this image in a few sentences.", + "repo_id": "HuggingFaceTB/SmolVLM-256M-Instruct" + } } }, "file_path": { @@ -2052,7 +2060,7 @@ ], "frozen": false, "icon": "table", - "last_updated": "2025-11-26T04:50:14.433Z", + "last_updated": "2025-12-03T21:41:00.319Z", "legacy": false, "lf_version": "1.7.0.dev21", "metadata": { @@ -2099,7 +2107,7 @@ "value": "5488df7c-b93f-4f87-a446-b67028bc0813" }, "_frontend_node_folder_id": { - "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" + "value": "79455c62-cdb1-4f14-bf44-8e76acc020a6" }, "_type": "Component", "ascending": { @@ -2503,7 +2511,7 @@ ], "frozen": false, "icon": "table", - "last_updated": "2025-11-26T04:50:14.433Z", + "last_updated": "2025-12-03T21:41:00.320Z", "legacy": false, "lf_version": "1.7.0.dev21", "metadata": { @@ -2550,7 +2558,7 @@ "value": "5488df7c-b93f-4f87-a446-b67028bc0813" }, "_frontend_node_folder_id": { - "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" + "value": "79455c62-cdb1-4f14-bf44-8e76acc020a6" }, "_type": "Component", "ascending": { @@ -2954,7 +2962,7 @@ ], "frozen": false, "icon": "table", - "last_updated": "2025-11-26T04:50:14.434Z", + "last_updated": "2025-12-03T21:41:00.320Z", "legacy": false, "lf_version": "1.7.0.dev21", "metadata": { @@ -3001,7 +3009,7 @@ "value": 
"5488df7c-b93f-4f87-a446-b67028bc0813" }, "_frontend_node_folder_id": { - "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" + "value": "79455c62-cdb1-4f14-bf44-8e76acc020a6" }, "_type": "Component", "ascending": { @@ -3444,10 +3452,10 @@ ], "frozen": false, "icon": "OpenSearch", - "last_updated": "2025-11-26T05:11:48.277Z", + "last_updated": "2025-12-03T21:41:24.832Z", "legacy": false, "metadata": { - "code_hash": "000397b17863", + "code_hash": "00bd730431a2", "dependencies": { "dependencies": [ { @@ -3527,7 +3535,7 @@ "value": "5488df7c-b93f-4f87-a446-b67028bc0813" }, "_frontend_node_folder_id": { - "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" + "value": "79455c62-cdb1-4f14-bf44-8e76acc020a6" }, "_type": "Component", "auth_mode": { @@ -3595,7 +3603,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. 
It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
\"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. 
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n client = self.build_client()\n \n # Check if we're in ingestion-only mode (no search query)\n has_search_query = bool((self.search_query or \"\").strip())\n if not has_search_query:\n logger.debug(\"🔄 Ingestion-only mode activated: search 
operations will be skipped\")\n logger.debug(\"Starting ingestion mode...\")\n \n logger.warning(f\"Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n logger.debug(\"[INGESTION] _add_documents_to_vector_store called\")\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n \n logger.debug(f\"[INGESTION] ingest_data type: {type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}\")\n logger.debug(f\"[INGESTION] ingest_data content: {self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}\")\n\n docs = self.ingest_data or []\n if not docs:\n logger.debug(\"✓ Ingestion complete: No documents provided\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n \n # Normalize embedding to list first\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n \n # Filter out None values (fail-safe mode) - do this BEFORE checking if empty\n embeddings_list = [e for e in embeddings_list if e is not None]\n \n # NOW check if we have any valid embeddings left after filtering\n if not embeddings_list:\n logger.warning(\"All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.\")\n self.log(\"Embedding returned None (fail-safe mode enabled). 
Skipping document ingestion.\")\n return\n\n logger.debug(f\"[INGESTION] Valid embeddings after filtering: {len(embeddings_list)}\")\n self.log(f\"Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = []\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n 
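# Surface the dict keys here so the error message below lists every selectable model name.\n 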
identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n logger.info(f\"✓ Selected embedding model for ingestion: '{embedding_model}'\")\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n logger.debug(f\"[LF] Docs metadata {self.docs_metadata}\")\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings (threaded for concurrency) with retries\n 
def embed_chunk(chunk_text: str) -> list[float]:\n return selected_embedding.embed_documents([chunk_text])[0]\n\n vectors: list[list[float]] | None = None\n last_exception: Exception | None = None\n delay = 1.0\n attempts = 0\n max_attempts = 3\n\n while attempts < max_attempts:\n attempts += 1\n try:\n max_workers = min(max(len(texts), 1), 8)\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n vectors = [None] * len(texts)\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n break\n except Exception as exc:\n last_exception = exc\n if attempts >= max_attempts:\n logger.error(\n f\"Embedding generation failed for model {embedding_model} after retries\",\n error=str(exc),\n )\n raise\n logger.warning(\n \"Threaded embedding generation failed for model %s (attempt %s/%s), retrying in %.1fs\",\n embedding_model,\n attempts,\n max_attempts,\n delay,\n )\n time.sleep(delay)\n delay = min(delay * 2, 8.0)\n\n if vectors is None:\n raise RuntimeError(\n f\"Embedding generation failed for {embedding_model}: {last_exception}\"\n if last_exception\n else f\"Embedding generation failed for {embedding_model}\"\n )\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n logger.info(f\"✓ Ingestion 
complete: Successfully indexed {len(return_ids)} documents with model '{embedding_model}'\")\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": {\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n result = client.search(\n index=self.index_name,\n 
body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n return [self._get_embedding_model_name()]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. 
Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Check if every provided embedding is None (fail-safe mode); a missing handle was already rejected above\n if isinstance(self.embedding, list) and all(e is None for e in self.embedding):\n logger.error(\"Embedding returned None (fail-safe mode enabled). Cannot perform search.\")\n return []\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n # Filter out None values (fail-safe mode)\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n if not embeddings_list:\n logger.error(\"No valid embeddings available after filtering None values (fail-safe mode). 
Cannot perform search.\")\n return []\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models and isinstance(available_models, dict):\n logger.info(f\"Embedding object {idx} provides {len(available_models)} models via available_models dict\")\n for model_name_key, dedicated_embedding in available_models.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model '{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. 
\"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n else:\n # No matching embedding found for this model\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. \"\n f\"This model will be skipped. Available models: {list(embedding_by_model.keys())}\"\n )\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n\n if not query_embeddings:\n msg = \"Failed to generate embeddings for any model\"\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. 
\"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n 
lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Always builds the vector store (triggering ingestion if needed), then performs\n search only if a query is provided.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n # Always build/cache the vector store to ensure ingestion happens\n if self._cached_vector_store is None:\n self.build_vector_store()\n \n # Only perform search if query is provided\n search_query = (self.search_query or \"\").strip()\n if not search_query:\n self.log(\"No search query provided - ingestion completed, returning empty results\")\n return []\n \n # Perform search with the provided query\n raw = self.search(search_query)\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if 
field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n" + "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. 
It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
\"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. 
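Note that `_is_aoss_enabled` only duck-types on a `.service` attribute, so any signer-style auth object reporting `service == "aoss"` triggers the serverless engine restrictions. A stand-in illustration (the fake signer class is hypothetical):

```python
class _FakeSigV4Auth:
    """Stand-in for an AWS SigV4 signer auth object with a .service attribute."""

    def __init__(self, service: str) -> None:
        self.service = service


def is_aoss_enabled(http_auth) -> bool:
    # Same duck-typed check as _is_aoss_enabled above.
    return http_auth is not None and hasattr(http_auth, "service") and http_auth.service == "aoss"


assert is_aoss_enabled(_FakeSigV4Auth("aoss")) is True
assert is_aoss_enabled(("admin", "admin")) is False  # basic-auth tuple has no .service
```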
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n client = self.build_client()\n\n # Check if we're in ingestion-only mode (no search query)\n has_search_query = bool((self.search_query or \"\").strip())\n if not has_search_query:\n logger.debug(\"Ingestion-only mode activated: search 
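The bulk request shape used by `_bulk_ingest_embeddings`, reduced to a standalone sketch. The cluster URL, index name, field name, and vectors are toy values:

```python
import uuid

from opensearchpy import OpenSearch, helpers

client = OpenSearch(hosts=["http://localhost:9200"])  # assumed local cluster

texts = ["hello world", "goodbye world"]
vectors = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]  # toy 3-dim embeddings

actions = [
    {
        "_op_type": "index",
        "_index": "langflow",
        "_id": str(uuid.uuid4()),
        "chunk_embedding_toy_model": vec,  # dynamic per-model vector field
        "text": text,
        "embedding_model": "toy_model",  # tracking tag used by model detection
    }
    for text, vec in zip(texts, vectors)
]
helpers.bulk(client, actions, max_chunk_bytes=1 * 1024 * 1024)
```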
operations will be skipped\")\n logger.debug(\"Starting ingestion mode...\")\n\n logger.warning(f\"Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n logger.debug(\"[INGESTION] _add_documents_to_vector_store called\")\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n logger.debug(\n f\"[INGESTION] ingest_data type: \"\n f\"{type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}\"\n )\n logger.debug(\n f\"[INGESTION] ingest_data content: \"\n f\"{self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}\"\n )\n\n docs = self.ingest_data or []\n if not docs:\n logger.debug(\"Ingestion complete: No documents provided\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Normalize embedding to list first\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n # Filter out None values (fail-safe mode) - do this BEFORE checking if empty\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n # NOW check if we have any valid embeddings left after filtering\n if not embeddings_list:\n logger.warning(\"All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.\")\n self.log(\"Embedding returned None (fail-safe mode enabled). 
Skipping document ingestion.\")\n return\n\n logger.debug(f\"[INGESTION] Valid embeddings after filtering: {len(embeddings_list)}\")\n self.log(f\"Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = []\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n 
identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n logger.info(f\"Selected embedding model for ingestion: '{embedding_model}'\")\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n logger.debug(f\"[LF] Docs metadata {self.docs_metadata}\")\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings (threaded for concurrency) with retries\n def 
embed_chunk(chunk_text: str) -> list[float]:\n return selected_embedding.embed_documents([chunk_text])[0]\n\n vectors: list[list[float]] | None = None\n last_exception: Exception | None = None\n delay = 1.0\n attempts = 0\n max_attempts = 3\n\n while attempts < max_attempts:\n attempts += 1\n try:\n # Restrict concurrency for IBM/Watsonx models to avoid rate limits\n is_ibm = (embedding_model and \"ibm\" in str(embedding_model).lower()) or (\n selected_embedding and \"watsonx\" in type(selected_embedding).__name__.lower()\n )\n logger.debug(f\"Is IBM: {is_ibm}\")\n max_workers = 1 if is_ibm else min(max(len(texts), 1), 8)\n\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n vectors = [None] * len(texts)\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n break\n except Exception as exc:\n last_exception = exc\n if attempts >= max_attempts:\n logger.error(\n f\"Embedding generation failed for model {embedding_model} after retries\",\n error=str(exc),\n )\n raise\n logger.warning(\n \"Threaded embedding generation failed for model %s (attempt %s/%s), retrying in %.1fs\",\n embedding_model,\n attempts,\n max_attempts,\n delay,\n )\n time.sleep(delay)\n delay = min(delay * 2, 8.0)\n\n if vectors is None:\n raise RuntimeError(\n f\"Embedding generation failed for {embedding_model}: {last_exception}\"\n if last_exception\n else f\"Embedding generation failed for {embedding_model}\"\n )\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n 
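The retry loop above, boiled down to a standalone pattern: embed chunks concurrently, preserve input order through the futures-to-index map, and back off exponentially between attempts. The embed function here is a stub standing in for the real embedding call:

```python
import time
from concurrent.futures import ThreadPoolExecutor, as_completed


def embed_chunk(chunk: str) -> list[float]:
    return [float(len(chunk))]  # stub embedding


texts = ["a", "bb", "ccc"]
delay, attempts, max_attempts = 1.0, 0, 3
vectors: list[list[float] | None] = []

while attempts < max_attempts:
    attempts += 1
    try:
        with ThreadPoolExecutor(max_workers=min(len(texts), 8)) as executor:
            futures = {executor.submit(embed_chunk, t): i for i, t in enumerate(texts)}
            vectors = [None] * len(texts)
            for future in as_completed(futures):
                vectors[futures[future]] = future.result()  # keep input order
        break
    except Exception:
        if attempts >= max_attempts:
            raise
        time.sleep(delay)
        delay = min(delay * 2, 8.0)  # exponential backoff, capped

assert vectors == [[1.0], [2.0], [3.0]]
```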
embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n logger.info(\n f\"Ingestion complete: Successfully indexed {len(return_ids)} documents with model '{embedding_model}'\"\n )\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": 
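A worked example of the context-style (Format B) conversion performed by `_coerce_filter_clauses`, condensed to the core branching on list length (toy inputs; the full method also skips non-list values and placeholder terms):

```python
filter_obj = {
    "data_sources": ["report.pdf"],  # single value -> term clause
    "owners": ["alice", "bob"],      # multiple values -> terms clause
    "document_types": [],            # empty list -> match-nothing placeholder
}
field_mapping = {"data_sources": "filename", "document_types": "mimetype", "owners": "owner"}

clauses = []
for key, values in filter_obj.items():
    field = field_mapping.get(key, key)
    if not values:
        clauses.append({"term": {field: "__IMPOSSIBLE_VALUE__"}})
    elif len(values) == 1:
        clauses.append({"term": {field: values[0]}})
    else:
        clauses.append({"terms": {field: values}})

assert clauses == [
    {"term": {"filename": "report.pdf"}},
    {"terms": {"owner": ["alice", "bob"]}},
    {"term": {"mimetype": "__IMPOSSIBLE_VALUE__"}},
]
```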
{\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n logger.debug(f\"Model detection query: {agg_query}\")\n result = client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n # Log detailed bucket info for debugging\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n if not models:\n total_hits = result.get(\"hits\", {}).get(\"total\", {})\n total_count = total_hits.get(\"value\", 0) if isinstance(total_hits, dict) else total_hits\n logger.warning(\n f\"No embedding_model values found in index '{self.index_name}'. \"\n f\"Total docs in index: {total_count}. \"\n f\"This may indicate documents were indexed without the embedding_model field.\"\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n fallback_model = self._get_embedding_model_name()\n logger.info(f\"Using fallback model: {fallback_model}\")\n return [fallback_model]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. 
Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Check if embedding is None (fail-safe mode)\n if self.embedding is None or (isinstance(self.embedding, list) and all(e is None for e in self.embedding)):\n logger.error(\"Embedding returned None (fail-safe mode enabled). 
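For reference, `get_mapping` responses are keyed by concrete index name, which is why `_get_index_properties` merges properties across `mapping.values()`. A sketch with a toy response (shape only; the field names are illustrative):

```python
# Toy get_mapping response keyed by backing index name.
mapping = {
    "langflow-000001": {
        "mappings": {
            "properties": {
                "text": {"type": "text"},
                "chunk_embedding_toy_model": {"type": "knn_vector", "dimension": 3},
            }
        }
    }
}

properties = {}
for index_data in mapping.values():
    props = index_data.get("mappings", {}).get("properties", {})
    if isinstance(props, dict):
        properties.update(props)

assert properties["chunk_embedding_toy_model"]["dimension"] == 3
```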
Cannot perform search.\")\n return []\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n # Filter out None values (fail-safe mode)\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n if not embeddings_list:\n logger.error(\n \"No valid embeddings available after filtering None values (fail-safe mode). Cannot perform search.\"\n )\n return []\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models_attr}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models_attr and isinstance(available_models_attr, dict):\n logger.info(\n f\"Embedding object {idx} provides {len(available_models_attr)} models via available_models dict\"\n )\n for model_name_key, dedicated_embedding in available_models_attr.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model '{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - 
track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. \"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n self.log(f\"[SEARCH] Models detected in index: {available_models}\")\n self.log(f\"[SEARCH] Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n # Track matching status for debugging\n matched_models = []\n unmatched_models = []\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n matched_models.append(model_name)\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n self.log(f\"[MATCH] Model '{model_name}' - generated {len(vec)}-dim embedding\")\n else:\n # No matching embedding found for this model\n unmatched_models.append(model_name)\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. \"\n f\"This model will be skipped. 
Available identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[NO MATCH] Model '{model_name}' - available: {list(embedding_by_model.keys())}\")\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n self.log(f\"[ERROR] Embedding generation failed for '{model_name}': {e}\")\n\n # Log summary of model matching\n logger.info(f\"Model matching summary: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n self.log(f\"[SUMMARY] Model matching: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n if unmatched_models:\n self.log(f\"[WARN] Unmatched models in index: {unmatched_models}\")\n\n if not query_embeddings:\n msg = (\n f\"Failed to generate embeddings for any model. \"\n f\"Index has models: {available_models}, but no matching embedding objects found. \"\n f\"Available embedding identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[FAIL] Search failed: {msg}\")\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n self.log(f\"[SKIP] Field '{selected_field}' not a knn_vector - skipping model '{model_name}'\")\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n self.log(f\"[DIM MISMATCH] Model '{model_name}': query={vector_dim} vs field={field_dim} - skipping\")\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. 
Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n self.log(\n f\"[WARN] No valid KNN queries could be built. \"\n f\"Query embeddings generated: {list(query_embeddings.keys())}, \"\n f\"but no matching knn_vector fields found in index.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(\n f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models: \"\n f\"{list(query_embeddings.keys())}\"\n )\n self.log(f\"[EXEC] Executing search with {len(knn_queries_with_candidates)} KNN queries, limit={limit}\")\n self.log(f\"[EXEC] Embedding models used: {list(query_embeddings.keys())}\")\n self.log(f\"[EXEC] KNN fields being queried: {embedding_fields}\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n 
fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n self.log(f\"[RESULT] Search complete: {len(hits)} results found\")\n\n if len(hits) == 0:\n self.log(\n f\"[EMPTY] Debug info: \"\n f\"models_in_index={available_models}, \"\n f\"matched_models={matched_models}, \"\n f\"knn_fields={embedding_fields}, \"\n f\"filters={len(filter_clauses)} clauses\"\n )\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Always builds the vector store (triggering ingestion if needed), then performs\n search only if a query is provided.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n # Always build/cache the vector store to ensure ingestion happens\n logger.info(f\"Search query: {self.search_query}\")\n if self._cached_vector_store is None:\n self.build_vector_store()\n\n # Only perform search if query is provided\n search_query = (self.search_query or \"\").strip()\n if not search_query:\n self.log(\"No search query provided - ingestion completed, returning empty results\")\n return []\n\n # Perform search with the provided query\n raw = self.search(search_query)\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n 
build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n" }, "docs_metadata": { "_input_type": "TableInput", @@ -4155,7 +4163,7 @@ ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-26T04:50:14.312Z", + "last_updated": "2025-12-03T21:41:00.158Z", "legacy": false, "lf_version": "1.7.0.dev21", "metadata": { @@ -4223,7 +4231,7 @@ "value": "5488df7c-b93f-4f87-a446-b67028bc0813" }, "_frontend_node_folder_id": { - "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" + "value": "79455c62-cdb1-4f14-bf44-8e76acc020a6" }, "_type": "Component", "api_base": { @@ -4436,11 +4444,7 @@ "load_from_db": false, "name": "model", "options": [ - "ibm/granite-embedding-278m-multilingual", - "ibm/slate-125m-english-rtrvr-v2", - "ibm/slate-30m-english-rtrvr-v2", - "intfloat/multilingual-e5-large", - "sentence-transformers/all-minilm-l6-v2" + "ibm/granite-embedding-278m-multilingual" ], "options_metadata": [], "override_skip": false, @@ -4684,7 +4688,7 @@ ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-26T04:50:14.312Z", + "last_updated": "2025-12-03T21:41:00.159Z", "legacy": false, "lf_version": "1.7.0.dev21", "metadata": { @@ -4752,7 +4756,7 @@ "value": "5488df7c-b93f-4f87-a446-b67028bc0813" }, "_frontend_node_folder_id": { - "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" + "value": "79455c62-cdb1-4f14-bf44-8e76acc020a6" }, "_type": "Component", "api_base": { @@ -4965,8 +4969,9 @@ "load_from_db": false, "name": "model", "options": [ - "bge-large:latest", - "qwen3-embedding:4b" + "embeddinggemma:latest", + "mxbai-embed-large:latest", + "nomic-embed-text:latest" ], "options_metadata": [], "override_skip": false, @@ -4981,7 +4986,7 @@ "trace_as_metadata": true, "track_in_telemetry": true, "type": "str", - "value": "" + "value": "embeddinggemma:latest" }, "model_kwargs": { "_input_type": "DictInput", @@ -5210,7 +5215,7 @@ ], "frozen": false, "icon": "binary", - "last_updated": "2025-11-26T04:50:28.061Z", + "last_updated": "2025-12-03T21:41:00.159Z", "legacy": false, "lf_version": "1.7.0.dev21", "metadata": { @@ -5278,7 +5283,7 @@ "value": "5488df7c-b93f-4f87-a446-b67028bc0813" }, "_frontend_node_folder_id": { - "value": "131daebd-f11a-4072-9e20-1e1f903d01b0" + "value": "79455c62-cdb1-4f14-bf44-8e76acc020a6" }, "_type": "Component", "api_base": { @@ -5703,17 +5708,16 @@ } ], "viewport": { - "x": -579.5279887575305, - "y": -602.558848067286, - "zoom": 0.6251924232576567 + "x": -848.3573799283768, + "y": -648.7033245837173, + "zoom": 0.6472397864500404 } }, "description": "Load your data for chat context with Retrieval Augmented Generation.", "endpoint_name": null, "id": "5488df7c-b93f-4f87-a446-b67028bc0813", "is_component": false, - "locked": true, - "last_tested_version": "1.7.0.dev19", + "last_tested_version": "1.7.0.dev21", "name": "OpenSearch Ingestion Flow", "tags": [ "openai", diff --git a/flows/openrag_agent.json b/flows/openrag_agent.json index 700808a9..b307b118 100644 --- a/flows/openrag_agent.json +++ b/flows/openrag_agent.json @@ -2910,7 +2910,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import 
ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. 
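Worked examples of the field-name normalization above, assuming the two helpers defined in this module are in scope; the model names are illustrative:

```python
assert normalize_model_name("text-embedding-3-small") == "text_embedding_3_small"
assert (
    normalize_model_name("ibm/granite-embedding-278m-multilingual")
    == "ibm_granite_embedding_278m_multilingual"
)
assert get_embedding_field_name("embeddinggemma:latest") == "chunk_embedding_embeddinggemma_latest"
```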
It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
\"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. 
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n client = self.build_client()\n\n # Check if we're in ingestion-only mode (no search query)\n has_search_query = bool((self.search_query or \"\").strip())\n if not has_search_query:\n logger.debug(\"Ingestion-only mode activated: search 
operations will be skipped\")\n logger.debug(\"Starting ingestion mode...\")\n\n logger.warning(f\"Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n logger.debug(\"[INGESTION] _add_documents_to_vector_store called\")\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n logger.debug(\n f\"[INGESTION] ingest_data type: \"\n f\"{type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}\"\n )\n logger.debug(\n f\"[INGESTION] ingest_data content: \"\n f\"{self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}\"\n )\n\n docs = self.ingest_data or []\n if not docs:\n logger.debug(\"Ingestion complete: No documents provided\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Normalize embedding to list first\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n # Filter out None values (fail-safe mode) - do this BEFORE checking if empty\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n # NOW check if we have any valid embeddings left after filtering\n if not embeddings_list:\n logger.warning(\"All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.\")\n self.log(\"Embedding returned None (fail-safe mode enabled). 
Skipping document ingestion.\")\n return\n\n logger.debug(f\"[INGESTION] Valid embeddings after filtering: {len(embeddings_list)}\")\n self.log(f\"Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = []\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n 
identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n logger.info(f\"Selected embedding model for ingestion: '{embedding_model}'\")\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n logger.debug(f\"[LF] Docs metadata {self.docs_metadata}\")\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings (threaded for concurrency) with retries\n def 
embed_chunk(chunk_text: str) -> list[float]:\n return selected_embedding.embed_documents([chunk_text])[0]\n\n vectors: list[list[float]] | None = None\n last_exception: Exception | None = None\n delay = 1.0\n attempts = 0\n max_attempts = 3\n\n while attempts < max_attempts:\n attempts += 1\n try:\n max_workers = min(max(len(texts), 1), 8)\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n vectors = [None] * len(texts)\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n break\n except Exception as exc:\n last_exception = exc\n if attempts >= max_attempts:\n logger.error(\n f\"Embedding generation failed for model {embedding_model} after retries\",\n error=str(exc),\n )\n raise\n logger.warning(\n f\"Threaded embedding generation failed for model {embedding_model} \"\n f\"(attempt {attempts}/{max_attempts}), retrying in {delay:.1f}s\"\n )\n time.sleep(delay)\n delay = min(delay * 2, 8.0)\n\n if vectors is None:\n raise RuntimeError(\n f\"Embedding generation failed for {embedding_model}: {last_exception}\"\n if last_exception\n else f\"Embedding generation failed for {embedding_model}\"\n )\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n\n logger.info(\n f\"Ingestion complete: 
Successfully indexed {len(return_ids)} documents with model '{embedding_model}'\"\n )\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": {\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n logger.debug(f\"Model detection query: {agg_query}\")\n result = 
client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n # Log detailed bucket info for debugging\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n if not models:\n total_hits = result.get(\"hits\", {}).get(\"total\", {})\n total_count = total_hits.get(\"value\", 0) if isinstance(total_hits, dict) else total_hits\n logger.warning(\n f\"No embedding_model values found in index '{self.index_name}'. \"\n f\"Total docs in index: {total_count}. \"\n f\"This may indicate documents were indexed without the embedding_model field.\"\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n fallback_model = self._get_embedding_model_name()\n logger.info(f\"Using fallback model: {fallback_model}\")\n return [fallback_model]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. 
Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Check if embedding is None (fail-safe mode)\n if self.embedding is None or (isinstance(self.embedding, list) and all(e is None for e in self.embedding)):\n logger.error(\"Embedding returned None (fail-safe mode enabled). Cannot perform search.\")\n return []\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n # Filter out None values (fail-safe mode)\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n if not embeddings_list:\n logger.error(\n \"No valid embeddings available after filtering None values (fail-safe mode). 
Cannot perform search.\"\n )\n return []\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models_attr}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models_attr and isinstance(available_models_attr, dict):\n logger.info(\n f\"Embedding object {idx} provides {len(available_models_attr)} models via available_models dict\"\n )\n for model_name_key, dedicated_embedding in available_models_attr.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model '{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. 
\"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n self.log(f\"[SEARCH] Models detected in index: {available_models}\")\n self.log(f\"[SEARCH] Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n # Track matching status for debugging\n matched_models = []\n unmatched_models = []\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n matched_models.append(model_name)\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n self.log(f\"[MATCH] Model '{model_name}' - generated {len(vec)}-dim embedding\")\n else:\n # No matching embedding found for this model\n unmatched_models.append(model_name)\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. \"\n f\"This model will be skipped. Available identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[NO MATCH] Model '{model_name}' - available: {list(embedding_by_model.keys())}\")\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n self.log(f\"[ERROR] Embedding generation failed for '{model_name}': {e}\")\n\n # Log summary of model matching\n logger.info(f\"Model matching summary: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n self.log(f\"[SUMMARY] Model matching: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n if unmatched_models:\n self.log(f\"[WARN] Unmatched models in index: {unmatched_models}\")\n\n if not query_embeddings:\n msg = (\n f\"Failed to generate embeddings for any model. \"\n f\"Index has models: {available_models}, but no matching embedding objects found. 
\"\n f\"Available embedding identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[FAIL] Search failed: {msg}\")\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n self.log(f\"[SKIP] Field '{selected_field}' not a knn_vector - skipping model '{model_name}'\")\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n self.log(f\"[DIM MISMATCH] Model '{model_name}': query={vector_dim} vs field={field_dim} - skipping\")\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n self.log(\n f\"[WARN] No valid KNN queries could be built. 
\"\n f\"Query embeddings generated: {list(query_embeddings.keys())}, \"\n f\"but no matching knn_vector fields found in index.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(\n f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models: \"\n f\"{list(query_embeddings.keys())}\"\n )\n self.log(f\"[EXEC] Executing search with {len(knn_queries_with_candidates)} KNN queries, limit={limit}\")\n self.log(f\"[EXEC] Embedding models used: {list(query_embeddings.keys())}\")\n self.log(f\"[EXEC] KNN fields being queried: {embedding_fields}\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if 
use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n self.log(f\"[RESULT] Search complete: {len(hits)} results found\")\n\n if len(hits) == 0:\n self.log(\n f\"[EMPTY] Debug info: \"\n f\"models_in_index={available_models}, \"\n f\"matched_models={matched_models}, \"\n f\"knn_fields={embedding_fields}, \"\n f\"filters={len(filter_clauses)} clauses\"\n )\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Always builds the vector store (triggering ingestion if needed), then performs\n search only if a query is provided.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n # Always build/cache the vector store to ensure ingestion happens\n logger.info(f\"Search query: {self.search_query}\")\n if self._cached_vector_store is None:\n self.build_vector_store()\n\n # Only perform search if query is provided\n search_query = (self.search_query or \"\").strip()\n if not search_query:\n self.log(\"No search query provided - ingestion completed, returning empty results\")\n return []\n\n # Perform search with the provided query\n raw = self.search(search_query)\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return 
build_config" + "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. 
It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
\"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. 
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n client = self.build_client()\n\n # Check if we're in ingestion-only mode (no search query)\n has_search_query = bool((self.search_query or \"\").strip())\n if not has_search_query:\n logger.debug(\"Ingestion-only mode activated: search 
operations will be skipped\")\n logger.debug(\"Starting ingestion mode...\")\n\n logger.warning(f\"Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n logger.debug(\"[INGESTION] _add_documents_to_vector_store called\")\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n logger.debug(\n f\"[INGESTION] ingest_data type: \"\n f\"{type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}\"\n )\n logger.debug(\n f\"[INGESTION] ingest_data content: \"\n f\"{self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}\"\n )\n\n docs = self.ingest_data or []\n if not docs:\n logger.debug(\"Ingestion complete: No documents provided\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Normalize embedding to list first\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n # Filter out None values (fail-safe mode) - do this BEFORE checking if empty\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n # NOW check if we have any valid embeddings left after filtering\n if not embeddings_list:\n logger.warning(\"All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.\")\n self.log(\"Embedding returned None (fail-safe mode enabled). 
Skipping document ingestion.\")\n return\n\n logger.debug(f\"[INGESTION] Valid embeddings after filtering: {len(embeddings_list)}\")\n self.log(f\"Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = []\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n 
identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n logger.info(f\"Selected embedding model for ingestion: '{embedding_model}'\")\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n logger.debug(f\"[LF] Docs metadata {self.docs_metadata}\")\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings (threaded for concurrency) with retries\n def 
embed_chunk(chunk_text: str) -> list[float]:\n return selected_embedding.embed_documents([chunk_text])[0]\n\n vectors: list[list[float]] | None = None\n last_exception: Exception | None = None\n delay = 1.0\n attempts = 0\n max_attempts = 3\n\n while attempts < max_attempts:\n attempts += 1\n try:\n # Restrict concurrency for IBM/Watsonx models to avoid rate limits\n is_ibm = (embedding_model and \"ibm\" in str(embedding_model).lower()) or (\n selected_embedding and \"watsonx\" in type(selected_embedding).__name__.lower()\n )\n logger.debug(f\"Is IBM: {is_ibm}\")\n max_workers = 1 if is_ibm else min(max(len(texts), 1), 8)\n\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n vectors = [None] * len(texts)\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n break\n except Exception as exc:\n last_exception = exc\n if attempts >= max_attempts:\n logger.error(\n f\"Embedding generation failed for model {embedding_model} after retries\",\n error=str(exc),\n )\n raise\n logger.warning(\n \"Threaded embedding generation failed for model %s (attempt %s/%s), retrying in %.1fs\",\n embedding_model,\n attempts,\n max_attempts,\n delay,\n )\n time.sleep(delay)\n delay = min(delay * 2, 8.0)\n\n if vectors is None:\n raise RuntimeError(\n f\"Embedding generation failed for {embedding_model}: {last_exception}\"\n if last_exception\n else f\"Embedding generation failed for {embedding_model}\"\n )\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n 
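# embeddings[i] pairs with texts[i]; the thread-pool results were re-ordered by index above.\n 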
embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n logger.info(\n f\"Ingestion complete: Successfully indexed {len(return_ids)} documents with model '{embedding_model}'\"\n )\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": 
{\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n logger.debug(f\"Model detection query: {agg_query}\")\n result = client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n # Log detailed bucket info for debugging\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n if not models:\n total_hits = result.get(\"hits\", {}).get(\"total\", {})\n total_count = total_hits.get(\"value\", 0) if isinstance(total_hits, dict) else total_hits\n logger.warning(\n f\"No embedding_model values found in index '{self.index_name}'. \"\n f\"Total docs in index: {total_count}. \"\n f\"This may indicate documents were indexed without the embedding_model field.\"\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n fallback_model = self._get_embedding_model_name()\n logger.info(f\"Using fallback model: {fallback_model}\")\n return [fallback_model]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. 
Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Check if embedding is None (fail-safe mode)\n if self.embedding is None or (isinstance(self.embedding, list) and all(e is None for e in self.embedding)):\n logger.error(\"Embedding returned None (fail-safe mode enabled). 
Cannot perform search.\")\n return []\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n # Filter out None values (fail-safe mode)\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n if not embeddings_list:\n logger.error(\n \"No valid embeddings available after filtering None values (fail-safe mode). Cannot perform search.\"\n )\n return []\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models_attr}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models_attr and isinstance(available_models_attr, dict):\n logger.info(\n f\"Embedding object {idx} provides {len(available_models_attr)} models via available_models dict\"\n )\n for model_name_key, dedicated_embedding in available_models_attr.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model '{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - 
track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. \"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n self.log(f\"[SEARCH] Models detected in index: {available_models}\")\n self.log(f\"[SEARCH] Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n # Track matching status for debugging\n matched_models = []\n unmatched_models = []\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n matched_models.append(model_name)\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n self.log(f\"[MATCH] Model '{model_name}' - generated {len(vec)}-dim embedding\")\n else:\n # No matching embedding found for this model\n unmatched_models.append(model_name)\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. \"\n f\"This model will be skipped. 
Available identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[NO MATCH] Model '{model_name}' - available: {list(embedding_by_model.keys())}\")\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n self.log(f\"[ERROR] Embedding generation failed for '{model_name}': {e}\")\n\n # Log summary of model matching\n logger.info(f\"Model matching summary: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n self.log(f\"[SUMMARY] Model matching: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n if unmatched_models:\n self.log(f\"[WARN] Unmatched models in index: {unmatched_models}\")\n\n if not query_embeddings:\n msg = (\n f\"Failed to generate embeddings for any model. \"\n f\"Index has models: {available_models}, but no matching embedding objects found. \"\n f\"Available embedding identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[FAIL] Search failed: {msg}\")\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n self.log(f\"[SKIP] Field '{selected_field}' not a knn_vector - skipping model '{model_name}'\")\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n self.log(f\"[DIM MISMATCH] Model '{model_name}': query={vector_dim} vs field={field_dim} - skipping\")\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. 
Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n self.log(\n f\"[WARN] No valid KNN queries could be built. \"\n f\"Query embeddings generated: {list(query_embeddings.keys())}, \"\n f\"but no matching knn_vector fields found in index.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(\n f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models: \"\n f\"{list(query_embeddings.keys())}\"\n )\n self.log(f\"[EXEC] Executing search with {len(knn_queries_with_candidates)} KNN queries, limit={limit}\")\n self.log(f\"[EXEC] Embedding models used: {list(query_embeddings.keys())}\")\n self.log(f\"[EXEC] KNN fields being queried: {embedding_fields}\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n 
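# Fallback path: reuse one already-generated query vector against the legacy single-model field.\n 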
fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n self.log(f\"[RESULT] Search complete: {len(hits)} results found\")\n\n if len(hits) == 0:\n self.log(\n f\"[EMPTY] Debug info: \"\n f\"models_in_index={available_models}, \"\n f\"matched_models={matched_models}, \"\n f\"knn_fields={embedding_fields}, \"\n f\"filters={len(filter_clauses)} clauses\"\n )\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Always builds the vector store (triggering ingestion if needed), then performs\n search only if a query is provided.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n # Always build/cache the vector store to ensure ingestion happens\n logger.info(f\"Search query: {self.search_query}\")\n if self._cached_vector_store is None:\n self.build_vector_store()\n\n # Only perform search if query is provided\n search_query = (self.search_query or \"\").strip()\n if not search_query:\n self.log(\"No search query provided - ingestion completed, returning empty results\")\n return []\n\n # Perform search with the provided query\n raw = self.search(search_query)\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n 
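# Keep requiredness aligned with visibility: only the active auth mode's fields are required.\n 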
build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n" }, "docs_metadata": { "_input_type": "TableInput", @@ -4792,4 +4792,4 @@ "assistants", "agents" ] -} \ No newline at end of file +} diff --git a/flows/openrag_nudges.json b/flows/openrag_nudges.json index 8f19e199..f7f38d2a 100644 --- a/flows/openrag_nudges.json +++ b/flows/openrag_nudges.json @@ -2519,7 +2519,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. 
It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_id > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
\"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
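Two example filter_expression payloads in the formats described above (filenames, owners, and thresholds are placeholders):

import json

# Format 1 - explicit OpenSearch clauses plus limit and score threshold.
explicit = json.dumps({
    "filter": [
        {"term": {"filename": "doc.pdf"}},
        {"terms": {"owner": ["user1", "user2"]}},
    ],
    "limit": 10,
    "score_threshold": 1.6,
})

# Format 2 - context-style mapping; keys are translated to index fields
# (data_sources -> filename, document_types -> mimetype, owners -> owner).
context_style = json.dumps({
    "data_sources": ["file.pdf"],
    "document_types": ["application/pdf"],
    "owners": ["user123"],
})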
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n logger.aerror(msg)\n raise ValueError(msg)\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. 
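A hedged sketch of when the AOSS check fires; it assumes an auth object along the lines of opensearch-py's AWSV4SignerAuth, which carries a service attribute (that class is not used elsewhere in this component and appears here only for illustration):

def is_aoss(http_auth) -> bool:
    # Mirrors _is_aoss_enabled: only signer-style objects whose service is
    # "aoss" match; a basic-auth (user, password) tuple never does.
    return http_auth is not None and getattr(http_auth, "service", None) == "aoss"

assert is_aoss(("admin", "admin")) is False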
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n client = self.build_client()\n\n # Check if we're in ingestion-only mode (no search query)\n has_search_query = bool((self.search_query or \"\").strip())\n if not has_search_query:\n logger.debug(\"Ingestion-only mode activated: search 
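A hypothetical single bulk action as assembled above, to show how the vector, text, model tag, and flattened metadata share one document (AOSS deployments use "id" in place of "_id"):

action = {
    "_op_type": "index",
    "_index": "langflow",
    "chunk_embedding_text_embedding_3_small": [0.01, -0.02],  # truncated vector
    "text": "First chunk of the document.",
    "embedding_model": "text-embedding-3-small",
    "filename": "doc.pdf",  # metadata keys sit at the document root
    "_id": "generated-uuid4",
}
# helpers.bulk(client, [action], max_chunk_bytes=1 * 1024 * 1024)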
operations will be skipped\")\n logger.debug(\"Starting ingestion mode...\")\n\n logger.warning(f\"Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n logger.debug(\"[INGESTION] _add_documents_to_vector_store called\")\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n logger.debug(\n f\"[INGESTION] ingest_data type: \"\n f\"{type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}\"\n )\n logger.debug(\n f\"[INGESTION] ingest_data content: \"\n f\"{self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}\"\n )\n\n docs = self.ingest_data or []\n if not docs:\n logger.debug(\"Ingestion complete: No documents provided\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Normalize embedding to list first\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n # Filter out None values (fail-safe mode) - do this BEFORE checking if empty\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n # NOW check if we have any valid embeddings left after filtering\n if not embeddings_list:\n logger.warning(\"All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.\")\n self.log(\"Embedding returned None (fail-safe mode enabled). 
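What _build_auth_kwargs returns in each mode, and how build_client splices it into the OpenSearch constructor (credentials are placeholders):

# basic mode -> {"http_auth": ("admin", "admin")}
# jwt mode   -> {"headers": {"Authorization": "Bearer <token>"}}
#               (header name and the "Bearer " prefix are configurable)
from opensearchpy import OpenSearch

client = OpenSearch(
    hosts=["http://localhost:9200"],
    use_ssl=True,
    verify_certs=False,
    ssl_assert_hostname=False,
    ssl_show_warn=False,
    http_auth=("admin", "admin"),  # or headers={...} in jwt mode
)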
Skipping document ingestion.\")\n return\n\n logger.debug(f\"[INGESTION] Valid embeddings after filtering: {len(embeddings_list)}\")\n self.log(f\"Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = []\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n 
identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n logger.info(f\"Selected embedding model for ingestion: '{embedding_model}'\")\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n logger.debug(f\"[LF] Docs metadata {self.docs_metadata}\")\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings (threaded for concurrency) with retries\n def 
embed_chunk(chunk_text: str) -> list[float]:\n return selected_embedding.embed_documents([chunk_text])[0]\n\n vectors: list[list[float]] | None = None\n last_exception: Exception | None = None\n delay = 1.0\n attempts = 0\n max_attempts = 3\n\n while attempts < max_attempts:\n attempts += 1\n try:\n max_workers = min(max(len(texts), 1), 8)\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n vectors = [None] * len(texts)\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n break\n except Exception as exc:\n last_exception = exc\n if attempts >= max_attempts:\n logger.error(\n f\"Embedding generation failed for model {embedding_model} after retries\",\n error=str(exc),\n )\n raise\n logger.warning(\n \"Threaded embedding generation failed for model %s (attempt %s/%s), retrying in %.1fs\",\n embedding_model,\n attempts,\n max_attempts,\n delay,\n )\n time.sleep(delay)\n delay = min(delay * 2, 8.0)\n\n if vectors is None:\n raise RuntimeError(\n f\"Embedding generation failed for {embedding_model}: {last_exception}\"\n if last_exception\n else f\"Embedding generation failed for {embedding_model}\"\n )\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n logger.info(\n f\"Ingestion complete: 
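The ingestion retry loop above combines a thread pool with exponential backoff; a stripped-down, self-contained version of the same pattern, where embed_fn stands in for calling selected_embedding.embed_documents on a single chunk:

import time
from concurrent.futures import ThreadPoolExecutor, as_completed

def embed_all(texts: list[str], embed_fn, max_attempts: int = 3) -> list:
    delay = 1.0
    for attempt in range(1, max_attempts + 1):
        try:
            workers = min(max(len(texts), 1), 8)
            with ThreadPoolExecutor(max_workers=workers) as pool:
                futures = {pool.submit(embed_fn, t): i for i, t in enumerate(texts)}
                vectors: list = [None] * len(texts)
                for fut in as_completed(futures):
                    vectors[futures[fut]] = fut.result()  # keep input order
            return vectors
        except Exception:  # noqa: BLE001 - mirrors the component's broad retry
            if attempt == max_attempts:
                raise
            time.sleep(delay)
            delay = min(delay * 2, 8.0)  # exponential backoff, capped at 8s
    return []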
Successfully indexed {len(return_ids)} documents with model '{embedding_model}'\"\n )\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": {\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n logger.debug(f\"Model detection query: {agg_query}\")\n result = 
client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n # Log detailed bucket info for debugging\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n if not models:\n total_hits = result.get(\"hits\", {}).get(\"total\", {})\n total_count = total_hits.get(\"value\", 0) if isinstance(total_hits, dict) else total_hits\n logger.warning(\n f\"No embedding_model values found in index '{self.index_name}'. \"\n f\"Total docs in index: {total_count}. \"\n f\"This may indicate documents were indexed without the embedding_model field.\"\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n fallback_model = self._get_embedding_model_name()\n logger.info(f\"Using fallback model: {fallback_model}\")\n return [fallback_model]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. 
Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Check if embedding is None (fail-safe mode)\n if self.embedding is None or (isinstance(self.embedding, list) and all(e is None for e in self.embedding)):\n logger.error(\"Embedding returned None (fail-safe mode enabled). Cannot perform search.\")\n return []\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n # Filter out None values (fail-safe mode)\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n if not embeddings_list:\n logger.error(\n \"No valid embeddings available after filtering None values (fail-safe mode). 
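The model auto-detection used at the start of the search path is a plain terms aggregation over the embedding_model keyword field; a sketch of the request and the bucket handling (index name and results are illustrative):

agg_query = {
    "size": 0,
    "aggs": {"embedding_models": {"terms": {"field": "embedding_model", "size": 10}}},
    # "query": {"bool": {"filter": filter_clauses}}  # added when filters exist
}
result = client.search(index="langflow", body=agg_query, params={"terminate_after": 0})
buckets = result.get("aggregations", {}).get("embedding_models", {}).get("buckets", [])
models = [b["key"] for b in buckets if b["key"]]
# e.g. models == ["text-embedding-3-small", "text-embedding-3-large"]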
Cannot perform search.\"\n )\n return []\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models_attr}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models_attr and isinstance(available_models_attr, dict):\n logger.info(\n f\"Embedding object {idx} provides {len(available_models_attr)} models via available_models dict\"\n )\n for model_name_key, dedicated_embedding in available_models_attr.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model '{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. 
\"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n self.log(f\"[SEARCH] Models detected in index: {available_models}\")\n self.log(f\"[SEARCH] Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n # Track matching status for debugging\n matched_models = []\n unmatched_models = []\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n matched_models.append(model_name)\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n self.log(f\"[MATCH] Model '{model_name}' - generated {len(vec)}-dim embedding\")\n else:\n # No matching embedding found for this model\n unmatched_models.append(model_name)\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. \"\n f\"This model will be skipped. Available identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[NO MATCH] Model '{model_name}' - available: {list(embedding_by_model.keys())}\")\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n self.log(f\"[ERROR] Embedding generation failed for '{model_name}': {e}\")\n\n # Log summary of model matching\n logger.info(f\"Model matching summary: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n self.log(f\"[SUMMARY] Model matching: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n if unmatched_models:\n self.log(f\"[WARN] Unmatched models in index: {unmatched_models}\")\n\n if not query_embeddings:\n msg = (\n f\"Failed to generate embeddings for any model. \"\n f\"Index has models: {available_models}, but no matching embedding objects found. 
\"\n f\"Available embedding identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[FAIL] Search failed: {msg}\")\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n self.log(f\"[SKIP] Field '{selected_field}' not a knn_vector - skipping model '{model_name}'\")\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n self.log(f\"[DIM MISMATCH] Model '{model_name}': query={vector_dim} vs field={field_dim} - skipping\")\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n self.log(\n f\"[WARN] No valid KNN queries could be built. 
\"\n f\"Query embeddings generated: {list(query_embeddings.keys())}, \"\n f\"but no matching knn_vector fields found in index.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(\n f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models: \"\n f\"{list(query_embeddings.keys())}\"\n )\n self.log(f\"[EXEC] Executing search with {len(knn_queries_with_candidates)} KNN queries, limit={limit}\")\n self.log(f\"[EXEC] Embedding models used: {list(query_embeddings.keys())}\")\n self.log(f\"[EXEC] KNN fields being queried: {embedding_fields}\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if 
use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n self.log(f\"[RESULT] Search complete: {len(hits)} results found\")\n\n if len(hits) == 0:\n self.log(\n f\"[EMPTY] Debug info: \"\n f\"models_in_index={available_models}, \"\n f\"matched_models={matched_models}, \"\n f\"knn_fields={embedding_fields}, \"\n f\"filters={len(filter_clauses)} clauses\"\n )\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Always builds the vector store (triggering ingestion if needed), then performs\n search only if a query is provided.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n # Always build/cache the vector store to ensure ingestion happens\n logger.info(f\"Search query: {self.search_query}\")\n if self._cached_vector_store is None:\n self.build_vector_store()\n\n # Only perform search if query is provided\n search_query = (self.search_query or \"\").strip()\n if not search_query:\n self.log(\"No search query provided - ingestion completed, returning empty results\")\n return []\n\n # Perform search with the provided query\n raw = self.search(search_query)\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return 
build_config" + "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. 
It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
\"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. 
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n client = self.build_client()\n\n # Check if we're in ingestion-only mode (no search query)\n has_search_query = bool((self.search_query or \"\").strip())\n if not has_search_query:\n logger.debug(\"Ingestion-only mode activated: search 
operations will be skipped\")\n logger.debug(\"Starting ingestion mode...\")\n\n logger.warning(f\"Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n logger.debug(\"[INGESTION] _add_documents_to_vector_store called\")\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n logger.debug(\n f\"[INGESTION] ingest_data type: \"\n f\"{type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}\"\n )\n logger.debug(\n f\"[INGESTION] ingest_data content: \"\n f\"{self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}\"\n )\n\n docs = self.ingest_data or []\n if not docs:\n logger.debug(\"Ingestion complete: No documents provided\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Normalize embedding to list first\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n # Filter out None values (fail-safe mode) - do this BEFORE checking if empty\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n # NOW check if we have any valid embeddings left after filtering\n if not embeddings_list:\n logger.warning(\"All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.\")\n self.log(\"Embedding returned None (fail-safe mode enabled). 
Skipping document ingestion.\")\n return\n\n logger.debug(f\"[INGESTION] Valid embeddings after filtering: {len(embeddings_list)}\")\n self.log(f\"Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = []\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n 
identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n logger.info(f\"Selected embedding model for ingestion: '{embedding_model}'\")\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n logger.debug(f\"[LF] Docs metadata {self.docs_metadata}\")\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings (threaded for concurrency) with retries\n def 
embed_chunk(chunk_text: str) -> list[float]:\n return selected_embedding.embed_documents([chunk_text])[0]\n\n vectors: list[list[float]] | None = None\n last_exception: Exception | None = None\n delay = 1.0\n attempts = 0\n max_attempts = 3\n\n while attempts < max_attempts:\n attempts += 1\n try:\n # Restrict concurrency for IBM/Watsonx models to avoid rate limits\n is_ibm = (embedding_model and \"ibm\" in str(embedding_model).lower()) or (\n selected_embedding and \"watsonx\" in type(selected_embedding).__name__.lower()\n )\n logger.debug(f\"Is IBM: {is_ibm}\")\n max_workers = 1 if is_ibm else min(max(len(texts), 1), 8)\n\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n vectors = [None] * len(texts)\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n break\n except Exception as exc:\n last_exception = exc\n if attempts >= max_attempts:\n logger.error(\n f\"Embedding generation failed for model {embedding_model} after retries\",\n error=str(exc),\n )\n raise\n logger.warning(\n \"Threaded embedding generation failed for model %s (attempt %s/%s), retrying in %.1fs\",\n embedding_model,\n attempts,\n max_attempts,\n delay,\n )\n time.sleep(delay)\n delay = min(delay * 2, 8.0)\n\n if vectors is None:\n raise RuntimeError(\n f\"Embedding generation failed for {embedding_model}: {last_exception}\"\n if last_exception\n else f\"Embedding generation failed for {embedding_model}\"\n )\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n 
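# vectors[idx] lines up with texts[idx]; ordering was preserved via the futures index map above\n 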
embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n logger.info(\n f\"Ingestion complete: Successfully indexed {len(return_ids)} documents with model '{embedding_model}'\"\n )\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": 
{\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n logger.debug(f\"Model detection query: {agg_query}\")\n result = client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n # Log detailed bucket info for debugging\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n if not models:\n total_hits = result.get(\"hits\", {}).get(\"total\", {})\n total_count = total_hits.get(\"value\", 0) if isinstance(total_hits, dict) else total_hits\n logger.warning(\n f\"No embedding_model values found in index '{self.index_name}'. \"\n f\"Total docs in index: {total_count}. \"\n f\"This may indicate documents were indexed without the embedding_model field.\"\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n fallback_model = self._get_embedding_model_name()\n logger.info(f\"Using fallback model: {fallback_model}\")\n return [fallback_model]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. 
Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Check if embedding is None (fail-safe mode)\n if self.embedding is None or (isinstance(self.embedding, list) and all(e is None for e in self.embedding)):\n logger.error(\"Embedding returned None (fail-safe mode enabled). 
Cannot perform search.\")\n return []\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n # Filter out None values (fail-safe mode)\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n if not embeddings_list:\n logger.error(\n \"No valid embeddings available after filtering None values (fail-safe mode). Cannot perform search.\"\n )\n return []\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models_attr}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models_attr and isinstance(available_models_attr, dict):\n logger.info(\n f\"Embedding object {idx} provides {len(available_models_attr)} models via available_models dict\"\n )\n for model_name_key, dedicated_embedding in available_models_attr.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model '{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - 
track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. \"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n self.log(f\"[SEARCH] Models detected in index: {available_models}\")\n self.log(f\"[SEARCH] Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n # Track matching status for debugging\n matched_models = []\n unmatched_models = []\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n matched_models.append(model_name)\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n self.log(f\"[MATCH] Model '{model_name}' - generated {len(vec)}-dim embedding\")\n else:\n # No matching embedding found for this model\n unmatched_models.append(model_name)\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. \"\n f\"This model will be skipped. 
Available identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[NO MATCH] Model '{model_name}' - available: {list(embedding_by_model.keys())}\")\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n self.log(f\"[ERROR] Embedding generation failed for '{model_name}': {e}\")\n\n # Log summary of model matching\n logger.info(f\"Model matching summary: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n self.log(f\"[SUMMARY] Model matching: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n if unmatched_models:\n self.log(f\"[WARN] Unmatched models in index: {unmatched_models}\")\n\n if not query_embeddings:\n msg = (\n f\"Failed to generate embeddings for any model. \"\n f\"Index has models: {available_models}, but no matching embedding objects found. \"\n f\"Available embedding identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[FAIL] Search failed: {msg}\")\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n self.log(f\"[SKIP] Field '{selected_field}' not a knn_vector - skipping model '{model_name}'\")\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n self.log(f\"[DIM MISMATCH] Model '{model_name}': query={vector_dim} vs field={field_dim} - skipping\")\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. 
Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n self.log(\n f\"[WARN] No valid KNN queries could be built. \"\n f\"Query embeddings generated: {list(query_embeddings.keys())}, \"\n f\"but no matching knn_vector fields found in index.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(\n f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models: \"\n f\"{list(query_embeddings.keys())}\"\n )\n self.log(f\"[EXEC] Executing search with {len(knn_queries_with_candidates)} KNN queries, limit={limit}\")\n self.log(f\"[EXEC] Embedding models used: {list(query_embeddings.keys())}\")\n self.log(f\"[EXEC] KNN fields being queried: {embedding_fields}\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n 
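# Fall back to a single KNN query against the legacy (pre-multi-model) vector field\n 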
fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n self.log(f\"[RESULT] Search complete: {len(hits)} results found\")\n\n if len(hits) == 0:\n self.log(\n f\"[EMPTY] Debug info: \"\n f\"models_in_index={available_models}, \"\n f\"matched_models={matched_models}, \"\n f\"knn_fields={embedding_fields}, \"\n f\"filters={len(filter_clauses)} clauses\"\n )\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Always builds the vector store (triggering ingestion if needed), then performs\n search only if a query is provided.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n # Always build/cache the vector store to ensure ingestion happens\n logger.info(f\"Search query: {self.search_query}\")\n if self._cached_vector_store is None:\n self.build_vector_store()\n\n # Only perform search if query is provided\n search_query = (self.search_query or \"\").strip()\n if not search_query:\n self.log(\"No search query provided - ingestion completed, returning empty results\")\n return []\n\n # Perform search with the provided query\n raw = self.search(search_query)\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n 
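# Required flags mirror the show flags for the selected auth mode\n 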
build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n" }, "docs_metadata": { "_input_type": "TableInput", @@ -4119,4 +4119,4 @@ "assistants", "agents" ] -} \ No newline at end of file +} diff --git a/flows/openrag_url_mcp.json b/flows/openrag_url_mcp.json index 8bb2e2bb..008e6abd 100644 --- a/flows/openrag_url_mcp.json +++ b/flows/openrag_url_mcp.json @@ -3723,7 +3723,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. 
It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
\"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n            logger.error(msg)\n            raise ValueError(msg)\n\n    def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n        \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n        Amazon OpenSearch Serverless has restrictions on which vector engines\n        can be used. This method ensures the selected engine is compatible.\n\n        Args:\n            is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n            engine: The selected vector search engine\n\n        Raises:\n            ValueError: If AOSS is used with an incompatible engine\n        \"\"\"\n        if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n            msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n            raise ValueError(msg)\n\n    def _is_aoss_enabled(self, http_auth: Any) -> bool:\n        \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n        Args:\n            http_auth: The HTTP authentication object\n\n        Returns:\n            True if AOSS is enabled, False otherwise\n        \"\"\"\n        return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n    def _bulk_ingest_embeddings(\n        self,\n        client: OpenSearch,\n        index_name: str,\n        embeddings: list[list[float]],\n        texts: list[str],\n        metadatas: list[dict] | None = None,\n        ids: list[str] | None = None,\n        vector_field: str = \"vector_field\",\n        text_field: str = \"text\",\n        embedding_model: str = \"unknown\",\n        mapping: dict | None = None,\n        max_chunk_bytes: int | None = 1 * 1024 * 1024,\n        *,\n        is_aoss: bool = False,\n    ) -> list[str]:\n        \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n        This method uses bulk operations to insert documents with their vector\n        embeddings and metadata into the specified OpenSearch index. 
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n client = self.build_client()\n \n # Check if we're in ingestion-only mode (no search query)\n has_search_query = bool((self.search_query or \"\").strip())\n if not has_search_query:\n logger.debug(\"🔄 Ingestion-only mode activated: search 
operations will be skipped\")\n logger.debug(\"Starting ingestion mode...\")\n \n logger.warning(f\"Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n logger.debug(\"[INGESTION] _add_documents_to_vector_store called\")\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n \n logger.debug(f\"[INGESTION] ingest_data type: {type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}\")\n logger.debug(f\"[INGESTION] ingest_data content: {self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}\")\n\n docs = self.ingest_data or []\n if not docs:\n logger.debug(\"✓ Ingestion complete: No documents provided\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n \n # Normalize embedding to list first\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n \n # Filter out None values (fail-safe mode) - do this BEFORE checking if empty\n embeddings_list = [e for e in embeddings_list if e is not None]\n \n # NOW check if we have any valid embeddings left after filtering\n if not embeddings_list:\n logger.warning(\"All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.\")\n self.log(\"Embedding returned None (fail-safe mode enabled). 
Skipping document ingestion.\")\n return\n\n logger.debug(f\"[INGESTION] Valid embeddings after filtering: {len(embeddings_list)}\")\n self.log(f\"Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = []\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n 
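# Editor's example (hypothetical): this renders as\n                        #   available_models=['text-embedding-3-small', 'text-embedding-3-large']\n                        # alongside the deployment/model/model_id/model_name identifiers above.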
identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n logger.info(f\"✓ Selected embedding model for ingestion: '{embedding_model}'\")\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n logger.debug(f\"[LF] Docs metadata {self.docs_metadata}\")\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings (threaded for concurrency) with retries\n 
def embed_chunk(chunk_text: str) -> list[float]:\n return selected_embedding.embed_documents([chunk_text])[0]\n\n vectors: list[list[float]] | None = None\n last_exception: Exception | None = None\n delay = 1.0\n attempts = 0\n max_attempts = 3\n\n while attempts < max_attempts:\n attempts += 1\n try:\n max_workers = min(max(len(texts), 1), 8)\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n vectors = [None] * len(texts)\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n break\n except Exception as exc:\n last_exception = exc\n if attempts >= max_attempts:\n logger.error(\n f\"Embedding generation failed for model {embedding_model} after retries\",\n error=str(exc),\n )\n raise\n logger.warning(\n \"Threaded embedding generation failed for model %s (attempt %s/%s), retrying in %.1fs\",\n embedding_model,\n attempts,\n max_attempts,\n delay,\n )\n time.sleep(delay)\n delay = min(delay * 2, 8.0)\n\n if vectors is None:\n raise RuntimeError(\n f\"Embedding generation failed for {embedding_model}: {last_exception}\"\n if last_exception\n else f\"Embedding generation failed for {embedding_model}\"\n )\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n logger.info(f\"✓ Ingestion 
complete: Successfully indexed {len(return_ids)} documents with model '{embedding_model}'\")\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": {\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n result = client.search(\n index=self.index_name,\n 
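# Editor's note: terminate_after=0 explicitly disables early termination so the\n                # terms aggregation reflects every matching document in the index.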
body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n return [self._get_embedding_model_name()]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. 
Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n \n # Check if embedding is None (fail-safe mode)\n if self.embedding is None or (isinstance(self.embedding, list) and all(e is None for e in self.embedding)):\n logger.error(\"Embedding returned None (fail-safe mode enabled). Cannot perform search.\")\n return []\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n # Filter out None values (fail-safe mode)\n embeddings_list = [e for e in embeddings_list if e is not None]\n \n if not embeddings_list:\n logger.error(\"No valid embeddings available after filtering None values (fail-safe mode). 
Cannot perform search.\")\n return []\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models and isinstance(available_models, dict):\n logger.info(f\"Embedding object {idx} provides {len(available_models)} models via available_models dict\")\n for model_name_key, dedicated_embedding in available_models.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model '{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. 
\"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n else:\n # No matching embedding found for this model\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. \"\n f\"This model will be skipped. Available models: {list(embedding_by_model.keys())}\"\n )\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n\n if not query_embeddings:\n msg = \"Failed to generate embeddings for any model\"\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. 
\"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n 
lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Always builds the vector store (triggering ingestion if needed), then performs\n search only if a query is provided.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n # Always build/cache the vector store to ensure ingestion happens\n if self._cached_vector_store is None:\n self.build_vector_store()\n \n # Only perform search if query is provided\n search_query = (self.search_query or \"\").strip()\n if not search_query:\n self.log(\"No search query provided - ingestion completed, returning empty results\")\n return []\n \n # Perform search with the provided query\n raw = self.search(search_query)\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if 
field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n" + "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. 
It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
\"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. 
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n client = self.build_client()\n\n # Check if we're in ingestion-only mode (no search query)\n has_search_query = bool((self.search_query or \"\").strip())\n if not has_search_query:\n logger.debug(\"Ingestion-only mode activated: search 
operations will be skipped\")\n logger.debug(\"Starting ingestion mode...\")\n\n logger.warning(f\"Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n logger.debug(\"[INGESTION] _add_documents_to_vector_store called\")\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n logger.debug(\n f\"[INGESTION] ingest_data type: \"\n f\"{type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}\"\n )\n logger.debug(\n f\"[INGESTION] ingest_data content: \"\n f\"{self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}\"\n )\n\n docs = self.ingest_data or []\n if not docs:\n logger.debug(\"Ingestion complete: No documents provided\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Normalize embedding to list first\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n # Filter out None values (fail-safe mode) - do this BEFORE checking if empty\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n # NOW check if we have any valid embeddings left after filtering\n if not embeddings_list:\n logger.warning(\"All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.\")\n self.log(\"Embedding returned None (fail-safe mode enabled). 
Skipping document ingestion.\")\n return\n\n logger.debug(f\"[INGESTION] Valid embeddings after filtering: {len(embeddings_list)}\")\n self.log(f\"Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = []\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n 
identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n logger.info(f\"Selected embedding model for ingestion: '{embedding_model}'\")\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n logger.debug(f\"[LF] Docs metadata {self.docs_metadata}\")\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings (threaded for concurrency) with retries\n def 
embed_chunk(chunk_text: str) -> list[float]:\n return selected_embedding.embed_documents([chunk_text])[0]\n\n vectors: list[list[float]] | None = None\n last_exception: Exception | None = None\n delay = 1.0\n attempts = 0\n max_attempts = 3\n\n while attempts < max_attempts:\n attempts += 1\n try:\n # Restrict concurrency for IBM/Watsonx models to avoid rate limits\n is_ibm = (embedding_model and \"ibm\" in str(embedding_model).lower()) or (\n selected_embedding and \"watsonx\" in type(selected_embedding).__name__.lower()\n )\n logger.debug(f\"Is IBM: {is_ibm}\")\n max_workers = 1 if is_ibm else min(max(len(texts), 1), 8)\n\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n vectors = [None] * len(texts)\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n break\n except Exception as exc:\n last_exception = exc\n if attempts >= max_attempts:\n logger.error(\n f\"Embedding generation failed for model {embedding_model} after retries\",\n error=str(exc),\n )\n raise\n logger.warning(\n \"Threaded embedding generation failed for model %s (attempt %s/%s), retrying in %.1fs\",\n embedding_model,\n attempts,\n max_attempts,\n delay,\n )\n time.sleep(delay)\n delay = min(delay * 2, 8.0)\n\n if vectors is None:\n raise RuntimeError(\n f\"Embedding generation failed for {embedding_model}: {last_exception}\"\n if last_exception\n else f\"Embedding generation failed for {embedding_model}\"\n )\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n 
embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n logger.info(\n f\"Ingestion complete: Successfully indexed {len(return_ids)} documents with model '{embedding_model}'\"\n )\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": 
{\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n logger.debug(f\"Model detection query: {agg_query}\")\n result = client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n # Log detailed bucket info for debugging\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n if not models:\n total_hits = result.get(\"hits\", {}).get(\"total\", {})\n total_count = total_hits.get(\"value\", 0) if isinstance(total_hits, dict) else total_hits\n logger.warning(\n f\"No embedding_model values found in index '{self.index_name}'. \"\n f\"Total docs in index: {total_count}. \"\n f\"This may indicate documents were indexed without the embedding_model field.\"\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n fallback_model = self._get_embedding_model_name()\n logger.info(f\"Using fallback model: {fallback_model}\")\n return [fallback_model]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. 
Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Check if embedding is None (fail-safe mode)\n if self.embedding is None or (isinstance(self.embedding, list) and all(e is None for e in self.embedding)):\n logger.error(\"Embedding returned None (fail-safe mode enabled). 
Cannot perform search.\")\n return []\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n # Filter out None values (fail-safe mode)\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n if not embeddings_list:\n logger.error(\n \"No valid embeddings available after filtering None values (fail-safe mode). Cannot perform search.\"\n )\n return []\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models_attr}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models_attr and isinstance(available_models_attr, dict):\n logger.info(\n f\"Embedding object {idx} provides {len(available_models_attr)} models via available_models dict\"\n )\n for model_name_key, dedicated_embedding in available_models_attr.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model '{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - 
track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. \"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n self.log(f\"[SEARCH] Models detected in index: {available_models}\")\n self.log(f\"[SEARCH] Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n # Track matching status for debugging\n matched_models = []\n unmatched_models = []\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n matched_models.append(model_name)\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n self.log(f\"[MATCH] Model '{model_name}' - generated {len(vec)}-dim embedding\")\n else:\n # No matching embedding found for this model\n unmatched_models.append(model_name)\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. \"\n f\"This model will be skipped. 
Available identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[NO MATCH] Model '{model_name}' - available: {list(embedding_by_model.keys())}\")\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n self.log(f\"[ERROR] Embedding generation failed for '{model_name}': {e}\")\n\n # Log summary of model matching\n logger.info(f\"Model matching summary: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n self.log(f\"[SUMMARY] Model matching: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n if unmatched_models:\n self.log(f\"[WARN] Unmatched models in index: {unmatched_models}\")\n\n if not query_embeddings:\n msg = (\n f\"Failed to generate embeddings for any model. \"\n f\"Index has models: {available_models}, but no matching embedding objects found. \"\n f\"Available embedding identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[FAIL] Search failed: {msg}\")\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n self.log(f\"[SKIP] Field '{selected_field}' not a knn_vector - skipping model '{model_name}'\")\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n self.log(f\"[DIM MISMATCH] Model '{model_name}': query={vector_dim} vs field={field_dim} - skipping\")\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. 
Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n self.log(\n f\"[WARN] No valid KNN queries could be built. \"\n f\"Query embeddings generated: {list(query_embeddings.keys())}, \"\n f\"but no matching knn_vector fields found in index.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(\n f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models: \"\n f\"{list(query_embeddings.keys())}\"\n )\n self.log(f\"[EXEC] Executing search with {len(knn_queries_with_candidates)} KNN queries, limit={limit}\")\n self.log(f\"[EXEC] Embedding models used: {list(query_embeddings.keys())}\")\n self.log(f\"[EXEC] KNN fields being queried: {embedding_fields}\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n 
fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n self.log(f\"[RESULT] Search complete: {len(hits)} results found\")\n\n if len(hits) == 0:\n self.log(\n f\"[EMPTY] Debug info: \"\n f\"models_in_index={available_models}, \"\n f\"matched_models={matched_models}, \"\n f\"knn_fields={embedding_fields}, \"\n f\"filters={len(filter_clauses)} clauses\"\n )\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Always builds the vector store (triggering ingestion if needed), then performs\n search only if a query is provided.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n # Always build/cache the vector store to ensure ingestion happens\n logger.info(f\"Search query: {self.search_query}\")\n if self._cached_vector_store is None:\n self.build_vector_store()\n\n # Only perform search if query is provided\n search_query = (self.search_query or \"\").strip()\n if not search_query:\n self.log(\"No search query provided - ingestion completed, returning empty results\")\n return []\n\n # Perform search with the provided query\n raw = self.search(search_query)\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n 
build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n" }, "docs_metadata": { "_input_type": "TableInput", @@ -6062,4 +6062,4 @@ "rag", "q-a" ] -} \ No newline at end of file +} diff --git a/frontend/app/api/queries/useProviderHealthQuery.ts b/frontend/app/api/queries/useProviderHealthQuery.ts index d0f73335..6586e6dd 100644 --- a/frontend/app/api/queries/useProviderHealthQuery.ts +++ b/frontend/app/api/queries/useProviderHealthQuery.ts @@ -3,6 +3,7 @@ import { useQuery, useQueryClient, } from "@tanstack/react-query"; +import { useChat } from "@/contexts/chat-context"; import { useGetSettingsQuery } from "./useGetSettingsQuery"; export interface ProviderHealthDetails { @@ -24,6 +25,7 @@ export interface ProviderHealthResponse { export interface ProviderHealthParams { provider?: "openai" | "ollama" | "watsonx"; + test_completion?: boolean; } // Track consecutive failures for exponential backoff @@ -38,6 +40,9 @@ export const useProviderHealthQuery = ( ) => { const queryClient = useQueryClient(); + // Get chat error state from context (ChatProvider wraps the entire app in layout.tsx) + const { hasChatError, setChatError } = useChat(); + const { data: settings = {} } = useGetSettingsQuery(); async function checkProviderHealth(): Promise { @@ -49,6 +54,12 @@ export const useProviderHealthQuery = ( url.searchParams.set("provider", params.provider); } + // Add test_completion query param if specified or if chat error exists + const testCompletion = params?.test_completion ?? 
hasChatError; + if (testCompletion) { + url.searchParams.set("test_completion", "true"); + } + const response = await fetch(url.toString()); if (response.ok) { @@ -90,7 +101,7 @@ export const useProviderHealthQuery = ( } } - const queryKey = ["provider", "health"]; + const queryKey = ["provider", "health", params?.test_completion]; const failureCountKey = queryKey.join("-"); const queryResult = useQuery( @@ -101,26 +112,32 @@ export const useProviderHealthQuery = ( refetchInterval: (query) => { const data = query.state.data; const status = data?.status; - + // If healthy, reset failure count and check every 30 seconds + // Also reset chat error flag if we're using test_completion=true and it succeeded if (status === "healthy") { failureCountMap.set(failureCountKey, 0); + // If we were checking with test_completion=true due to chat errors, reset the flag + if (hasChatError && setChatError) { + setChatError(false); + } return 30000; } - + // If backend unavailable, use moderate polling if (status === "backend-unavailable") { return 15000; } - + // For unhealthy/error status, use exponential backoff const currentFailures = failureCountMap.get(failureCountKey) || 0; failureCountMap.set(failureCountKey, currentFailures + 1); - + // Exponential backoff: 5s, 10s, 20s, then 30s const backoffDelays = [5000, 10000, 20000, 30000]; - const delay = backoffDelays[Math.min(currentFailures, backoffDelays.length - 1)]; - + const delay = + backoffDelays[Math.min(currentFailures, backoffDelays.length - 1)]; + return delay; }, refetchOnWindowFocus: false, // Disabled to reduce unnecessary calls on tab switches diff --git a/frontend/app/chat/page.tsx b/frontend/app/chat/page.tsx index 358424f3..87ae6b60 100644 --- a/frontend/app/chat/page.tsx +++ b/frontend/app/chat/page.tsx @@ -51,6 +51,7 @@ function ChatPage() { ]); const [input, setInput] = useState(""); const { loading, setLoading } = useLoadingStore(); + const { setChatError } = useChat(); const [asyncMode, setAsyncMode] = useState(true); const [expandedFunctionCalls, setExpandedFunctionCalls] = useState< Set @@ -123,6 +124,8 @@ function ChatPage() { console.error("Streaming error:", error); setLoading(false); setWaitingTooLong(false); + // Set chat error flag to trigger test_completion=true on health checks + setChatError(true); const errorMessage: Message = { role: "assistant", content: @@ -197,6 +200,11 @@ function ChatPage() { const result = await response.json(); console.log("Upload result:", result); + if (!response.ok) { + // Set chat error flag if upload fails + setChatError(true); + } + if (response.status === 201) { // New flow: Got task ID, start tracking with centralized system const taskId = result.task_id || result.id; @@ -255,6 +263,8 @@ function ChatPage() { } } catch (error) { console.error("Upload failed:", error); + // Set chat error flag to trigger test_completion=true on health checks + setChatError(true); const errorMessage: Message = { role: "assistant", content: `❌ Failed to process document. Please try again.`, @@ -858,6 +868,8 @@ function ChatPage() { } } else { console.error("Chat failed:", result.error); + // Set chat error flag to trigger test_completion=true on health checks + setChatError(true); const errorMessage: Message = { role: "assistant", content: "Sorry, I encountered an error. 
Please try again.", @@ -867,6 +879,8 @@ function ChatPage() { } } catch (error) { console.error("Chat error:", error); + // Set chat error flag to trigger test_completion=true on health checks + setChatError(true); const errorMessage: Message = { role: "assistant", content: diff --git a/frontend/app/onboarding/_components/onboarding-content.tsx b/frontend/app/onboarding/_components/onboarding-content.tsx index 7473a916..1127e725 100644 --- a/frontend/app/onboarding/_components/onboarding-content.tsx +++ b/frontend/app/onboarding/_components/onboarding-content.tsx @@ -191,7 +191,7 @@ export function OnboardingContent({ return ( { const errorMessage = error instanceof Error ? error.message : "Upload failed"; console.error("Upload failed", errorMessage); + // Dispatch event that chat context can listen to + // This avoids circular dependency issues + if (typeof window !== "undefined") { + window.dispatchEvent( + new CustomEvent("ingestionFailed", { + detail: { source: "onboarding" }, + }), + ); + } + // Show error toast notification toast.error("Document upload failed", { description: errorMessage, diff --git a/frontend/app/onboarding/page.tsx b/frontend/app/onboarding/page.tsx deleted file mode 100644 index 5cbaa250..00000000 --- a/frontend/app/onboarding/page.tsx +++ /dev/null @@ -1,81 +0,0 @@ -"use client"; - -import { Suspense, useEffect } from "react"; -import { useRouter } from "next/navigation"; -import { DoclingHealthBanner } from "@/components/docling-health-banner"; -import { ProtectedRoute } from "@/components/protected-route"; -import { DotPattern } from "@/components/ui/dot-pattern"; -import { cn } from "@/lib/utils"; -import { useGetSettingsQuery } from "@/app/api/queries/useGetSettingsQuery"; -import OnboardingCard from "./_components/onboarding-card"; - -function LegacyOnboardingPage() { - const router = useRouter(); - const { data: settingsDb, isLoading: isSettingsLoading } = - useGetSettingsQuery(); - - // Redirect if already completed onboarding - useEffect(() => { - if (!isSettingsLoading && settingsDb && settingsDb.edited) { - router.push("/"); - } - }, [isSettingsLoading, settingsDb, router]); - - const handleComplete = () => { - router.push("/"); - }; - - return ( -
-    [markup stripped during extraction: the deleted legacy onboarding layout, including the heading "Connect a model provider" and the OnboardingCard]
- ); -} - -function OnboardingRouter() { - const updatedOnboarding = process.env.UPDATED_ONBOARDING === "true"; - const router = useRouter(); - - useEffect(() => { - if (updatedOnboarding) { - router.push("/new-onboarding"); - } - }, [updatedOnboarding, router]); - - if (updatedOnboarding) { - return null; - } - - return ; -} - -export default function ProtectedOnboardingPage() { - return ( - - Loading onboarding...}> - - - - ); -} diff --git a/frontend/components/knowledge-dropdown.tsx b/frontend/components/knowledge-dropdown.tsx index d0100790..932eb974 100644 --- a/frontend/components/knowledge-dropdown.tsx +++ b/frontend/components/knowledge-dropdown.tsx @@ -238,6 +238,15 @@ export function KnowledgeDropdown() { await uploadFileUtil(file, replace); refetchTasks(); } catch (error) { + // Dispatch event that chat context can listen to + // This avoids circular dependency issues + if (typeof window !== "undefined") { + window.dispatchEvent( + new CustomEvent("ingestionFailed", { + detail: { source: "knowledge-dropdown" }, + }), + ); + } toast.error("Upload failed", { description: error instanceof Error ? error.message : "Unknown error", }); diff --git a/frontend/components/provider-health-banner.tsx b/frontend/components/provider-health-banner.tsx index f83713eb..1a91a601 100644 --- a/frontend/components/provider-health-banner.tsx +++ b/frontend/components/provider-health-banner.tsx @@ -6,6 +6,7 @@ import { useProviderHealthQuery } from "@/app/api/queries/useProviderHealthQuery import type { ModelProvider } from "@/app/settings/_helpers/model-helpers"; import { Banner, BannerIcon, BannerTitle } from "@/components/ui/banner"; import { cn } from "@/lib/utils"; +import { useChat } from "@/contexts/chat-context"; import { Button } from "./ui/button"; interface ProviderHealthBannerProps { @@ -14,13 +15,16 @@ interface ProviderHealthBannerProps { // Custom hook to check provider health status export function useProviderHealth() { + const { hasChatError } = useChat(); const { data: health, isLoading, isFetching, error, isError, - } = useProviderHealthQuery(); + } = useProviderHealthQuery({ + test_completion: hasChatError, // Use test_completion=true when chat errors occur + }); const isHealthy = health?.status === "healthy" && !isError; // Only consider unhealthy if backend is up but provider validation failed diff --git a/frontend/contexts/chat-context.tsx b/frontend/contexts/chat-context.tsx index bee05b98..59b5edeb 100644 --- a/frontend/contexts/chat-context.tsx +++ b/frontend/contexts/chat-context.tsx @@ -79,6 +79,8 @@ interface ChatContextType { conversationFilter: KnowledgeFilter | null; // responseId: undefined = use currentConversationId, null = don't save to localStorage setConversationFilter: (filter: KnowledgeFilter | null, responseId?: string | null) => void; + hasChatError: boolean; + setChatError: (hasError: boolean) => void; } const ChatContext = createContext(undefined); @@ -108,6 +110,19 @@ export function ChatProvider({ children }: ChatProviderProps) { const [conversationLoaded, setConversationLoaded] = useState(false); const [conversationFilter, setConversationFilterState] = useState(null); + const [hasChatError, setChatError] = useState(false); + + // Listen for ingestion failures and set chat error flag + useEffect(() => { + const handleIngestionFailed = () => { + setChatError(true); + }; + + window.addEventListener("ingestionFailed", handleIngestionFailed); + return () => { + window.removeEventListener("ingestionFailed", handleIngestionFailed); + }; + }, []); // Debounce 
refresh requests to prevent excessive reloads const refreshTimeoutRef = useRef(null); @@ -358,6 +373,8 @@ export function ChatProvider({ children }: ChatProviderProps) { setConversationLoaded, conversationFilter, setConversationFilter, + hasChatError, + setChatError, }), [ endpoint, @@ -378,6 +395,7 @@ export function ChatProvider({ children }: ChatProviderProps) { conversationLoaded, conversationFilter, setConversationFilter, + hasChatError, ], ); diff --git a/frontend/contexts/task-context.tsx b/frontend/contexts/task-context.tsx index 97cdddf1..780bbc7e 100644 --- a/frontend/contexts/task-context.tsx +++ b/frontend/contexts/task-context.tsx @@ -323,6 +323,20 @@ export function TaskProvider({ children }: { children: React.ReactNode }) { currentTask.error || "Unknown error" }`, }); + + // Set chat error flag to trigger test_completion=true on health checks + // Only for ingestion-related tasks (tasks with files are ingestion tasks) + if (currentTask.files && Object.keys(currentTask.files).length > 0) { + // Dispatch event that chat context can listen to + // This avoids circular dependency issues + if (typeof window !== "undefined") { + window.dispatchEvent( + new CustomEvent("ingestionFailed", { + detail: { taskId: currentTask.task_id }, + }), + ); + } + } } } }); diff --git a/frontend/next.config.ts b/frontend/next.config.ts index 6b6b61a4..5f929df5 100644 --- a/frontend/next.config.ts +++ b/frontend/next.config.ts @@ -14,9 +14,6 @@ const nextConfig: NextConfig = { eslint: { ignoreDuringBuilds: true, }, - env: { - UPDATED_ONBOARDING: process.env.UPDATED_ONBOARDING, - }, }; export default nextConfig; diff --git a/src/api/provider_health.py b/src/api/provider_health.py index 67b6e765..3802d8dc 100644 --- a/src/api/provider_health.py +++ b/src/api/provider_health.py @@ -1,10 +1,11 @@ """Provider health check endpoint.""" +import asyncio import httpx from starlette.responses import JSONResponse from utils.logging_config import get_logger from config.settings import get_openrag_config -from api.provider_validation import validate_provider_setup, _test_ollama_lightweight_health +from api.provider_validation import validate_provider_setup logger = get_logger(__name__) @@ -16,6 +17,8 @@ async def check_provider_health(request): Query parameters: provider (optional): Provider to check ('openai', 'ollama', 'watsonx', 'anthropic'). If not provided, checks the currently configured provider. + test_completion (optional): If 'true', performs full validation with completion/embedding tests (consumes credits). + If 'false' or not provided, performs lightweight validation (no/minimal credits consumed). 
Returns: 200: Provider is healthy and validated @@ -26,6 +29,7 @@ async def check_provider_health(request): # Get optional provider from query params query_params = dict(request.query_params) check_provider = query_params.get("provider") + test_completion = query_params.get("test_completion", "false").lower() == "true" # Get current config current_config = get_openrag_config() @@ -100,6 +104,7 @@ async def check_provider_health(request): llm_model=llm_model, endpoint=endpoint, project_id=project_id, + test_completion=test_completion, ) return JSONResponse( @@ -124,23 +129,14 @@ async def check_provider_health(request): # Validate LLM provider try: - # For Ollama, use lightweight health check that doesn't block on active requests - if provider == "ollama": - try: - await _test_ollama_lightweight_health(endpoint) - except Exception as lightweight_error: - # If lightweight check fails, Ollama is down or misconfigured - llm_error = str(lightweight_error) - logger.error(f"LLM provider ({provider}) lightweight check failed: {llm_error}") - raise - else: - await validate_provider_setup( - provider=provider, - api_key=api_key, - llm_model=llm_model, - endpoint=endpoint, - project_id=project_id, - ) + await validate_provider_setup( + provider=provider, + api_key=api_key, + llm_model=llm_model, + endpoint=endpoint, + project_id=project_id, + test_completion=test_completion, + ) except httpx.TimeoutException as e: # Timeout means provider is busy, not misconfigured if provider == "ollama": @@ -154,24 +150,25 @@ async def check_provider_health(request): logger.error(f"LLM provider ({provider}) validation failed: {llm_error}") # Validate embedding provider + # For WatsonX with test_completion=True, wait 2 seconds between completion and embedding tests + if ( + test_completion + and provider == "watsonx" + and embedding_provider == "watsonx" + and llm_error is None + ): + logger.info("Waiting 2 seconds before WatsonX embedding test (after completion test)") + await asyncio.sleep(2) + try: - # For Ollama, use lightweight health check first - if embedding_provider == "ollama": - try: - await _test_ollama_lightweight_health(embedding_endpoint) - except Exception as lightweight_error: - # If lightweight check fails, Ollama is down or misconfigured - embedding_error = str(lightweight_error) - logger.error(f"Embedding provider ({embedding_provider}) lightweight check failed: {embedding_error}") - raise - else: - await validate_provider_setup( - provider=embedding_provider, - api_key=embedding_api_key, - embedding_model=embedding_model, - endpoint=embedding_endpoint, - project_id=embedding_project_id, - ) + await validate_provider_setup( + provider=embedding_provider, + api_key=embedding_api_key, + embedding_model=embedding_model, + endpoint=embedding_endpoint, + project_id=embedding_project_id, + test_completion=test_completion, + ) except httpx.TimeoutException as e: # Timeout means provider is busy, not misconfigured if embedding_provider == "ollama": diff --git a/src/api/provider_validation.py b/src/api/provider_validation.py index eb363930..c4307003 100644 --- a/src/api/provider_validation.py +++ b/src/api/provider_validation.py @@ -14,17 +14,20 @@ async def validate_provider_setup( llm_model: str = None, endpoint: str = None, project_id: str = None, + test_completion: bool = False, ) -> None: """ Validate provider setup by testing completion with tool calling and embedding. 
Args: - provider: Provider name ('openai', 'watsonx', 'ollama') + provider: Provider name ('openai', 'watsonx', 'ollama', 'anthropic') api_key: API key for the provider (optional for ollama) embedding_model: Embedding model to test llm_model: LLM model to test endpoint: Provider endpoint (required for ollama and watsonx) project_id: Project ID (required for watsonx) + test_completion: If True, performs full validation with completion/embedding tests (consumes credits). + If False, performs lightweight validation (no credits consumed). Default: False. Raises: Exception: If validation fails with message "Setup failed, please try again or select a different provider." @@ -32,29 +35,37 @@ async def validate_provider_setup( provider_lower = provider.lower() try: - logger.info(f"Starting validation for provider: {provider_lower}") + logger.info(f"Starting validation for provider: {provider_lower} (test_completion={test_completion})") - if embedding_model: - # Test embedding - await test_embedding( + if test_completion: + # Full validation with completion/embedding tests (consumes credits) + if embedding_model: + # Test embedding + await test_embedding( + provider=provider_lower, + api_key=api_key, + embedding_model=embedding_model, + endpoint=endpoint, + project_id=project_id, + ) + elif llm_model: + # Test completion with tool calling + await test_completion_with_tools( + provider=provider_lower, + api_key=api_key, + llm_model=llm_model, + endpoint=endpoint, + project_id=project_id, + ) + else: + # Lightweight validation (no credits consumed) + await test_lightweight_health( provider=provider_lower, api_key=api_key, - embedding_model=embedding_model, endpoint=endpoint, project_id=project_id, ) - elif llm_model: - # Test completion with tool calling - await test_completion_with_tools( - provider=provider_lower, - api_key=api_key, - llm_model=llm_model, - endpoint=endpoint, - project_id=project_id, - ) - - logger.info(f"Validation successful for provider: {provider_lower}") except Exception as e: @@ -62,6 +73,26 @@ async def validate_provider_setup( raise Exception("Setup failed, please try again or select a different provider.") +async def test_lightweight_health( + provider: str, + api_key: str = None, + endpoint: str = None, + project_id: str = None, +) -> None: + """Test provider health with lightweight check (no credits consumed).""" + + if provider == "openai": + await _test_openai_lightweight_health(api_key) + elif provider == "watsonx": + await _test_watsonx_lightweight_health(api_key, endpoint, project_id) + elif provider == "ollama": + await _test_ollama_lightweight_health(endpoint) + elif provider == "anthropic": + await _test_anthropic_lightweight_health(api_key) + else: + raise ValueError(f"Unknown provider: {provider}") + + async def test_completion_with_tools( provider: str, api_key: str = None, @@ -103,6 +134,40 @@ async def test_embedding( # OpenAI validation functions +async def _test_openai_lightweight_health(api_key: str) -> None: + """Test OpenAI API key validity with lightweight check. + + Only checks if the API key is valid without consuming credits. + Uses the /v1/models endpoint which doesn't consume credits. 
+ """ + try: + headers = { + "Authorization": f"Bearer {api_key}", + "Content-Type": "application/json", + } + + async with httpx.AsyncClient() as client: + # Use /v1/models endpoint which validates the key without consuming credits + response = await client.get( + "https://api.openai.com/v1/models", + headers=headers, + timeout=10.0, # Short timeout for lightweight check + ) + + if response.status_code != 200: + logger.error(f"OpenAI lightweight health check failed: {response.status_code}") + raise Exception(f"OpenAI API key validation failed: {response.status_code}") + + logger.info("OpenAI lightweight health check passed") + + except httpx.TimeoutException: + logger.error("OpenAI lightweight health check timed out") + raise Exception("OpenAI API request timed out") + except Exception as e: + logger.error(f"OpenAI lightweight health check failed: {str(e)}") + raise + + async def _test_openai_completion_with_tools(api_key: str, llm_model: str) -> None: """Test OpenAI completion with tool calling.""" try: @@ -213,6 +278,45 @@ async def _test_openai_embedding(api_key: str, embedding_model: str) -> None: # IBM Watson validation functions +async def _test_watsonx_lightweight_health( + api_key: str, endpoint: str, project_id: str +) -> None: + """Test WatsonX API key validity with lightweight check. + + Only checks if the API key is valid by getting a bearer token. + Does not consume credits by avoiding model inference requests. + """ + try: + # Get bearer token from IBM IAM - this validates the API key without consuming credits + async with httpx.AsyncClient() as client: + token_response = await client.post( + "https://iam.cloud.ibm.com/identity/token", + headers={"Content-Type": "application/x-www-form-urlencoded"}, + data={ + "grant_type": "urn:ibm:params:oauth:grant-type:apikey", + "apikey": api_key, + }, + timeout=10.0, # Short timeout for lightweight check + ) + + if token_response.status_code != 200: + logger.error(f"IBM IAM token request failed: {token_response.status_code}") + raise Exception("Failed to authenticate with IBM Watson - invalid API key") + + bearer_token = token_response.json().get("access_token") + if not bearer_token: + raise Exception("No access token received from IBM") + + logger.info("WatsonX lightweight health check passed - API key is valid") + + except httpx.TimeoutException: + logger.error("WatsonX lightweight health check timed out") + raise Exception("WatsonX API request timed out") + except Exception as e: + logger.error(f"WatsonX lightweight health check failed: {str(e)}") + raise + + async def _test_watsonx_completion_with_tools( api_key: str, llm_model: str, endpoint: str, project_id: str ) -> None: @@ -483,6 +587,48 @@ async def _test_ollama_embedding(embedding_model: str, endpoint: str) -> None: # Anthropic validation functions +async def _test_anthropic_lightweight_health(api_key: str) -> None: + """Test Anthropic API key validity with lightweight check. + + Only checks if the API key is valid without consuming credits. + Uses a minimal messages request with max_tokens=1 to validate the key. 
+ """ + try: + headers = { + "x-api-key": api_key, + "anthropic-version": "2023-06-01", + "Content-Type": "application/json", + } + + # Minimal validation request - uses cheapest model with minimal tokens + payload = { + "model": "claude-3-5-haiku-latest", # Cheapest model + "max_tokens": 1, # Minimum tokens to validate key + "messages": [{"role": "user", "content": "test"}], + } + + async with httpx.AsyncClient() as client: + response = await client.post( + "https://api.anthropic.com/v1/messages", + headers=headers, + json=payload, + timeout=10.0, # Short timeout for lightweight check + ) + + if response.status_code != 200: + logger.error(f"Anthropic lightweight health check failed: {response.status_code}") + raise Exception(f"Anthropic API key validation failed: {response.status_code}") + + logger.info("Anthropic lightweight health check passed") + + except httpx.TimeoutException: + logger.error("Anthropic lightweight health check timed out") + raise Exception("Anthropic API request timed out") + except Exception as e: + logger.error(f"Anthropic lightweight health check failed: {str(e)}") + raise + + async def _test_anthropic_completion_with_tools(api_key: str, llm_model: str) -> None: """Test Anthropic completion with tool calling.""" try: diff --git a/src/api/settings.py b/src/api/settings.py index 54ad477e..af43b03a 100644 --- a/src/api/settings.py +++ b/src/api/settings.py @@ -897,6 +897,7 @@ async def onboarding(request, flows_service, session_manager=None): ) # Validate provider setup before initializing OpenSearch index + # Use lightweight validation (test_completion=False) to avoid consuming credits during onboarding try: from api.provider_validation import validate_provider_setup @@ -905,13 +906,14 @@ async def onboarding(request, flows_service, session_manager=None): llm_provider = current_config.agent.llm_provider.lower() llm_provider_config = current_config.get_llm_provider_config() - logger.info(f"Validating LLM provider setup for {llm_provider}") + logger.info(f"Validating LLM provider setup for {llm_provider} (lightweight)") await validate_provider_setup( provider=llm_provider, api_key=getattr(llm_provider_config, "api_key", None), llm_model=current_config.agent.llm_model, endpoint=getattr(llm_provider_config, "endpoint", None), project_id=getattr(llm_provider_config, "project_id", None), + test_completion=False, # Lightweight validation - no credits consumed ) logger.info(f"LLM provider setup validation completed successfully for {llm_provider}") @@ -920,13 +922,14 @@ async def onboarding(request, flows_service, session_manager=None): embedding_provider = current_config.knowledge.embedding_provider.lower() embedding_provider_config = current_config.get_embedding_provider_config() - logger.info(f"Validating embedding provider setup for {embedding_provider}") + logger.info(f"Validating embedding provider setup for {embedding_provider} (lightweight)") await validate_provider_setup( provider=embedding_provider, api_key=getattr(embedding_provider_config, "api_key", None), embedding_model=current_config.knowledge.embedding_model, endpoint=getattr(embedding_provider_config, "endpoint", None), project_id=getattr(embedding_provider_config, "project_id", None), + test_completion=False, # Lightweight validation - no credits consumed ) logger.info(f"Embedding provider setup validation completed successfully for {embedding_provider}") except Exception as e: diff --git a/src/services/models_service.py b/src/services/models_service.py index 979bcec2..546720c3 100644 --- 
a/src/services/models_service.py +++ b/src/services/models_service.py @@ -50,7 +50,7 @@ class ModelsService: self.session_manager = None async def get_openai_models(self, api_key: str) -> Dict[str, List[Dict[str, str]]]: - """Fetch available models from OpenAI API""" + """Fetch available models from OpenAI API with lightweight validation""" try: headers = { "Authorization": f"Bearer {api_key}", @@ -58,6 +58,8 @@ class ModelsService: } async with httpx.AsyncClient() as client: + # Lightweight validation: just check if API key is valid + # This doesn't consume credits, only validates the key response = await client.get( "https://api.openai.com/v1/models", headers=headers, timeout=10.0 ) @@ -101,6 +103,7 @@ class ModelsService: key=lambda x: (not x.get("default", False), x["value"]) ) + logger.info("OpenAI API key validated successfully without consuming credits") return { "language_models": language_models, "embedding_models": embedding_models, @@ -389,38 +392,12 @@ class ModelsService: } ) - # Validate credentials with the first available LLM model - if language_models: - first_llm_model = language_models[0]["value"] - - async with httpx.AsyncClient() as client: - validation_url = f"{watson_endpoint}/ml/v1/text/generation" - validation_params = {"version": "2024-09-16"} - validation_payload = { - "input": "test", - "model_id": first_llm_model, - "project_id": project_id, - "parameters": { - "max_new_tokens": 1, - }, - } - - validation_response = await client.post( - validation_url, - headers=headers, - params=validation_params, - json=validation_payload, - timeout=10.0, - ) - - if validation_response.status_code != 200: - raise Exception( - f"Invalid credentials or endpoint: {validation_response.status_code} - {validation_response.text}" - ) - - logger.info(f"IBM Watson credentials validated successfully using model: {first_llm_model}") + # Lightweight validation: API key is already validated by successfully getting bearer token + # No need to make a generation request that consumes credits + if bearer_token: + logger.info("IBM Watson API key validated successfully without consuming credits") else: - logger.warning("No language models available to validate credentials") + logger.warning("No bearer token available - API key validation may have failed") if not language_models and not embedding_models: raise Exception("No IBM models retrieved from API")
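
For reference, the multi-model hybrid search assembled in the OpenSearch component above combines one KNN clause per detected embedding model under a dis_max (70% semantic weight, tie_breaker 0.0 so only the best model's score counts) with a fuzzy multi_match keyword clause (30% weight). A minimal sketch of that query shape follows, assuming query vectors have already been generated per model; the normalize_model_name helper is not shown in this diff, so the normalization below (lowercase, non-alphanumerics collapsed to underscores) is an assumption, and the filters, aggregations, and num_candidates fallback handling are omitted for brevity.

    # Minimal sketch of the hybrid query body built by the component above.
    # Assumes query_embeddings maps model names to already-generated vectors.
    import re


    def normalize_model_name(model_name: str) -> str:
        # Assumption: one plausible normalization; the real helper is not in this diff.
        return re.sub(r"[^a-z0-9]+", "_", model_name.lower()).strip("_")


    def build_hybrid_query(
        query_text: str,
        query_embeddings: dict[str, list[float]],
        *,
        k: int = 50,
        num_candidates: int | None = 1000,
        size: int = 10,
    ) -> dict:
        knn_queries = []
        for model_name, vector in query_embeddings.items():
            field = f"chunk_embedding_{normalize_model_name(model_name)}"
            knn = {"knn": {field: {"vector": vector, "k": k}}}
            if num_candidates:
                knn["knn"][field]["num_candidates"] = num_candidates
            knn_queries.append(knn)

        return {
            "query": {
                "bool": {
                    "should": [
                        # Semantic side: the single best-scoring model wins outright.
                        {"dis_max": {"tie_breaker": 0.0, "boost": 0.7, "queries": knn_queries}},
                        # Keyword side: fuzzy match on text and filename.
                        {
                            "multi_match": {
                                "query": query_text,
                                "fields": ["text^2", "filename^1.5"],
                                "type": "best_fields",
                                "fuzziness": "AUTO",
                                "boost": 0.3,
                            }
                        },
                    ],
                    "minimum_should_match": 1,
                }
            },
            "size": size,
        }

A document only needs to match one clause (minimum_should_match: 1), which is what lets keyword-only hits surface even when a model's KNN field is missing from the index.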
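
The test_completion flag added in provider_validation.py splits validation into two tiers: a lightweight check that only proves credentials are valid, and a full check that performs a real completion or embedding call. A condensed sketch of that control flow, for OpenAI only (the real code also dispatches to watsonx, ollama, and anthropic, and uses a fuller completion test with tool calling):

    # Condensed sketch of the two-tier validation introduced above (OpenAI only).
    import httpx


    async def validate_openai(api_key: str, *, test_completion: bool = False) -> None:
        headers = {"Authorization": f"Bearer {api_key}"}
        async with httpx.AsyncClient(timeout=10.0) as client:
            if not test_completion:
                # Lightweight tier: listing models proves the key is valid
                # without consuming completion credits.
                resp = await client.get("https://api.openai.com/v1/models", headers=headers)
            else:
                # Full tier: a real (tiny) completion request, which consumes
                # credits; only escalated to after an actual chat error.
                resp = await client.post(
                    "https://api.openai.com/v1/chat/completions",
                    headers=headers,
                    json={
                        "model": "gpt-4o-mini",  # assumption: any inexpensive chat model
                        "messages": [{"role": "user", "content": "ping"}],
                        "max_tokens": 1,
                    },
                )
        if resp.status_code != 200:
            raise Exception(f"OpenAI validation failed: {resp.status_code}")

This is also why onboarding and the default health poll pass test_completion=False: routine checks stay free, and the expensive path runs only on demand.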
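
On the frontend side, a failed chat (or ingestion) sets hasChatError, which escalates the next health poll to test_completion=true and then resets once a healthy result arrives; unhealthy results back off at 5s, 10s, 20s, then 30s. The same loop expressed as a small Python client, purely as an illustration of the flow; the /api/provider/health path is an assumption, since the route registration is not part of this diff and only the query parameters are shown:

    # Sketch of the polling/escalation behavior the frontend changes implement.
    # Assumption: the health endpoint is mounted at /api/provider/health.
    import asyncio

    import httpx

    BACKOFF_MS = [5000, 10000, 20000, 30000]  # matches the frontend schedule


    async def poll_health(base_url: str, *, has_chat_error: bool = False) -> None:
        failures = 0
        async with httpx.AsyncClient(base_url=base_url, timeout=15.0) as client:
            while True:
                params = {}
                if has_chat_error:
                    # Escalate to the credit-consuming full check after a chat error.
                    params["test_completion"] = "true"
                resp = await client.get("/api/provider/health", params=params)
                status = resp.json().get("status") if resp.status_code == 200 else "error"

                if status == "healthy":
                    failures = 0
                    has_chat_error = False  # mirrors setChatError(false)
                    delay_ms = 30000
                elif status == "backend-unavailable":
                    delay_ms = 15000
                else:
                    delay_ms = BACKOFF_MS[min(failures, len(BACKOFF_MS) - 1)]
                    failures += 1
                await asyncio.sleep(delay_ms / 1000)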