diff --git a/flows/components/opensearch.py b/flows/components/opensearch.py index 7902161b..69d65043 100644 --- a/flows/components/opensearch.py +++ b/flows/components/opensearch.py @@ -865,58 +865,99 @@ class OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreCompon metadatas.append(data_copy) self.log(metadatas) - # Generate embeddings (threaded for concurrency) with retries - def embed_chunk(chunk_text: str) -> list[float]: - return selected_embedding.embed_documents([chunk_text])[0] + # Generate embeddings with rate-limit-aware retry logic using tenacity + from tenacity import ( + retry, + retry_if_exception, + stop_after_attempt, + wait_exponential, + ) - vectors: list[list[float]] | None = None - last_exception: Exception | None = None - delay = 1.0 - attempts = 0 - max_attempts = 3 + def is_rate_limit_error(exception: Exception) -> bool: + """Check if exception is a rate limit error (429).""" + error_str = str(exception).lower() + return "429" in error_str or "rate_limit" in error_str or "rate limit" in error_str + + def is_other_retryable_error(exception: Exception) -> bool: + """Check if exception is retryable but not a rate limit error.""" + # Retry on most exceptions except for specific non-retryable ones + # Add other non-retryable exceptions here if needed + return not is_rate_limit_error(exception) + + # Create retry decorator for rate limit errors (longer backoff) + retry_on_rate_limit = retry( + retry=retry_if_exception(is_rate_limit_error), + stop=stop_after_attempt(5), + wait=wait_exponential(multiplier=2, min=2, max=30), + reraise=True, + before_sleep=lambda retry_state: logger.warning( + f"Rate limit hit for chunk (attempt {retry_state.attempt_number}/5), " + f"backing off for {retry_state.next_action.sleep:.1f}s" + ), + ) + + # Create retry decorator for other errors (shorter backoff) + retry_on_other_errors = retry( + retry=retry_if_exception(is_other_retryable_error), + stop=stop_after_attempt(3), + wait=wait_exponential(multiplier=1, min=1, max=8), + reraise=True, + before_sleep=lambda retry_state: logger.warning( + f"Error embedding chunk (attempt {retry_state.attempt_number}/3), " + f"retrying in {retry_state.next_action.sleep:.1f}s: {retry_state.outcome.exception()}" + ), + ) + + def embed_chunk_with_retry(chunk_text: str, chunk_idx: int) -> list[float]: + """Embed a single chunk with rate-limit-aware retry logic.""" + + @retry_on_rate_limit + @retry_on_other_errors + def _embed(text: str) -> list[float]: + return selected_embedding.embed_documents([text])[0] - while attempts < max_attempts: - attempts += 1 try: - # Restrict concurrency for IBM/Watsonx models to avoid rate limits - is_ibm = (embedding_model and "ibm" in str(embedding_model).lower()) or ( - selected_embedding and "watsonx" in type(selected_embedding).__name__.lower() + return _embed(chunk_text) + except Exception as e: + logger.error( + f"Failed to embed chunk {chunk_idx} after all retries: {e}", + error=str(e), ) - logger.debug(f"Is IBM: {is_ibm}") - max_workers = 1 if is_ibm else min(max(len(texts), 1), 8) + raise - with ThreadPoolExecutor(max_workers=max_workers) as executor: - futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)} - vectors = [None] * len(texts) - for future in as_completed(futures): - idx = futures[future] - vectors[idx] = future.result() - break - except Exception as exc: - last_exception = exc - if attempts >= max_attempts: - logger.error( - f"Embedding generation failed for model {embedding_model} after retries", - 
error=str(exc), - ) - raise - logger.warning( - "Threaded embedding generation failed for model %s (attempt %s/%s), retrying in %.1fs", - embedding_model, - attempts, - max_attempts, - delay, - ) - time.sleep(delay) - delay = min(delay * 2, 8.0) + # Restrict concurrency for IBM/Watsonx models to avoid rate limits + is_ibm = (embedding_model and "ibm" in str(embedding_model).lower()) or ( + selected_embedding and "watsonx" in type(selected_embedding).__name__.lower() + ) + logger.debug(f"Is IBM: {is_ibm}") - if vectors is None: - raise RuntimeError( - f"Embedding generation failed for {embedding_model}: {last_exception}" - if last_exception - else f"Embedding generation failed for {embedding_model}" + # For IBM models, use sequential processing with rate limiting + # For other models, use parallel processing + vectors: list[list[float]] = [None] * len(texts) + + if is_ibm: + # Sequential processing with inter-request delay for IBM models + inter_request_delay = 0.6 # ~1.67 req/s, safely under 2 req/s limit + logger.info( + f"Using sequential processing for IBM model with {inter_request_delay}s delay between requests" ) + for idx, chunk in enumerate(texts): + if idx > 0: + # Add delay between requests (but not before the first one) + time.sleep(inter_request_delay) + vectors[idx] = embed_chunk_with_retry(chunk, idx) + else: + # Parallel processing for non-IBM models + max_workers = min(max(len(texts), 1), 8) + logger.debug(f"Using parallel processing with {max_workers} workers") + + with ThreadPoolExecutor(max_workers=max_workers) as executor: + futures = {executor.submit(embed_chunk_with_retry, chunk, idx): idx for idx, chunk in enumerate(texts)} + for future in as_completed(futures): + idx = futures[future] + vectors[idx] = future.result() + if not vectors: self.log(f"No vectors generated from documents for model {embedding_model}.") return diff --git a/flows/components/opensearch_multimodel.py b/flows/components/opensearch_multimodel.py new file mode 100644 index 00000000..69d65043 --- /dev/null +++ b/flows/components/opensearch_multimodel.py @@ -0,0 +1,1735 @@ +from __future__ import annotations + +import copy +import json +import time +import uuid +from concurrent.futures import ThreadPoolExecutor, as_completed +from typing import Any + +from opensearchpy import OpenSearch, helpers +from opensearchpy.exceptions import OpenSearchException, RequestError + +from lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store +from lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection +from lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput +from lfx.log import logger +from lfx.schema.data import Data + + +def normalize_model_name(model_name: str) -> str: + """Normalize embedding model name for use as field suffix. + + Converts model names to valid OpenSearch field names by replacing + special characters and ensuring alphanumeric format. 
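+
+    A quick illustration with made-up model names:
+
+        >>> normalize_model_name("text-embedding-3-small")
+        'text_embedding_3_small'
+        >>> normalize_model_name("ibm/granite-embedding-107m-multilingual")
+        'ibm_granite_embedding_107m_multilingual'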
+ + Args: + model_name: Original embedding model name (e.g., "text-embedding-3-small") + + Returns: + Normalized field suffix (e.g., "text_embedding_3_small") + """ + normalized = model_name.lower() + # Replace common separators with underscores + normalized = normalized.replace("-", "_").replace(":", "_").replace("/", "_").replace(".", "_") + # Remove any non-alphanumeric characters except underscores + normalized = "".join(c if c.isalnum() or c == "_" else "_" for c in normalized) + # Remove duplicate underscores + while "__" in normalized: + normalized = normalized.replace("__", "_") + return normalized.strip("_") + + +def get_embedding_field_name(model_name: str) -> str: + """Get the dynamic embedding field name for a model. + + Args: + model_name: Embedding model name + + Returns: + Field name in format: chunk_embedding_{normalized_model_name} + """ + logger.info(f"chunk_embedding_{normalize_model_name(model_name)}") + return f"chunk_embedding_{normalize_model_name(model_name)}" + + +@vector_store_connection +class OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent): + """OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities. + + This component provides vector storage and retrieval using OpenSearch, combining semantic + similarity search (KNN) with keyword-based search for optimal results. It supports: + - Multiple embedding models per index with dynamic field names + - Automatic detection and querying of all available embedding models + - Parallel embedding generation for multi-model search + - Document ingestion with model tracking + - Advanced filtering and aggregations + - Flexible authentication options + + Features: + - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name}) + - Hybrid search combining multiple KNN queries (dis_max) + keyword matching + - Auto-detection of available models in the index + - Parallel query embedding generation for all detected models + - Vector storage with configurable engines (jvector, nmslib, faiss, lucene) + - Flexible authentication (Basic auth, JWT tokens) + + Model Name Resolution: + - Priority: deployment > model > model_name attributes + - This ensures correct matching between embedding objects and index fields + - When multiple embeddings are provided, specify embedding_model_name to select which one to use + - During search, each detected model in the index is matched to its corresponding embedding object + """ + + display_name: str = "OpenSearch (Multi-Model Multi-Embedding)" + icon: str = "OpenSearch" + description: str = ( + "Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search." + ) + + # Keys we consider baseline + default_keys: list[str] = [ + "opensearch_url", + "index_name", + *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc. + "embedding", + "embedding_model_name", + "vector_field", + "number_of_results", + "auth_mode", + "username", + "password", + "jwt_token", + "jwt_header", + "bearer_prefix", + "use_ssl", + "verify_certs", + "filter_expression", + "engine", + "space_type", + "ef_construction", + "m", + "num_candidates", + "docs_metadata", + ] + + inputs = [ + TableInput( + name="docs_metadata", + display_name="Document Metadata", + info=( + "Additional metadata key-value pairs to be added to all ingested documents. " + "Useful for tagging documents with source information, categories, or other custom attributes." 
+ ), + table_schema=[ + { + "name": "key", + "display_name": "Key", + "type": "str", + "description": "Key name", + }, + { + "name": "value", + "display_name": "Value", + "type": "str", + "description": "Value of the metadata", + }, + ], + value=[], + input_types=["Data"], + ), + StrInput( + name="opensearch_url", + display_name="OpenSearch URL", + value="http://localhost:9200", + info=( + "The connection URL for your OpenSearch cluster " + "(e.g., http://localhost:9200 for local development or your cloud endpoint)." + ), + ), + StrInput( + name="index_name", + display_name="Index Name", + value="langflow", + info=( + "The OpenSearch index name where documents will be stored and searched. " + "Will be created automatically if it doesn't exist." + ), + ), + DropdownInput( + name="engine", + display_name="Vector Engine", + options=["jvector", "nmslib", "faiss", "lucene"], + value="jvector", + info=( + "Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. " + "Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'." + ), + advanced=True, + ), + DropdownInput( + name="space_type", + display_name="Distance Metric", + options=["l2", "l1", "cosinesimil", "linf", "innerproduct"], + value="l2", + info=( + "Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, " + "'cosinesimil' for cosine similarity, 'innerproduct' for dot product." + ), + advanced=True, + ), + IntInput( + name="ef_construction", + display_name="EF Construction", + value=512, + info=( + "Size of the dynamic candidate list during index construction. " + "Higher values improve recall but increase indexing time and memory usage." + ), + advanced=True, + ), + IntInput( + name="m", + display_name="M Parameter", + value=16, + info=( + "Number of bidirectional connections for each vector in the HNSW graph. " + "Higher values improve search quality but increase memory usage and indexing time." + ), + advanced=True, + ), + IntInput( + name="num_candidates", + display_name="Candidate Pool Size", + value=1000, + info=( + "Number of approximate neighbors to consider for each KNN query. " + "Some OpenSearch deployments do not support this parameter; set to 0 to disable." + ), + advanced=True, + ), + *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc. + HandleInput(name="embedding", display_name="Embedding", input_types=["Embeddings"], is_list=True), + StrInput( + name="embedding_model_name", + display_name="Embedding Model Name", + value="", + info=( + "Name of the embedding model to use for ingestion. This selects which embedding from the list " + "will be used to embed documents. Matches on deployment, model, model_id, or model_name. " + "For duplicate deployments, use combined format: 'deployment:model' " + "(e.g., 'text-embedding-ada-002:text-embedding-3-large'). " + "Leave empty to use the first embedding. Error message will show all available identifiers." + ), + advanced=False, + ), + StrInput( + name="vector_field", + display_name="Legacy Vector Field Name", + value="chunk_embedding", + advanced=True, + info=( + "Legacy field name for backward compatibility. New documents use dynamic fields " + "(chunk_embedding_{model_name}) based on the embedding_model_name." + ), + ), + IntInput( + name="number_of_results", + display_name="Default Result Limit", + value=10, + advanced=True, + info=( + "Default maximum number of search results to return when no limit is " + "specified in the filter expression." 
+ ), + ), + MultilineInput( + name="filter_expression", + display_name="Search Filters (JSON)", + value="", + info=( + "Optional JSON configuration for search filtering, result limits, and score thresholds.\n\n" + "Format 1 - Explicit filters:\n" + '{"filter": [{"term": {"filename":"doc.pdf"}}, ' + '{"terms":{"owner":["user1","user2"]}}], "limit": 10, "score_threshold": 1.6}\n\n' + "Format 2 - Context-style mapping:\n" + '{"data_sources":["file.pdf"], "document_types":["application/pdf"], "owners":["user123"]}\n\n' + "Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters." + ), + ), + # ----- Auth controls (dynamic) ----- + DropdownInput( + name="auth_mode", + display_name="Authentication Mode", + value="basic", + options=["basic", "jwt"], + info=( + "Authentication method: 'basic' for username/password authentication, " + "or 'jwt' for JSON Web Token (Bearer) authentication." + ), + real_time_refresh=True, + advanced=False, + ), + StrInput( + name="username", + display_name="Username", + value="admin", + show=True, + ), + SecretStrInput( + name="password", + display_name="OpenSearch Password", + value="admin", + show=True, + ), + SecretStrInput( + name="jwt_token", + display_name="JWT Token", + value="JWT", + load_from_db=False, + show=False, + info=( + "Valid JSON Web Token for authentication. " + "Will be sent in the Authorization header (with optional 'Bearer ' prefix)." + ), + ), + StrInput( + name="jwt_header", + display_name="JWT Header Name", + value="Authorization", + show=False, + advanced=True, + ), + BoolInput( + name="bearer_prefix", + display_name="Prefix 'Bearer '", + value=True, + show=False, + advanced=True, + ), + # ----- TLS ----- + BoolInput( + name="use_ssl", + display_name="Use SSL/TLS", + value=True, + advanced=True, + info="Enable SSL/TLS encryption for secure connections to OpenSearch.", + ), + BoolInput( + name="verify_certs", + display_name="Verify SSL Certificates", + value=False, + advanced=True, + info=( + "Verify SSL certificates when connecting. " + "Disable for self-signed certificates in development environments." + ), + ), + ] + + def _get_embedding_model_name(self, embedding_obj=None) -> str: + """Get the embedding model name from component config or embedding object. + + Priority: deployment > model > model_id > model_name + This ensures we use the actual model being deployed, not just the configured model. + Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.) 
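+
+        Worked example (attribute values are hypothetical): an embedding object
+        with deployment="ada-002-prod" and model="text-embedding-ada-002"
+        resolves to "ada-002-prod", because deployment outranks model.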
+ + Args: + embedding_obj: Specific embedding object to get name from (optional) + + Returns: + Embedding model name + + Raises: + ValueError: If embedding model name cannot be determined + """ + # First try explicit embedding_model_name input + if hasattr(self, "embedding_model_name") and self.embedding_model_name: + return self.embedding_model_name.strip() + + # Try to get from provided embedding object + if embedding_obj: + # Priority: deployment > model > model_id > model_name + if hasattr(embedding_obj, "deployment") and embedding_obj.deployment: + return str(embedding_obj.deployment) + if hasattr(embedding_obj, "model") and embedding_obj.model: + return str(embedding_obj.model) + if hasattr(embedding_obj, "model_id") and embedding_obj.model_id: + return str(embedding_obj.model_id) + if hasattr(embedding_obj, "model_name") and embedding_obj.model_name: + return str(embedding_obj.model_name) + + # Try to get from embedding component (legacy single embedding) + if hasattr(self, "embedding") and self.embedding: + # Handle list of embeddings + if isinstance(self.embedding, list) and len(self.embedding) > 0: + first_emb = self.embedding[0] + if hasattr(first_emb, "deployment") and first_emb.deployment: + return str(first_emb.deployment) + if hasattr(first_emb, "model") and first_emb.model: + return str(first_emb.model) + if hasattr(first_emb, "model_id") and first_emb.model_id: + return str(first_emb.model_id) + if hasattr(first_emb, "model_name") and first_emb.model_name: + return str(first_emb.model_name) + # Handle single embedding + elif not isinstance(self.embedding, list): + if hasattr(self.embedding, "deployment") and self.embedding.deployment: + return str(self.embedding.deployment) + if hasattr(self.embedding, "model") and self.embedding.model: + return str(self.embedding.model) + if hasattr(self.embedding, "model_id") and self.embedding.model_id: + return str(self.embedding.model_id) + if hasattr(self.embedding, "model_name") and self.embedding.model_name: + return str(self.embedding.model_name) + + msg = ( + "Could not determine embedding model name. " + "Please set the 'embedding_model_name' field or ensure the embedding component " + "has a 'deployment', 'model', 'model_id', or 'model_name' attribute." + ) + raise ValueError(msg) + + # ---------- helper functions for index management ---------- + def _default_text_mapping( + self, + dim: int, + engine: str = "jvector", + space_type: str = "l2", + ef_search: int = 512, + ef_construction: int = 100, + m: int = 16, + vector_field: str = "vector_field", + ) -> dict[str, Any]: + """Create the default OpenSearch index mapping for vector search. + + This method generates the index configuration with k-NN settings optimized + for approximate nearest neighbor search using the specified vector engine. + Includes the embedding_model keyword field for tracking which model was used. 
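+
+        Abridged shape of the returned body (tracking fields omitted):
+            {"settings": {"index": {"knn": True, "knn.algo_param.ef_search": ef_search}},
+             "mappings": {"properties": {vector_field: {"type": "knn_vector",
+                                                        "dimension": dim, "method": {...}}}}}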
+
+        Args:
+            dim: Dimensionality of the vector embeddings
+            engine: Vector search engine (jvector, nmslib, faiss, lucene)
+            space_type: Distance metric for similarity calculation
+            ef_search: Size of dynamic list used during search
+            ef_construction: Size of dynamic list used during index construction
+            m: Number of bidirectional links for each vector
+            vector_field: Name of the field storing vector embeddings
+
+        Returns:
+            Dictionary containing OpenSearch index mapping configuration
+        """
+        return {
+            "settings": {"index": {"knn": True, "knn.algo_param.ef_search": ef_search}},
+            "mappings": {
+                "properties": {
+                    vector_field: {
+                        "type": "knn_vector",
+                        "dimension": dim,
+                        "method": {
+                            "name": "disk_ann",
+                            "space_type": space_type,
+                            "engine": engine,
+                            "parameters": {"ef_construction": ef_construction, "m": m},
+                        },
+                    },
+                    "embedding_model": {"type": "keyword"},  # Track which model was used
+                    "embedding_dimensions": {"type": "integer"},
+                }
+            },
+        }
+
+    def _ensure_embedding_field_mapping(
+        self,
+        client: OpenSearch,
+        index_name: str,
+        field_name: str,
+        dim: int,
+        engine: str,
+        space_type: str,
+        ef_construction: int,
+        m: int,
+    ) -> None:
+        """Lazily add a dynamic embedding field to the index if it doesn't exist.
+
+        This allows adding new embedding models without recreating the entire index.
+        Also ensures the embedding_model tracking field exists.
+
+        Args:
+            client: OpenSearch client instance
+            index_name: Target index name
+            field_name: Dynamic field name for this embedding model
+            dim: Vector dimensionality
+            engine: Vector search engine
+            space_type: Distance metric
+            ef_construction: Construction parameter
+            m: HNSW parameter
+        """
+        try:
+            mapping = {
+                "properties": {
+                    field_name: {
+                        "type": "knn_vector",
+                        "dimension": dim,
+                        "method": {
+                            "name": "disk_ann",
+                            "space_type": space_type,
+                            "engine": engine,
+                            "parameters": {"ef_construction": ef_construction, "m": m},
+                        },
+                    },
+                    # Also ensure the embedding_model tracking field exists as keyword
+                    "embedding_model": {"type": "keyword"},
+                    "embedding_dimensions": {"type": "integer"},
+                }
+            }
+            client.indices.put_mapping(index=index_name, body=mapping)
+            logger.info(f"Added/updated embedding field mapping: {field_name}")
+        except Exception as e:
+            logger.warning(f"Could not add embedding field mapping for {field_name}: {e}")
+            raise
+
+        properties = self._get_index_properties(client)
+        if not self._is_knn_vector_field(properties, field_name):
+            msg = f"Field '{field_name}' is not mapped as knn_vector. Current mapping: {properties.get(field_name)}"
+            logger.error(msg)
+            raise ValueError(msg)
+
+    def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:
+        """Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).
+
+        Amazon OpenSearch Serverless has restrictions on which vector engines
+        can be used. This method ensures the selected engine is compatible.
+
+        Args:
+            is_aoss: Whether the connection is to Amazon OpenSearch Serverless
+            engine: The selected vector search engine
+
+        Raises:
+            ValueError: If AOSS is used with an incompatible engine
+        """
+        if is_aoss and engine not in {"nmslib", "faiss"}:
+            msg = "Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines"
+            raise ValueError(msg)
+
+    def _is_aoss_enabled(self, http_auth: Any) -> bool:
+        """Determine if Amazon OpenSearch Serverless (AOSS) is being used.
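+
+        Detection is duck-typed: an AWS SigV4 signer (for example opensearch-py's
+        AWSV4SignerAuth) carries a "service" attribute that equals "aoss" for
+        serverless collections; basic-auth tuples and JWT headers never match.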
+ + Args: + http_auth: The HTTP authentication object + + Returns: + True if AOSS is enabled, False otherwise + """ + return http_auth is not None and hasattr(http_auth, "service") and http_auth.service == "aoss" + + def _bulk_ingest_embeddings( + self, + client: OpenSearch, + index_name: str, + embeddings: list[list[float]], + texts: list[str], + metadatas: list[dict] | None = None, + ids: list[str] | None = None, + vector_field: str = "vector_field", + text_field: str = "text", + embedding_model: str = "unknown", + mapping: dict | None = None, + max_chunk_bytes: int | None = 1 * 1024 * 1024, + *, + is_aoss: bool = False, + ) -> list[str]: + """Efficiently ingest multiple documents with embeddings into OpenSearch. + + This method uses bulk operations to insert documents with their vector + embeddings and metadata into the specified OpenSearch index. Each document + is tagged with the embedding_model name for tracking. + + Args: + client: OpenSearch client instance + index_name: Target index for document storage + embeddings: List of vector embeddings for each document + texts: List of document texts + metadatas: Optional metadata dictionaries for each document + ids: Optional document IDs (UUIDs generated if not provided) + vector_field: Field name for storing vector embeddings + text_field: Field name for storing document text + embedding_model: Name of the embedding model used + mapping: Optional index mapping configuration + max_chunk_bytes: Maximum size per bulk request chunk + is_aoss: Whether using Amazon OpenSearch Serverless + + Returns: + List of document IDs that were successfully ingested + """ + if not mapping: + mapping = {} + + requests = [] + return_ids = [] + vector_dimensions = len(embeddings[0]) if embeddings else None + + for i, text in enumerate(texts): + metadata = metadatas[i] if metadatas else {} + if vector_dimensions is not None and "embedding_dimensions" not in metadata: + metadata = {**metadata, "embedding_dimensions": vector_dimensions} + _id = ids[i] if ids else str(uuid.uuid4()) + request = { + "_op_type": "index", + "_index": index_name, + vector_field: embeddings[i], + text_field: text, + "embedding_model": embedding_model, # Track which model was used + **metadata, + } + if is_aoss: + request["id"] = _id + else: + request["_id"] = _id + requests.append(request) + return_ids.append(_id) + if metadatas: + self.log(f"Sample metadata: {metadatas[0] if metadatas else {}}") + helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes) + return return_ids + + # ---------- auth / client ---------- + def _build_auth_kwargs(self) -> dict[str, Any]: + """Build authentication configuration for OpenSearch client. + + Constructs the appropriate authentication parameters based on the + selected auth mode (basic username/password or JWT token). + + Returns: + Dictionary containing authentication configuration + + Raises: + ValueError: If required authentication parameters are missing + """ + mode = (self.auth_mode or "basic").strip().lower() + if mode == "jwt": + token = (self.jwt_token or "").strip() + if not token: + msg = "Auth Mode is 'jwt' but no jwt_token was provided." + raise ValueError(msg) + header_name = (self.jwt_header or "Authorization").strip() + header_value = f"Bearer {token}" if self.bearer_prefix else token + return {"headers": {header_name: header_value}} + user = (self.username or "").strip() + pwd = (self.password or "").strip() + if not user or not pwd: + msg = "Auth Mode is 'basic' but username/password are missing." 
+ raise ValueError(msg) + return {"http_auth": (user, pwd)} + + def build_client(self) -> OpenSearch: + """Create and configure an OpenSearch client instance. + + Returns: + Configured OpenSearch client ready for operations + """ + auth_kwargs = self._build_auth_kwargs() + return OpenSearch( + hosts=[self.opensearch_url], + use_ssl=self.use_ssl, + verify_certs=self.verify_certs, + ssl_assert_hostname=False, + ssl_show_warn=False, + **auth_kwargs, + ) + + @check_cached_vector_store + def build_vector_store(self) -> OpenSearch: + # Return raw OpenSearch client as our "vector store." + client = self.build_client() + + # Check if we're in ingestion-only mode (no search query) + has_search_query = bool((self.search_query or "").strip()) + if not has_search_query: + logger.debug("Ingestion-only mode activated: search operations will be skipped") + logger.debug("Starting ingestion mode...") + + logger.warning(f"Embedding: {self.embedding}") + self._add_documents_to_vector_store(client=client) + return client + + # ---------- ingest ---------- + def _add_documents_to_vector_store(self, client: OpenSearch) -> None: + """Process and ingest documents into the OpenSearch vector store. + + This method handles the complete document ingestion pipeline: + - Prepares document data and metadata + - Generates vector embeddings using the selected model + - Creates appropriate index mappings with dynamic field names + - Bulk inserts documents with vectors and model tracking + + Args: + client: OpenSearch client for performing operations + """ + logger.debug("[INGESTION] _add_documents_to_vector_store called") + # Convert DataFrame to Data if needed using parent's method + self.ingest_data = self._prepare_ingest_data() + + logger.debug( + f"[INGESTION] ingest_data type: " + f"{type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}" + ) + logger.debug( + f"[INGESTION] ingest_data content: " + f"{self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}" + ) + + docs = self.ingest_data or [] + if not docs: + logger.debug("Ingestion complete: No documents provided") + return + + if not self.embedding: + msg = "Embedding handle is required to embed documents." + raise ValueError(msg) + + # Normalize embedding to list first + embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding] + + # Filter out None values (fail-safe mode) - do this BEFORE checking if empty + embeddings_list = [e for e in embeddings_list if e is not None] + + # NOW check if we have any valid embeddings left after filtering + if not embeddings_list: + logger.warning("All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.") + self.log("Embedding returned None (fail-safe mode enabled). 
Skipping document ingestion.") + return + + logger.debug(f"[INGESTION] Valid embeddings after filtering: {len(embeddings_list)}") + self.log(f"Available embedding models: {len(embeddings_list)}") + + # Select the embedding to use for ingestion + selected_embedding = None + embedding_model = None + + # If embedding_model_name is specified, find matching embedding + if hasattr(self, "embedding_model_name") and self.embedding_model_name and self.embedding_model_name.strip(): + target_model_name = self.embedding_model_name.strip() + self.log(f"Looking for embedding model: {target_model_name}") + + for emb_obj in embeddings_list: + # Check all possible model identifiers (deployment, model, model_id, model_name) + # Also check available_models list from EmbeddingsWithModels + possible_names = [] + deployment = getattr(emb_obj, "deployment", None) + model = getattr(emb_obj, "model", None) + model_id = getattr(emb_obj, "model_id", None) + model_name = getattr(emb_obj, "model_name", None) + available_models_attr = getattr(emb_obj, "available_models", None) + + if deployment: + possible_names.append(str(deployment)) + if model: + possible_names.append(str(model)) + if model_id: + possible_names.append(str(model_id)) + if model_name: + possible_names.append(str(model_name)) + + # Also add combined identifier + if deployment and model and deployment != model: + possible_names.append(f"{deployment}:{model}") + + # Add all models from available_models dict + if available_models_attr and isinstance(available_models_attr, dict): + possible_names.extend( + str(model_key).strip() + for model_key in available_models_attr + if model_key and str(model_key).strip() + ) + + # Match if target matches any of the possible names + if target_model_name in possible_names: + # Check if target is in available_models dict - use dedicated instance + if ( + available_models_attr + and isinstance(available_models_attr, dict) + and target_model_name in available_models_attr + ): + # Use the dedicated embedding instance from the dict + selected_embedding = available_models_attr[target_model_name] + embedding_model = target_model_name + self.log(f"Found dedicated embedding instance for '{embedding_model}' in available_models dict") + else: + # Traditional identifier match + selected_embedding = emb_obj + embedding_model = self._get_embedding_model_name(emb_obj) + self.log(f"Found matching embedding model: {embedding_model} (matched on: {target_model_name})") + break + + if not selected_embedding: + # Build detailed list of available embeddings with all their identifiers + available_info = [] + for idx, emb in enumerate(embeddings_list): + emb_type = type(emb).__name__ + identifiers = [] + deployment = getattr(emb, "deployment", None) + model = getattr(emb, "model", None) + model_id = getattr(emb, "model_id", None) + model_name = getattr(emb, "model_name", None) + available_models_attr = getattr(emb, "available_models", None) + + if deployment: + identifiers.append(f"deployment='{deployment}'") + if model: + identifiers.append(f"model='{model}'") + if model_id: + identifiers.append(f"model_id='{model_id}'") + if model_name: + identifiers.append(f"model_name='{model_name}'") + + # Add combined identifier as an option + if deployment and model and deployment != model: + identifiers.append(f"combined='{deployment}:{model}'") + + # Add available_models dict if present + if available_models_attr and isinstance(available_models_attr, dict): + identifiers.append(f"available_models={list(available_models_attr.keys())}") + + 
available_info.append( + f" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}" + ) + + msg = ( + f"Embedding model '{target_model_name}' not found in available embeddings.\n\n" + f"Available embeddings:\n" + "\n".join(available_info) + "\n\n" + "Please set 'embedding_model_name' to one of the identifier values shown above " + "(use the value after the '=' sign, without quotes).\n" + "For duplicate deployments, use the 'combined' format.\n" + "Or leave it empty to use the first embedding." + ) + raise ValueError(msg) + else: + # Use first embedding if no model name specified + selected_embedding = embeddings_list[0] + embedding_model = self._get_embedding_model_name(selected_embedding) + self.log(f"No embedding_model_name specified, using first embedding: {embedding_model}") + + dynamic_field_name = get_embedding_field_name(embedding_model) + + logger.info(f"Selected embedding model for ingestion: '{embedding_model}'") + self.log(f"Using embedding model for ingestion: {embedding_model}") + self.log(f"Dynamic vector field: {dynamic_field_name}") + + # Log embedding details for debugging + if hasattr(selected_embedding, "deployment"): + logger.info(f"Embedding deployment: {selected_embedding.deployment}") + if hasattr(selected_embedding, "model"): + logger.info(f"Embedding model: {selected_embedding.model}") + if hasattr(selected_embedding, "model_id"): + logger.info(f"Embedding model_id: {selected_embedding.model_id}") + if hasattr(selected_embedding, "dimensions"): + logger.info(f"Embedding dimensions: {selected_embedding.dimensions}") + if hasattr(selected_embedding, "available_models"): + logger.info(f"Embedding available_models: {selected_embedding.available_models}") + + # No model switching needed - each model in available_models has its own dedicated instance + # The selected_embedding is already configured correctly for the target model + logger.info(f"Using embedding instance for '{embedding_model}' - pre-configured and ready to use") + + # Extract texts and metadata from documents + texts = [] + metadatas = [] + # Process docs_metadata table input into a dict + additional_metadata = {} + logger.debug(f"[LF] Docs metadata {self.docs_metadata}") + if hasattr(self, "docs_metadata") and self.docs_metadata: + logger.info(f"[LF] Docs metadata {self.docs_metadata}") + if isinstance(self.docs_metadata[-1], Data): + logger.info(f"[LF] Docs metadata is a Data object {self.docs_metadata}") + self.docs_metadata = self.docs_metadata[-1].data + logger.info(f"[LF] Docs metadata is a Data object {self.docs_metadata}") + additional_metadata.update(self.docs_metadata) + else: + for item in self.docs_metadata: + if isinstance(item, dict) and "key" in item and "value" in item: + additional_metadata[item["key"]] = item["value"] + # Replace string "None" values with actual None + for key, value in additional_metadata.items(): + if value == "None": + additional_metadata[key] = None + logger.info(f"[LF] Additional metadata {additional_metadata}") + for doc_obj in docs: + data_copy = json.loads(doc_obj.model_dump_json()) + text = data_copy.pop(doc_obj.text_key, doc_obj.default_value) + texts.append(text) + + # Merge additional metadata from table input + data_copy.update(additional_metadata) + + metadatas.append(data_copy) + self.log(metadatas) + + # Generate embeddings with rate-limit-aware retry logic using tenacity + from tenacity import ( + retry, + retry_if_exception, + stop_after_attempt, + wait_exponential, + ) + + def is_rate_limit_error(exception: Exception) -> 
bool: + """Check if exception is a rate limit error (429).""" + error_str = str(exception).lower() + return "429" in error_str or "rate_limit" in error_str or "rate limit" in error_str + + def is_other_retryable_error(exception: Exception) -> bool: + """Check if exception is retryable but not a rate limit error.""" + # Retry on most exceptions except for specific non-retryable ones + # Add other non-retryable exceptions here if needed + return not is_rate_limit_error(exception) + + # Create retry decorator for rate limit errors (longer backoff) + retry_on_rate_limit = retry( + retry=retry_if_exception(is_rate_limit_error), + stop=stop_after_attempt(5), + wait=wait_exponential(multiplier=2, min=2, max=30), + reraise=True, + before_sleep=lambda retry_state: logger.warning( + f"Rate limit hit for chunk (attempt {retry_state.attempt_number}/5), " + f"backing off for {retry_state.next_action.sleep:.1f}s" + ), + ) + + # Create retry decorator for other errors (shorter backoff) + retry_on_other_errors = retry( + retry=retry_if_exception(is_other_retryable_error), + stop=stop_after_attempt(3), + wait=wait_exponential(multiplier=1, min=1, max=8), + reraise=True, + before_sleep=lambda retry_state: logger.warning( + f"Error embedding chunk (attempt {retry_state.attempt_number}/3), " + f"retrying in {retry_state.next_action.sleep:.1f}s: {retry_state.outcome.exception()}" + ), + ) + + def embed_chunk_with_retry(chunk_text: str, chunk_idx: int) -> list[float]: + """Embed a single chunk with rate-limit-aware retry logic.""" + + @retry_on_rate_limit + @retry_on_other_errors + def _embed(text: str) -> list[float]: + return selected_embedding.embed_documents([text])[0] + + try: + return _embed(chunk_text) + except Exception as e: + logger.error( + f"Failed to embed chunk {chunk_idx} after all retries: {e}", + error=str(e), + ) + raise + + # Restrict concurrency for IBM/Watsonx models to avoid rate limits + is_ibm = (embedding_model and "ibm" in str(embedding_model).lower()) or ( + selected_embedding and "watsonx" in type(selected_embedding).__name__.lower() + ) + logger.debug(f"Is IBM: {is_ibm}") + + # For IBM models, use sequential processing with rate limiting + # For other models, use parallel processing + vectors: list[list[float]] = [None] * len(texts) + + if is_ibm: + # Sequential processing with inter-request delay for IBM models + inter_request_delay = 0.6 # ~1.67 req/s, safely under 2 req/s limit + logger.info( + f"Using sequential processing for IBM model with {inter_request_delay}s delay between requests" + ) + + for idx, chunk in enumerate(texts): + if idx > 0: + # Add delay between requests (but not before the first one) + time.sleep(inter_request_delay) + vectors[idx] = embed_chunk_with_retry(chunk, idx) + else: + # Parallel processing for non-IBM models + max_workers = min(max(len(texts), 1), 8) + logger.debug(f"Using parallel processing with {max_workers} workers") + + with ThreadPoolExecutor(max_workers=max_workers) as executor: + futures = {executor.submit(embed_chunk_with_retry, chunk, idx): idx for idx, chunk in enumerate(texts)} + for future in as_completed(futures): + idx = futures[future] + vectors[idx] = future.result() + + if not vectors: + self.log(f"No vectors generated from documents for model {embedding_model}.") + return + + # Get vector dimension for mapping + dim = len(vectors[0]) if vectors else 768 # default fallback + + # Check for AOSS + auth_kwargs = self._build_auth_kwargs() + is_aoss = self._is_aoss_enabled(auth_kwargs.get("http_auth")) + + # Validate engine with AOSS 
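+        # (Serverless collections accept only the nmslib and faiss engines, so the
+        # check below fails fast before any index creation or bulk write happens.)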
+ engine = getattr(self, "engine", "jvector") + self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine) + + # Create mapping with proper KNN settings + space_type = getattr(self, "space_type", "l2") + ef_construction = getattr(self, "ef_construction", 512) + m = getattr(self, "m", 16) + + mapping = self._default_text_mapping( + dim=dim, + engine=engine, + space_type=space_type, + ef_construction=ef_construction, + m=m, + vector_field=dynamic_field_name, # Use dynamic field name + ) + + # Ensure index exists with baseline mapping + try: + if not client.indices.exists(index=self.index_name): + self.log(f"Creating index '{self.index_name}' with base mapping") + client.indices.create(index=self.index_name, body=mapping) + except RequestError as creation_error: + if creation_error.error != "resource_already_exists_exception": + logger.warning(f"Failed to create index '{self.index_name}': {creation_error}") + + # Ensure the dynamic field exists in the index + self._ensure_embedding_field_mapping( + client=client, + index_name=self.index_name, + field_name=dynamic_field_name, + dim=dim, + engine=engine, + space_type=space_type, + ef_construction=ef_construction, + m=m, + ) + + self.log(f"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...") + logger.info(f"Will store embeddings in field: {dynamic_field_name}") + logger.info(f"Will tag documents with embedding_model: {embedding_model}") + + # Use the bulk ingestion with model tracking + return_ids = self._bulk_ingest_embeddings( + client=client, + index_name=self.index_name, + embeddings=vectors, + texts=texts, + metadatas=metadatas, + vector_field=dynamic_field_name, # Use dynamic field name + text_field="text", + embedding_model=embedding_model, # Track the model + mapping=mapping, + is_aoss=is_aoss, + ) + self.log(metadatas) + + logger.info( + f"Ingestion complete: Successfully indexed {len(return_ids)} documents with model '{embedding_model}'" + ) + self.log(f"Successfully indexed {len(return_ids)} documents with model {embedding_model}.") + + # ---------- helpers for filters ---------- + def _is_placeholder_term(self, term_obj: dict) -> bool: + # term_obj like {"filename": "__IMPOSSIBLE_VALUE__"} + return any(v == "__IMPOSSIBLE_VALUE__" for v in term_obj.values()) + + def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]: + """Convert filter expressions into OpenSearch-compatible filter clauses. 
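+
+        For instance, a context-style input of {"owners": ["alice", "bob"]}
+        (values illustrative) is mapped to [{"terms": {"owner": ["alice", "bob"]}}].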
+ + This method accepts two filter formats and converts them to standardized + OpenSearch query clauses: + + Format A - Explicit filters: + {"filter": [{"term": {"field": "value"}}, {"terms": {"field": ["val1", "val2"]}}], + "limit": 10, "score_threshold": 1.5} + + Format B - Context-style mapping: + {"data_sources": ["file1.pdf"], "document_types": ["pdf"], "owners": ["user1"]} + + Args: + filter_obj: Filter configuration dictionary or None + + Returns: + List of OpenSearch filter clauses (term/terms objects) + Placeholder values with "__IMPOSSIBLE_VALUE__" are ignored + """ + if not filter_obj: + return [] + + # If it is a string, try to parse it once + if isinstance(filter_obj, str): + try: + filter_obj = json.loads(filter_obj) + except json.JSONDecodeError: + # Not valid JSON - treat as no filters + return [] + + # Case A: already an explicit list/dict under "filter" + if "filter" in filter_obj: + raw = filter_obj["filter"] + if isinstance(raw, dict): + raw = [raw] + explicit_clauses: list[dict] = [] + for f in raw or []: + if "term" in f and isinstance(f["term"], dict) and not self._is_placeholder_term(f["term"]): + explicit_clauses.append(f) + elif "terms" in f and isinstance(f["terms"], dict): + field, vals = next(iter(f["terms"].items())) + if isinstance(vals, list) and len(vals) > 0: + explicit_clauses.append(f) + return explicit_clauses + + # Case B: convert context-style maps into clauses + field_mapping = { + "data_sources": "filename", + "document_types": "mimetype", + "owners": "owner", + } + context_clauses: list[dict] = [] + for k, values in filter_obj.items(): + if not isinstance(values, list): + continue + field = field_mapping.get(k, k) + if len(values) == 0: + # Match-nothing placeholder (kept to mirror your tool semantics) + context_clauses.append({"term": {field: "__IMPOSSIBLE_VALUE__"}}) + elif len(values) == 1: + if values[0] != "__IMPOSSIBLE_VALUE__": + context_clauses.append({"term": {field: values[0]}}) + else: + context_clauses.append({"terms": {field: values}}) + return context_clauses + + def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]: + """Detect which embedding models have documents in the index. + + Uses aggregation to find all unique embedding_model values, optionally + filtered to only documents matching the user's filter criteria. + + Args: + client: OpenSearch client instance + filter_clauses: Optional filter clauses to scope model detection + + Returns: + List of embedding model names found in the index + """ + try: + agg_query = {"size": 0, "aggs": {"embedding_models": {"terms": {"field": "embedding_model", "size": 10}}}} + + # Apply filters to model detection if any exist + if filter_clauses: + agg_query["query"] = {"bool": {"filter": filter_clauses}} + + logger.debug(f"Model detection query: {agg_query}") + result = client.search( + index=self.index_name, + body=agg_query, + params={"terminate_after": 0}, + ) + buckets = result.get("aggregations", {}).get("embedding_models", {}).get("buckets", []) + models = [b["key"] for b in buckets if b["key"]] + + # Log detailed bucket info for debugging + logger.info( + f"Detected embedding models in corpus: {models}" + + (f" (with {len(filter_clauses)} filters)" if filter_clauses else "") + ) + if not models: + total_hits = result.get("hits", {}).get("total", {}) + total_count = total_hits.get("value", 0) if isinstance(total_hits, dict) else total_hits + logger.warning( + f"No embedding_model values found in index '{self.index_name}'. 
" + f"Total docs in index: {total_count}. " + f"This may indicate documents were indexed without the embedding_model field." + ) + except (OpenSearchException, KeyError, ValueError) as e: + logger.warning(f"Failed to detect embedding models: {e}") + # Fallback to current model + fallback_model = self._get_embedding_model_name() + logger.info(f"Using fallback model: {fallback_model}") + return [fallback_model] + else: + return models + + def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None: + """Retrieve flattened mapping properties for the current index.""" + try: + mapping = client.indices.get_mapping(index=self.index_name) + except OpenSearchException as e: + logger.warning( + f"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata." + ) + return None + + properties: dict[str, Any] = {} + for index_data in mapping.values(): + props = index_data.get("mappings", {}).get("properties", {}) + if isinstance(props, dict): + properties.update(props) + return properties + + def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool: + """Check whether the field is mapped as a knn_vector.""" + if not field_name: + return False + if properties is None: + logger.warning(f"Mapping metadata unavailable; assuming field '{field_name}' is usable.") + return True + field_def = properties.get(field_name) + if not isinstance(field_def, dict): + return False + if field_def.get("type") == "knn_vector": + return True + + nested_props = field_def.get("properties") + return bool(isinstance(nested_props, dict) and nested_props.get("type") == "knn_vector") + + def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None: + """Get the dimension of a knn_vector field from the index mapping. + + Args: + properties: Index properties from mapping + field_name: Name of the vector field + + Returns: + Dimension of the field, or None if not found + """ + if not field_name or properties is None: + return None + + field_def = properties.get(field_name) + if not isinstance(field_def, dict): + return None + + # Check direct knn_vector field + if field_def.get("type") == "knn_vector": + return field_def.get("dimension") + + # Check nested properties + nested_props = field_def.get("properties") + if isinstance(nested_props, dict) and nested_props.get("type") == "knn_vector": + return nested_props.get("dimension") + + return None + + # ---------- search (multi-model hybrid) ---------- + def search(self, query: str | None = None) -> list[dict[str, Any]]: + """Perform multi-model hybrid search combining multiple vector similarities and keyword matching. + + This method executes a sophisticated search that: + 1. Auto-detects all embedding models present in the index + 2. Generates query embeddings for ALL detected models in parallel + 3. Combines multiple KNN queries using dis_max (picks best match) + 4. Adds keyword search with fuzzy matching (30% weight) + 5. Applies optional filtering and score thresholds + 6. 
Returns aggregations for faceted search
+
+        Search weights:
+        - Semantic search (dis_max across all models): 70%
+        - Keyword search: 30%
+
+        Args:
+            query: Search query string (used for both vector embedding and keyword search)
+
+        Returns:
+            List of search results with page_content, metadata, and relevance scores
+
+        Raises:
+            ValueError: If embedding component is not provided or filter JSON is invalid
+        """
+        logger.debug(f"Ingest data at search time: {self.ingest_data}")
+        client = self.build_client()
+        q = (query or "").strip()
+
+        # Parse optional filter expression
+        filter_obj = None
+        if getattr(self, "filter_expression", "") and self.filter_expression.strip():
+            try:
+                filter_obj = json.loads(self.filter_expression)
+            except json.JSONDecodeError as e:
+                msg = f"Invalid filter_expression JSON: {e}"
+                raise ValueError(msg) from e
+
+        if not self.embedding:
+            msg = "Embedding is required to run hybrid search (KNN + keyword)."
+            raise ValueError(msg)
+
+        # Check for all-None embeddings (fail-safe mode); a missing embedding was already caught above
+        if isinstance(self.embedding, list) and all(e is None for e in self.embedding):
+            logger.error("Embedding returned None (fail-safe mode enabled). Cannot perform search.")
+            return []
+
+        # Build filter clauses first so we can use them in model detection
+        filter_clauses = self._coerce_filter_clauses(filter_obj)
+
+        # Detect available embedding models in the index (scoped by filters)
+        available_models = self._detect_available_models(client, filter_clauses)
+
+        if not available_models:
+            logger.warning("No embedding models found in index, using current model")
+            available_models = [self._get_embedding_model_name()]
+
+        # Generate embeddings for ALL detected models
+        query_embeddings = {}
+
+        # Normalize embedding to list
+        embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]
+        # Filter out None values (fail-safe mode)
+        embeddings_list = [e for e in embeddings_list if e is not None]
+
+        if not embeddings_list:
+            logger.error(
+                "No valid embeddings available after filtering None values (fail-safe mode). Cannot perform search."
+ ) + return [] + + # Create a comprehensive map of model names to embedding objects + # Check all possible identifiers (deployment, model, model_id, model_name) + # Also leverage available_models list from EmbeddingsWithModels + # Handle duplicate identifiers by creating combined keys + embedding_by_model = {} + identifier_conflicts = {} # Track which identifiers have conflicts + + for idx, emb_obj in enumerate(embeddings_list): + # Get all possible identifiers for this embedding + identifiers = [] + deployment = getattr(emb_obj, "deployment", None) + model = getattr(emb_obj, "model", None) + model_id = getattr(emb_obj, "model_id", None) + model_name = getattr(emb_obj, "model_name", None) + dimensions = getattr(emb_obj, "dimensions", None) + available_models_attr = getattr(emb_obj, "available_models", None) + + logger.info( + f"Embedding object {idx}: deployment={deployment}, model={model}, " + f"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, " + f"available_models={available_models_attr}" + ) + + # If this embedding has available_models dict, map all models to their dedicated instances + if available_models_attr and isinstance(available_models_attr, dict): + logger.info( + f"Embedding object {idx} provides {len(available_models_attr)} models via available_models dict" + ) + for model_name_key, dedicated_embedding in available_models_attr.items(): + if model_name_key and str(model_name_key).strip(): + model_str = str(model_name_key).strip() + if model_str not in embedding_by_model: + # Use the dedicated embedding instance from the dict + embedding_by_model[model_str] = dedicated_embedding + logger.info(f"Mapped available model '{model_str}' to dedicated embedding instance") + else: + # Conflict detected - track it + if model_str not in identifier_conflicts: + identifier_conflicts[model_str] = [embedding_by_model[model_str]] + identifier_conflicts[model_str].append(dedicated_embedding) + logger.warning(f"Available model '{model_str}' has conflict - used by multiple embeddings") + + # Also map traditional identifiers (for backward compatibility) + if deployment: + identifiers.append(str(deployment)) + if model: + identifiers.append(str(model)) + if model_id: + identifiers.append(str(model_id)) + if model_name: + identifiers.append(str(model_name)) + + # Map all identifiers to this embedding object + for identifier in identifiers: + if identifier not in embedding_by_model: + embedding_by_model[identifier] = emb_obj + logger.info(f"Mapped identifier '{identifier}' to embedding object {idx}") + else: + # Conflict detected - track it + if identifier not in identifier_conflicts: + identifier_conflicts[identifier] = [embedding_by_model[identifier]] + identifier_conflicts[identifier].append(emb_obj) + logger.warning(f"Identifier '{identifier}' has conflict - used by multiple embeddings") + + # For embeddings with model+deployment, create combined identifier + # This helps when deployment is the same but model differs + if deployment and model and deployment != model: + combined_id = f"{deployment}:{model}" + if combined_id not in embedding_by_model: + embedding_by_model[combined_id] = emb_obj + logger.info(f"Created combined identifier '{combined_id}' for embedding object {idx}") + + # Log conflicts + if identifier_conflicts: + logger.warning( + f"Found {len(identifier_conflicts)} conflicting identifiers. " + f"Consider using combined format 'deployment:model' or specifying unique model names." 
+ ) + for conflict_id, emb_list in identifier_conflicts.items(): + logger.warning(f" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier") + + logger.info(f"Generating embeddings for {len(available_models)} models in index") + logger.info(f"Available embedding identifiers: {list(embedding_by_model.keys())}") + self.log(f"[SEARCH] Models detected in index: {available_models}") + self.log(f"[SEARCH] Available embedding identifiers: {list(embedding_by_model.keys())}") + + # Track matching status for debugging + matched_models = [] + unmatched_models = [] + + for model_name in available_models: + try: + # Check if we have an embedding object for this model + if model_name in embedding_by_model: + # Use the matching embedding object directly + emb_obj = embedding_by_model[model_name] + emb_deployment = getattr(emb_obj, "deployment", None) + emb_model = getattr(emb_obj, "model", None) + emb_model_id = getattr(emb_obj, "model_id", None) + emb_dimensions = getattr(emb_obj, "dimensions", None) + emb_available_models = getattr(emb_obj, "available_models", None) + + logger.info( + f"Using embedding object for model '{model_name}': " + f"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, " + f"dimensions={emb_dimensions}" + ) + + # Check if this is a dedicated instance from available_models dict + if emb_available_models and isinstance(emb_available_models, dict): + logger.info( + f"Model '{model_name}' using dedicated instance from available_models dict " + f"(pre-configured with correct model and dimensions)" + ) + + # Use the embedding instance directly - no model switching needed! + vec = emb_obj.embed_query(q) + query_embeddings[model_name] = vec + matched_models.append(model_name) + logger.info(f"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})") + self.log(f"[MATCH] Model '{model_name}' - generated {len(vec)}-dim embedding") + else: + # No matching embedding found for this model + unmatched_models.append(model_name) + logger.warning( + f"No matching embedding found for model '{model_name}'. " + f"This model will be skipped. Available identifiers: {list(embedding_by_model.keys())}" + ) + self.log(f"[NO MATCH] Model '{model_name}' - available: {list(embedding_by_model.keys())}") + except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e: + logger.warning(f"Failed to generate embedding for {model_name}: {e}") + self.log(f"[ERROR] Embedding generation failed for '{model_name}': {e}") + + # Log summary of model matching + logger.info(f"Model matching summary: {len(matched_models)} matched, {len(unmatched_models)} unmatched") + self.log(f"[SUMMARY] Model matching: {len(matched_models)} matched, {len(unmatched_models)} unmatched") + if unmatched_models: + self.log(f"[WARN] Unmatched models in index: {unmatched_models}") + + if not query_embeddings: + msg = ( + f"Failed to generate embeddings for any model. " + f"Index has models: {available_models}, but no matching embedding objects found. 
" + f"Available embedding identifiers: {list(embedding_by_model.keys())}" + ) + self.log(f"[FAIL] Search failed: {msg}") + raise ValueError(msg) + + index_properties = self._get_index_properties(client) + legacy_vector_field = getattr(self, "vector_field", "chunk_embedding") + + # Build KNN queries for each model + embedding_fields: list[str] = [] + knn_queries_with_candidates = [] + knn_queries_without_candidates = [] + + raw_num_candidates = getattr(self, "num_candidates", 1000) + try: + num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0 + except (TypeError, ValueError): + num_candidates = 0 + use_num_candidates = num_candidates > 0 + + for model_name, embedding_vector in query_embeddings.items(): + field_name = get_embedding_field_name(model_name) + selected_field = field_name + vector_dim = len(embedding_vector) + + # Only use the expected dynamic field - no legacy fallback + # This prevents dimension mismatches between models + if not self._is_knn_vector_field(index_properties, selected_field): + logger.warning( + f"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. " + f"Documents must be indexed with this embedding model before querying." + ) + self.log(f"[SKIP] Field '{selected_field}' not a knn_vector - skipping model '{model_name}'") + continue + + # Validate vector dimensions match the field dimensions + field_dim = self._get_field_dimension(index_properties, selected_field) + if field_dim is not None and field_dim != vector_dim: + logger.error( + f"Dimension mismatch for model '{model_name}': " + f"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. " + f"Skipping this model to prevent search errors." + ) + self.log(f"[DIM MISMATCH] Model '{model_name}': query={vector_dim} vs field={field_dim} - skipping") + continue + + logger.info( + f"Adding KNN query for model '{model_name}': field='{selected_field}', " + f"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}" + ) + embedding_fields.append(selected_field) + + base_query = { + "knn": { + selected_field: { + "vector": embedding_vector, + "k": 50, + } + } + } + + if use_num_candidates: + query_with_candidates = copy.deepcopy(base_query) + query_with_candidates["knn"][selected_field]["num_candidates"] = num_candidates + else: + query_with_candidates = base_query + + knn_queries_with_candidates.append(query_with_candidates) + knn_queries_without_candidates.append(base_query) + + if not knn_queries_with_candidates: + # No valid fields found - this can happen when: + # 1. Index is empty (no documents yet) + # 2. Embedding model has changed and field doesn't exist yet + # Return empty results instead of failing + logger.warning( + "No valid knn_vector fields found for embedding models. " + "This may indicate an empty index or missing field mappings. " + "Returning empty search results." + ) + self.log( + f"[WARN] No valid KNN queries could be built. " + f"Query embeddings generated: {list(query_embeddings.keys())}, " + f"but no matching knn_vector fields found in index." 
+ ) + return [] + + # Build exists filter - document must have at least one embedding field + exists_any_embedding = { + "bool": {"should": [{"exists": {"field": f}} for f in set(embedding_fields)], "minimum_should_match": 1} + } + + # Combine user filters with exists filter + all_filters = [*filter_clauses, exists_any_embedding] + + # Get limit and score threshold + limit = (filter_obj or {}).get("limit", self.number_of_results) + score_threshold = (filter_obj or {}).get("score_threshold", 0) + + # Build multi-model hybrid query + body = { + "query": { + "bool": { + "should": [ + { + "dis_max": { + "tie_breaker": 0.0, # Take only the best match, no blending + "boost": 0.7, # 70% weight for semantic search + "queries": knn_queries_with_candidates, + } + }, + { + "multi_match": { + "query": q, + "fields": ["text^2", "filename^1.5"], + "type": "best_fields", + "fuzziness": "AUTO", + "boost": 0.3, # 30% weight for keyword search + } + }, + ], + "minimum_should_match": 1, + "filter": all_filters, + } + }, + "aggs": { + "data_sources": {"terms": {"field": "filename", "size": 20}}, + "document_types": {"terms": {"field": "mimetype", "size": 10}}, + "owners": {"terms": {"field": "owner", "size": 10}}, + "embedding_models": {"terms": {"field": "embedding_model", "size": 10}}, + }, + "_source": [ + "filename", + "mimetype", + "page", + "text", + "source_url", + "owner", + "embedding_model", + "allowed_users", + "allowed_groups", + ], + "size": limit, + } + + if isinstance(score_threshold, (int, float)) and score_threshold > 0: + body["min_score"] = score_threshold + + logger.info( + f"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models: " + f"{list(query_embeddings.keys())}" + ) + self.log(f"[EXEC] Executing search with {len(knn_queries_with_candidates)} KNN queries, limit={limit}") + self.log(f"[EXEC] Embedding models used: {list(query_embeddings.keys())}") + self.log(f"[EXEC] KNN fields being queried: {embedding_fields}") + + try: + resp = client.search(index=self.index_name, body=body, params={"terminate_after": 0}) + except RequestError as e: + error_message = str(e) + lowered = error_message.lower() + if use_num_candidates and "num_candidates" in lowered: + logger.warning( + "Retrying search without num_candidates parameter due to cluster capabilities", + error=error_message, + ) + fallback_body = copy.deepcopy(body) + try: + fallback_body["query"]["bool"]["should"][0]["dis_max"]["queries"] = knn_queries_without_candidates + except (KeyError, IndexError, TypeError) as inner_err: + raise e from inner_err + resp = client.search( + index=self.index_name, + body=fallback_body, + params={"terminate_after": 0}, + ) + elif "knn_vector" in lowered or ("field" in lowered and "knn" in lowered): + fallback_vector = next(iter(query_embeddings.values()), None) + if fallback_vector is None: + raise + fallback_field = legacy_vector_field or "chunk_embedding" + logger.warning( + "KNN search failed for dynamic fields; falling back to legacy field '%s'.", + fallback_field, + ) + fallback_body = copy.deepcopy(body) + fallback_body["query"]["bool"]["filter"] = filter_clauses + knn_fallback = { + "knn": { + fallback_field: { + "vector": fallback_vector, + "k": 50, + } + } + } + if use_num_candidates: + knn_fallback["knn"][fallback_field]["num_candidates"] = num_candidates + fallback_body["query"]["bool"]["should"][0]["dis_max"]["queries"] = [knn_fallback] + resp = client.search( + index=self.index_name, + body=fallback_body, + params={"terminate_after": 0}, + ) + else: + 
raise + hits = resp.get("hits", {}).get("hits", []) + + logger.info(f"Found {len(hits)} results") + self.log(f"[RESULT] Search complete: {len(hits)} results found") + + if len(hits) == 0: + self.log( + f"[EMPTY] Debug info: " + f"models_in_index={available_models}, " + f"matched_models={matched_models}, " + f"knn_fields={embedding_fields}, " + f"filters={len(filter_clauses)} clauses" + ) + + return [ + { + "page_content": hit["_source"].get("text", ""), + "metadata": {k: v for k, v in hit["_source"].items() if k != "text"}, + "score": hit.get("_score"), + } + for hit in hits + ] + + def search_documents(self) -> list[Data]: + """Search documents and return results as Data objects. + + This is the main interface method that performs the multi-model search using the + configured search_query and returns results in Langflow's Data format. + + Always builds the vector store (triggering ingestion if needed), then performs + search only if a query is provided. + + Returns: + List of Data objects containing search results with text and metadata + + Raises: + Exception: If search operation fails + """ + try: + # Always build/cache the vector store to ensure ingestion happens + logger.info(f"Search query: {self.search_query}") + if self._cached_vector_store is None: + self.build_vector_store() + + # Only perform search if query is provided + search_query = (self.search_query or "").strip() + if not search_query: + self.log("No search query provided - ingestion completed, returning empty results") + return [] + + # Perform search with the provided query + raw = self.search(search_query) + return [Data(text=hit["page_content"], **hit["metadata"]) for hit in raw] + except Exception as e: + self.log(f"search_documents error: {e}") + raise + + # -------- dynamic UI handling (auth switch) -------- + async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict: + """Dynamically update component configuration based on field changes. + + This method handles real-time UI updates, particularly for authentication + mode changes that show/hide relevant input fields. 
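+
+        Example (illustrative; ``component`` stands in for an instance)::
+
+            cfg = await component.update_build_config(cfg, "jwt", "auth_mode")
+            # cfg["jwt_token"]["show"] is now True, while the
+            # username/password inputs are hidden and no longer required.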
+ + Args: + build_config: Current component configuration + field_value: New value for the changed field + field_name: Name of the field that changed + + Returns: + Updated build configuration with appropriate field visibility + """ + try: + if field_name == "auth_mode": + mode = (field_value or "basic").strip().lower() + is_basic = mode == "basic" + is_jwt = mode == "jwt" + + build_config["username"]["show"] = is_basic + build_config["password"]["show"] = is_basic + + build_config["jwt_token"]["show"] = is_jwt + build_config["jwt_header"]["show"] = is_jwt + build_config["bearer_prefix"]["show"] = is_jwt + + build_config["username"]["required"] = is_basic + build_config["password"]["required"] = is_basic + + build_config["jwt_token"]["required"] = is_jwt + build_config["jwt_header"]["required"] = is_jwt + build_config["bearer_prefix"]["required"] = False + + return build_config + + except (KeyError, ValueError) as e: + self.log(f"update_build_config error: {e}") + + return build_config diff --git a/flows/ingestion_flow.json b/flows/ingestion_flow.json index 39c73c40..25a5cefd 100644 --- a/flows/ingestion_flow.json +++ b/flows/ingestion_flow.json @@ -3603,7 +3603,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based 
search for optimal results. It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
\"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n logger.aerror(msg)\n raise ValueError(msg)\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. 
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n client = self.build_client()\n\n # Check if we're in ingestion-only mode (no search query)\n has_search_query = bool((self.search_query or \"\").strip())\n if not has_search_query:\n logger.debug(\"Ingestion-only mode activated: search 
operations will be skipped\")\n logger.debug(\"Starting ingestion mode...\")\n\n logger.warning(f\"Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n logger.debug(\"[INGESTION] _add_documents_to_vector_store called\")\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n logger.debug(\n f\"[INGESTION] ingest_data type: \"\n f\"{type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}\"\n )\n logger.debug(\n f\"[INGESTION] ingest_data content: \"\n f\"{self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}\"\n )\n\n docs = self.ingest_data or []\n if not docs:\n logger.debug(\"Ingestion complete: No documents provided\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Normalize embedding to list first\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n # Filter out None values (fail-safe mode) - do this BEFORE checking if empty\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n # NOW check if we have any valid embeddings left after filtering\n if not embeddings_list:\n logger.warning(\"All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.\")\n self.log(\"Embedding returned None (fail-safe mode enabled). 
Skipping document ingestion.\")\n return\n\n logger.debug(f\"[INGESTION] Valid embeddings after filtering: {len(embeddings_list)}\")\n self.log(f\"Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = []\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n 
identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n logger.info(f\"Selected embedding model for ingestion: '{embedding_model}'\")\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n logger.debug(f\"[LF] Docs metadata {self.docs_metadata}\")\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings (threaded for concurrency) with retries\n def 
embed_chunk(chunk_text: str) -> list[float]:\n return selected_embedding.embed_documents([chunk_text])[0]\n\n vectors: list[list[float]] | None = None\n last_exception: Exception | None = None\n delay = 1.0\n attempts = 0\n max_attempts = 3\n\n while attempts < max_attempts:\n attempts += 1\n try:\n # Restrict concurrency for IBM/Watsonx models to avoid rate limits\n is_ibm = (embedding_model and \"ibm\" in str(embedding_model).lower()) or (\n selected_embedding and \"watsonx\" in type(selected_embedding).__name__.lower()\n )\n logger.debug(f\"Is IBM: {is_ibm}\")\n max_workers = 1 if is_ibm else min(max(len(texts), 1), 8)\n\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n vectors = [None] * len(texts)\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n break\n except Exception as exc:\n last_exception = exc\n if attempts >= max_attempts:\n logger.error(\n f\"Embedding generation failed for model {embedding_model} after retries\",\n error=str(exc),\n )\n raise\n logger.warning(\n \"Threaded embedding generation failed for model %s (attempt %s/%s), retrying in %.1fs\",\n embedding_model,\n attempts,\n max_attempts,\n delay,\n )\n time.sleep(delay)\n delay = min(delay * 2, 8.0)\n\n if vectors is None:\n raise RuntimeError(\n f\"Embedding generation failed for {embedding_model}: {last_exception}\"\n if last_exception\n else f\"Embedding generation failed for {embedding_model}\"\n )\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n 
embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n logger.info(\n f\"Ingestion complete: Successfully indexed {len(return_ids)} documents with model '{embedding_model}'\"\n )\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": 
{\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n logger.debug(f\"Model detection query: {agg_query}\")\n result = client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n # Log detailed bucket info for debugging\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n if not models:\n total_hits = result.get(\"hits\", {}).get(\"total\", {})\n total_count = total_hits.get(\"value\", 0) if isinstance(total_hits, dict) else total_hits\n logger.warning(\n f\"No embedding_model values found in index '{self.index_name}'. \"\n f\"Total docs in index: {total_count}. \"\n f\"This may indicate documents were indexed without the embedding_model field.\"\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n fallback_model = self._get_embedding_model_name()\n logger.info(f\"Using fallback model: {fallback_model}\")\n return [fallback_model]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. 
Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Check if embedding is None (fail-safe mode)\n if self.embedding is None or (isinstance(self.embedding, list) and all(e is None for e in self.embedding)):\n logger.error(\"Embedding returned None (fail-safe mode enabled). 
Cannot perform search.\")\n return []\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n # Filter out None values (fail-safe mode)\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n if not embeddings_list:\n logger.error(\n \"No valid embeddings available after filtering None values (fail-safe mode). Cannot perform search.\"\n )\n return []\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models_attr}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models_attr and isinstance(available_models_attr, dict):\n logger.info(\n f\"Embedding object {idx} provides {len(available_models_attr)} models via available_models dict\"\n )\n for model_name_key, dedicated_embedding in available_models_attr.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model '{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - 
track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. \"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n self.log(f\"[SEARCH] Models detected in index: {available_models}\")\n self.log(f\"[SEARCH] Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n # Track matching status for debugging\n matched_models = []\n unmatched_models = []\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n matched_models.append(model_name)\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n self.log(f\"[MATCH] Model '{model_name}' - generated {len(vec)}-dim embedding\")\n else:\n # No matching embedding found for this model\n unmatched_models.append(model_name)\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. \"\n f\"This model will be skipped. 
Available identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[NO MATCH] Model '{model_name}' - available: {list(embedding_by_model.keys())}\")\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n self.log(f\"[ERROR] Embedding generation failed for '{model_name}': {e}\")\n\n # Log summary of model matching\n logger.info(f\"Model matching summary: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n self.log(f\"[SUMMARY] Model matching: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n if unmatched_models:\n self.log(f\"[WARN] Unmatched models in index: {unmatched_models}\")\n\n if not query_embeddings:\n msg = (\n f\"Failed to generate embeddings for any model. \"\n f\"Index has models: {available_models}, but no matching embedding objects found. \"\n f\"Available embedding identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[FAIL] Search failed: {msg}\")\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n self.log(f\"[SKIP] Field '{selected_field}' not a knn_vector - skipping model '{model_name}'\")\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n self.log(f\"[DIM MISMATCH] Model '{model_name}': query={vector_dim} vs field={field_dim} - skipping\")\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. 
Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n self.log(\n f\"[WARN] No valid KNN queries could be built. \"\n f\"Query embeddings generated: {list(query_embeddings.keys())}, \"\n f\"but no matching knn_vector fields found in index.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(\n f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models: \"\n f\"{list(query_embeddings.keys())}\"\n )\n self.log(f\"[EXEC] Executing search with {len(knn_queries_with_candidates)} KNN queries, limit={limit}\")\n self.log(f\"[EXEC] Embedding models used: {list(query_embeddings.keys())}\")\n self.log(f\"[EXEC] KNN fields being queried: {embedding_fields}\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n 
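For reference, a trimmed sketch of the request body assembled above, using two hypothetical model fields and a placeholder query; only the weighting and filter structure are shown, the full body above is authoritative:

# Illustrative sketch only: shape of the multi-model hybrid query, assuming
# two hypothetical fields (chunk_embedding_model_a / chunk_embedding_model_b).
query_vector_a = [0.1] * 768   # placeholder 768-dim vector
query_vector_b = [0.2] * 1024  # placeholder 1024-dim vector

sketch_body = {
    "query": {
        "bool": {
            "should": [
                {
                    "dis_max": {
                        "tie_breaker": 0.0,  # best-scoring model wins outright
                        "boost": 0.7,        # 70% semantic weight
                        "queries": [
                            {"knn": {"chunk_embedding_model_a": {"vector": query_vector_a, "k": 50}}},
                            {"knn": {"chunk_embedding_model_b": {"vector": query_vector_b, "k": 50}}},
                        ],
                    }
                },
                {
                    "multi_match": {
                        "query": "quarterly revenue summary",  # placeholder query text
                        "fields": ["text^2", "filename^1.5"],
                        "fuzziness": "AUTO",
                        "boost": 0.3,  # 30% keyword weight
                    }
                },
            ],
            "minimum_should_match": 1,
            # a hit must carry at least one of the embedding fields
            "filter": [
                {
                    "bool": {
                        "should": [
                            {"exists": {"field": "chunk_embedding_model_a"}},
                            {"exists": {"field": "chunk_embedding_model_b"}},
                        ],
                        "minimum_should_match": 1,
                    }
                }
            ],
        }
    },
    "size": 10,
}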
fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n self.log(f\"[RESULT] Search complete: {len(hits)} results found\")\n\n if len(hits) == 0:\n self.log(\n f\"[EMPTY] Debug info: \"\n f\"models_in_index={available_models}, \"\n f\"matched_models={matched_models}, \"\n f\"knn_fields={embedding_fields}, \"\n f\"filters={len(filter_clauses)} clauses\"\n )\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Always builds the vector store (triggering ingestion if needed), then performs\n search only if a query is provided.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n # Always build/cache the vector store to ensure ingestion happens\n logger.info(f\"Search query: {self.search_query}\")\n if self._cached_vector_store is None:\n self.build_vector_store()\n\n # Only perform search if query is provided\n search_query = (self.search_query or \"\").strip()\n if not search_query:\n self.log(\"No search query provided - ingestion completed, returning empty results\")\n return []\n\n # Perform search with the provided query\n raw = self.search(search_query)\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n 
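The two-stage RequestError recovery above is easier to see in isolation; a condensed sketch of the same control flow, with placeholder names and only the num_candidates stage spelled out:

import copy

from opensearchpy.exceptions import RequestError


def search_with_fallback(client, index_name, body, queries_without_candidates):
    """Condensed sketch: retry once without num_candidates, else re-raise."""
    try:
        return client.search(index=index_name, body=body)
    except RequestError as e:
        if "num_candidates" in str(e).lower():
            # Cluster rejects num_candidates: strip it from every KNN clause.
            fallback_body = copy.deepcopy(body)
            fallback_body["query"]["bool"]["should"][0]["dis_max"]["queries"] = queries_without_candidates
            return client.search(index=index_name, body=fallback_body)
        raise  # the legacy-field fallback above handles the knn_vector case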
build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n" + "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. 
It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
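A minimal sketch of how rows from the Document Metadata table above get folded into every ingested document's metadata (mirrors the ingestion logic later in the file; row values are hypothetical):

rows = [
    {"key": "department", "value": "finance"},
    {"key": "source", "value": "sharepoint"},
]
additional_metadata = {row["key"]: row["value"] for row in rows}
doc_metadata = {"filename": "report.pdf", **additional_metadata}
# -> {'filename': 'report.pdf', 'department': 'finance', 'source': 'sharepoint'}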
\"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
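The two accepted filter_expression formats side by side; the translation of Format 2 shown in the comment follows the field mapping documented for _coerce_filter_clauses further below:

import json

# Format 1: explicit OpenSearch clauses plus limit and score threshold.
explicit = json.loads(
    '{"filter": [{"term": {"filename": "doc.pdf"}}], "limit": 10, "score_threshold": 1.6}'
)
# Format 2: context-style mapping, translated to term/terms clauses.
contextual = json.loads('{"data_sources": ["file.pdf"], "owners": ["user123"]}')
# Format 2 above becomes:
#   [{"term": {"filename": "file.pdf"}}, {"term": {"owner": "user123"}}]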
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n logger.error(msg)\n raise ValueError(msg)\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. 
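Each bulk request row assembled by this method has roughly the following shape (values hypothetical, vector truncated):

import uuid

action = {
    "_op_type": "index",
    "_index": "langflow",
    "_id": str(uuid.uuid4()),  # AOSS uses "id" instead of "_id"
    "chunk_embedding_text_embedding_3_small": [0.12, -0.03, 0.88],  # truncated vector
    "text": "First chunk of the document...",
    "embedding_model": "text-embedding-3-small",  # model tracking tag
    "embedding_dimensions": 3,
    "filename": "report.pdf",
}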
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n client = self.build_client()\n\n # Check if we're in ingestion-only mode (no search query)\n has_search_query = bool((self.search_query or \"\").strip())\n if not has_search_query:\n logger.debug(\"Ingestion-only mode activated: search 
operations will be skipped\")\n logger.debug(\"Starting ingestion mode...\")\n\n logger.warning(f\"Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n logger.debug(\"[INGESTION] _add_documents_to_vector_store called\")\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n logger.debug(\n f\"[INGESTION] ingest_data type: \"\n f\"{type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}\"\n )\n logger.debug(\n f\"[INGESTION] ingest_data content: \"\n f\"{self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}\"\n )\n\n docs = self.ingest_data or []\n if not docs:\n logger.debug(\"Ingestion complete: No documents provided\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Normalize embedding to list first\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n # Filter out None values (fail-safe mode) - do this BEFORE checking if empty\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n # NOW check if we have any valid embeddings left after filtering\n if not embeddings_list:\n logger.warning(\"All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.\")\n self.log(\"Embedding returned None (fail-safe mode enabled). 
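The fail-safe normalization above reduces to two lines; a sketch with string stand-ins for embedding objects:

embedding_input = [None, "emb-a", None, "emb-b"]  # stand-ins for Embeddings
embeddings_list = embedding_input if isinstance(embedding_input, list) else [embedding_input]
embeddings_list = [e for e in embeddings_list if e is not None]
print(embeddings_list)  # ['emb-a', 'emb-b']; an empty result skips ingestion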
Skipping document ingestion.\")\n return\n\n logger.debug(f\"[INGESTION] Valid embeddings after filtering: {len(embeddings_list)}\")\n self.log(f\"Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = []\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n 
identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n logger.info(f\"Selected embedding model for ingestion: '{embedding_model}'\")\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n logger.debug(f\"[LF] Docs metadata {self.docs_metadata}\")\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings with rate-limit-aware retry logic using 
tenacity\n from tenacity import (\n retry,\n retry_if_exception,\n stop_after_attempt,\n wait_exponential,\n )\n\n def is_rate_limit_error(exception: Exception) -> bool:\n \"\"\"Check if exception is a rate limit error (429).\"\"\"\n error_str = str(exception).lower()\n return \"429\" in error_str or \"rate_limit\" in error_str or \"rate limit\" in error_str\n\n def is_other_retryable_error(exception: Exception) -> bool:\n \"\"\"Check if exception is retryable but not a rate limit error.\"\"\"\n # Retry on most exceptions except for specific non-retryable ones\n # Add other non-retryable exceptions here if needed\n return not is_rate_limit_error(exception)\n\n # Create retry decorator for rate limit errors (longer backoff)\n retry_on_rate_limit = retry(\n retry=retry_if_exception(is_rate_limit_error),\n stop=stop_after_attempt(5),\n wait=wait_exponential(multiplier=2, min=2, max=30),\n reraise=True,\n before_sleep=lambda retry_state: logger.warning(\n f\"Rate limit hit for chunk (attempt {retry_state.attempt_number}/5), \"\n f\"backing off for {retry_state.next_action.sleep:.1f}s\"\n ),\n )\n\n # Create retry decorator for other errors (shorter backoff)\n retry_on_other_errors = retry(\n retry=retry_if_exception(is_other_retryable_error),\n stop=stop_after_attempt(3),\n wait=wait_exponential(multiplier=1, min=1, max=8),\n reraise=True,\n before_sleep=lambda retry_state: logger.warning(\n f\"Error embedding chunk (attempt {retry_state.attempt_number}/3), \"\n f\"retrying in {retry_state.next_action.sleep:.1f}s: {retry_state.outcome.exception()}\"\n ),\n )\n\n def embed_chunk_with_retry(chunk_text: str, chunk_idx: int) -> list[float]:\n \"\"\"Embed a single chunk with rate-limit-aware retry logic.\"\"\"\n\n @retry_on_rate_limit\n @retry_on_other_errors\n def _embed(text: str) -> list[float]:\n return selected_embedding.embed_documents([text])[0]\n\n try:\n return _embed(chunk_text)\n except Exception as e:\n logger.error(\n f\"Failed to embed chunk {chunk_idx} after all retries: {e}\",\n error=str(e),\n )\n raise\n\n # Restrict concurrency for IBM/Watsonx models to avoid rate limits\n is_ibm = (embedding_model and \"ibm\" in str(embedding_model).lower()) or (\n selected_embedding and \"watsonx\" in type(selected_embedding).__name__.lower()\n )\n logger.debug(f\"Is IBM: {is_ibm}\")\n\n # For IBM models, use sequential processing with rate limiting\n # For other models, use parallel processing\n vectors: list[list[float]] = [None] * len(texts)\n\n if is_ibm:\n # Sequential processing with inter-request delay for IBM models\n inter_request_delay = 0.6 # ~1.67 req/s, safely under 2 req/s limit\n logger.info(\n f\"Using sequential processing for IBM model with {inter_request_delay}s delay between requests\"\n )\n\n for idx, chunk in enumerate(texts):\n if idx > 0:\n # Add delay between requests (but not before the first one)\n time.sleep(inter_request_delay)\n vectors[idx] = embed_chunk_with_retry(chunk, idx)\n else:\n # Parallel processing for non-IBM models\n max_workers = min(max(len(texts), 1), 8)\n logger.debug(f\"Using parallel processing with {max_workers} workers\")\n\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk_with_retry, chunk, idx): idx for idx, chunk in enumerate(texts)}\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = 
len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n logger.info(\n f\"Ingestion complete: Successfully indexed {len(return_ids)} documents with model '{embedding_model}'\"\n )\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no 
filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": {\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n logger.debug(f\"Model detection query: {agg_query}\")\n result = client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n # Log detailed bucket info for debugging\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n if not models:\n total_hits = result.get(\"hits\", {}).get(\"total\", {})\n total_count = total_hits.get(\"value\", 0) if isinstance(total_hits, dict) else total_hits\n logger.warning(\n f\"No embedding_model values found in index '{self.index_name}'. \"\n f\"Total docs in index: {total_count}. 
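The detection query issued above, sketched standalone; bucket keys become the list of models to query (the filter clause is hypothetical):

agg_query = {
    "size": 0,
    "aggs": {"embedding_models": {"terms": {"field": "embedding_model", "size": 10}}},
}
filter_clauses = [{"term": {"owner": "user123"}}]  # hypothetical scope
if filter_clauses:
    agg_query["query"] = {"bool": {"filter": filter_clauses}}
# response["aggregations"]["embedding_models"]["buckets"] ->
#   [{"key": "text-embedding-3-small", "doc_count": 42}, ...]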
\"\n f\"This may indicate documents were indexed without the embedding_model field.\"\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n fallback_model = self._get_embedding_model_name()\n logger.info(f\"Using fallback model: {fallback_model}\")\n return [fallback_model]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. 
Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Check if embedding is None (fail-safe mode)\n if self.embedding is None or (isinstance(self.embedding, list) and all(e is None for e in self.embedding)):\n logger.error(\"Embedding returned None (fail-safe mode enabled). Cannot perform search.\")\n return []\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n # Filter out None values (fail-safe mode)\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n if not embeddings_list:\n logger.error(\n \"No valid embeddings available after filtering None values (fail-safe mode). 
Cannot perform search.\"\n )\n return []\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models_attr}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models_attr and isinstance(available_models_attr, dict):\n logger.info(\n f\"Embedding object {idx} provides {len(available_models_attr)} models via available_models dict\"\n )\n for model_name_key, dedicated_embedding in available_models_attr.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model '{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. 
\"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n self.log(f\"[SEARCH] Models detected in index: {available_models}\")\n self.log(f\"[SEARCH] Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n # Track matching status for debugging\n matched_models = []\n unmatched_models = []\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n matched_models.append(model_name)\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n self.log(f\"[MATCH] Model '{model_name}' - generated {len(vec)}-dim embedding\")\n else:\n # No matching embedding found for this model\n unmatched_models.append(model_name)\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. \"\n f\"This model will be skipped. Available identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[NO MATCH] Model '{model_name}' - available: {list(embedding_by_model.keys())}\")\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n self.log(f\"[ERROR] Embedding generation failed for '{model_name}': {e}\")\n\n # Log summary of model matching\n logger.info(f\"Model matching summary: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n self.log(f\"[SUMMARY] Model matching: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n if unmatched_models:\n self.log(f\"[WARN] Unmatched models in index: {unmatched_models}\")\n\n if not query_embeddings:\n msg = (\n f\"Failed to generate embeddings for any model. \"\n f\"Index has models: {available_models}, but no matching embedding objects found. 
\"\n f\"Available embedding identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[FAIL] Search failed: {msg}\")\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n self.log(f\"[SKIP] Field '{selected_field}' not a knn_vector - skipping model '{model_name}'\")\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n self.log(f\"[DIM MISMATCH] Model '{model_name}': query={vector_dim} vs field={field_dim} - skipping\")\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n self.log(\n f\"[WARN] No valid KNN queries could be built. 
\"\n f\"Query embeddings generated: {list(query_embeddings.keys())}, \"\n f\"but no matching knn_vector fields found in index.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(\n f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models: \"\n f\"{list(query_embeddings.keys())}\"\n )\n self.log(f\"[EXEC] Executing search with {len(knn_queries_with_candidates)} KNN queries, limit={limit}\")\n self.log(f\"[EXEC] Embedding models used: {list(query_embeddings.keys())}\")\n self.log(f\"[EXEC] KNN fields being queried: {embedding_fields}\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if 
use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n self.log(f\"[RESULT] Search complete: {len(hits)} results found\")\n\n if len(hits) == 0:\n self.log(\n f\"[EMPTY] Debug info: \"\n f\"models_in_index={available_models}, \"\n f\"matched_models={matched_models}, \"\n f\"knn_fields={embedding_fields}, \"\n f\"filters={len(filter_clauses)} clauses\"\n )\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Always builds the vector store (triggering ingestion if needed), then performs\n search only if a query is provided.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n # Always build/cache the vector store to ensure ingestion happens\n logger.info(f\"Search query: {self.search_query}\")\n if self._cached_vector_store is None:\n self.build_vector_store()\n\n # Only perform search if query is provided\n search_query = (self.search_query or \"\").strip()\n if not search_query:\n self.log(\"No search query provided - ingestion completed, returning empty results\")\n return []\n\n # Perform search with the provided query\n raw = self.search(search_query)\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return 
build_config\n" }, "docs_metadata": { "_input_type": "TableInput", @@ -5725,4 +5725,4 @@ "rag", "q-a" ] -} \ No newline at end of file +} diff --git a/flows/openrag_agent.json b/flows/openrag_agent.json index b307b118..d9475aac 100644 --- a/flows/openrag_agent.json +++ b/flows/openrag_agent.json @@ -2910,7 +2910,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. 
It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
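The selected engine is written into the knn_vector method definition (method name 'disk_ann' in this component) when the index mapping is created. 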
\"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
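When enabled, the cluster certificate must chain to a CA trusted by the client environment. 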
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n logger.aerror(msg)\n raise ValueError(msg)\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. 
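Illustrative action shape with hypothetical values: {\"_op_type\": \"index\", \"_index\": \"langflow\", \"chunk_embedding_my_model\": [0.1, 0.2, 0.3], \"text\": \"chunk text\", \"embedding_model\": \"my-model\", \"_id\": \"<uuid4>\", ...metadata keys...} (AOSS requests carry \"id\" in place of \"_id\"). 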
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n client = self.build_client()\n\n # Check if we're in ingestion-only mode (no search query)\n has_search_query = bool((self.search_query or \"\").strip())\n if not has_search_query:\n logger.debug(\"Ingestion-only mode activated: search 
operations will be skipped\")\n logger.debug(\"Starting ingestion mode...\")\n\n logger.warning(f\"Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n logger.debug(\"[INGESTION] _add_documents_to_vector_store called\")\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n logger.debug(\n f\"[INGESTION] ingest_data type: \"\n f\"{type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}\"\n )\n logger.debug(\n f\"[INGESTION] ingest_data content: \"\n f\"{self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}\"\n )\n\n docs = self.ingest_data or []\n if not docs:\n logger.debug(\"Ingestion complete: No documents provided\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Normalize embedding to list first\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n # Filter out None values (fail-safe mode) - do this BEFORE checking if empty\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n # NOW check if we have any valid embeddings left after filtering\n if not embeddings_list:\n logger.warning(\"All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.\")\n self.log(\"Embedding returned None (fail-safe mode enabled). 
Skipping document ingestion.\")\n return\n\n logger.debug(f\"[INGESTION] Valid embeddings after filtering: {len(embeddings_list)}\")\n self.log(f\"Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = []\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n 
identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n logger.info(f\"Selected embedding model for ingestion: '{embedding_model}'\")\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n logger.debug(f\"[LF] Docs metadata {self.docs_metadata}\")\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings (threaded for concurrency) with retries\n def 
embed_chunk(chunk_text: str) -> list[float]:\n return selected_embedding.embed_documents([chunk_text])[0]\n\n vectors: list[list[float]] | None = None\n last_exception: Exception | None = None\n delay = 1.0\n attempts = 0\n max_attempts = 3\n\n while attempts < max_attempts:\n attempts += 1\n try:\n # Restrict concurrency for IBM/Watsonx models to avoid rate limits\n is_ibm = (embedding_model and \"ibm\" in str(embedding_model).lower()) or (\n selected_embedding and \"watsonx\" in type(selected_embedding).__name__.lower()\n )\n logger.debug(f\"Is IBM: {is_ibm}\")\n max_workers = 1 if is_ibm else min(max(len(texts), 1), 8)\n\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n vectors = [None] * len(texts)\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n break\n except Exception as exc:\n last_exception = exc\n if attempts >= max_attempts:\n logger.error(\n f\"Embedding generation failed for model {embedding_model} after retries\",\n error=str(exc),\n )\n raise\n logger.warning(\n \"Threaded embedding generation failed for model %s (attempt %s/%s), retrying in %.1fs\",\n embedding_model,\n attempts,\n max_attempts,\n delay,\n )\n time.sleep(delay)\n delay = min(delay * 2, 8.0)\n\n if vectors is None:\n raise RuntimeError(\n f\"Embedding generation failed for {embedding_model}: {last_exception}\"\n if last_exception\n else f\"Embedding generation failed for {embedding_model}\"\n )\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n 
embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n logger.info(\n f\"Ingestion complete: Successfully indexed {len(return_ids)} documents with model '{embedding_model}'\"\n )\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": 
{\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n logger.debug(f\"Model detection query: {agg_query}\")\n result = client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n # Log detailed bucket info for debugging\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n if not models:\n total_hits = result.get(\"hits\", {}).get(\"total\", {})\n total_count = total_hits.get(\"value\", 0) if isinstance(total_hits, dict) else total_hits\n logger.warning(\n f\"No embedding_model values found in index '{self.index_name}'. \"\n f\"Total docs in index: {total_count}. \"\n f\"This may indicate documents were indexed without the embedding_model field.\"\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n fallback_model = self._get_embedding_model_name()\n logger.info(f\"Using fallback model: {fallback_model}\")\n return [fallback_model]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. 
Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Check if embedding is None (fail-safe mode)\n if self.embedding is None or (isinstance(self.embedding, list) and all(e is None for e in self.embedding)):\n logger.error(\"Embedding returned None (fail-safe mode enabled). 
Cannot perform search.\")\n return []\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n # Filter out None values (fail-safe mode)\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n if not embeddings_list:\n logger.error(\n \"No valid embeddings available after filtering None values (fail-safe mode). Cannot perform search.\"\n )\n return []\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models_attr}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models_attr and isinstance(available_models_attr, dict):\n logger.info(\n f\"Embedding object {idx} provides {len(available_models_attr)} models via available_models dict\"\n )\n for model_name_key, dedicated_embedding in available_models_attr.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model '{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - 
track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. \"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n self.log(f\"[SEARCH] Models detected in index: {available_models}\")\n self.log(f\"[SEARCH] Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n # Track matching status for debugging\n matched_models = []\n unmatched_models = []\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n matched_models.append(model_name)\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n self.log(f\"[MATCH] Model '{model_name}' - generated {len(vec)}-dim embedding\")\n else:\n # No matching embedding found for this model\n unmatched_models.append(model_name)\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. \"\n f\"This model will be skipped. 
Available identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[NO MATCH] Model '{model_name}' - available: {list(embedding_by_model.keys())}\")\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n self.log(f\"[ERROR] Embedding generation failed for '{model_name}': {e}\")\n\n # Log summary of model matching\n logger.info(f\"Model matching summary: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n self.log(f\"[SUMMARY] Model matching: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n if unmatched_models:\n self.log(f\"[WARN] Unmatched models in index: {unmatched_models}\")\n\n if not query_embeddings:\n msg = (\n f\"Failed to generate embeddings for any model. \"\n f\"Index has models: {available_models}, but no matching embedding objects found. \"\n f\"Available embedding identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[FAIL] Search failed: {msg}\")\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n self.log(f\"[SKIP] Field '{selected_field}' not a knn_vector - skipping model '{model_name}'\")\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n self.log(f\"[DIM MISMATCH] Model '{model_name}': query={vector_dim} vs field={field_dim} - skipping\")\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. 
Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n self.log(\n f\"[WARN] No valid KNN queries could be built. \"\n f\"Query embeddings generated: {list(query_embeddings.keys())}, \"\n f\"but no matching knn_vector fields found in index.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(\n f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models: \"\n f\"{list(query_embeddings.keys())}\"\n )\n self.log(f\"[EXEC] Executing search with {len(knn_queries_with_candidates)} KNN queries, limit={limit}\")\n self.log(f\"[EXEC] Embedding models used: {list(query_embeddings.keys())}\")\n self.log(f\"[EXEC] KNN fields being queried: {embedding_fields}\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n 
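# Worked example (invented numbers) of the scoring composed by the query body
# built above: dis_max with tie_breaker=0.0 keeps only the best per-model KNN
# score, and the bool "should" clauses then sum their boosted scores, roughly
# 0.7 * semantic + 0.3 * keyword before Lucene normalization:
#   per-model KNN scores: {"model_a": 0.82, "model_b": 0.64}  -> dis_max = 0.82
#   hybrid ~= 0.7 * 0.82 + 0.3 * 0.50                          -> ~0.724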
fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n self.log(f\"[RESULT] Search complete: {len(hits)} results found\")\n\n if len(hits) == 0:\n self.log(\n f\"[EMPTY] Debug info: \"\n f\"models_in_index={available_models}, \"\n f\"matched_models={matched_models}, \"\n f\"knn_fields={embedding_fields}, \"\n f\"filters={len(filter_clauses)} clauses\"\n )\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Always builds the vector store (triggering ingestion if needed), then performs\n search only if a query is provided.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n # Always build/cache the vector store to ensure ingestion happens\n logger.info(f\"Search query: {self.search_query}\")\n if self._cached_vector_store is None:\n self.build_vector_store()\n\n # Only perform search if query is provided\n search_query = (self.search_query or \"\").strip()\n if not search_query:\n self.log(\"No search query provided - ingestion completed, returning empty results\")\n return []\n\n # Perform search with the provided query\n raw = self.search(search_query)\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n 
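# Worked example of the auth-mode switch handled here (values illustrative):
#   field_value = "jwt" -> is_basic=False, is_jwt=True, so the username/password
#   inputs are hidden above while jwt_token, jwt_header, and bearer_prefix are
#   shown; the "required" flags set below mirror the same is_basic/is_jwt switch,
#   so a field is never required while it is hidden.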
build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n" + "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. 
Cannot perform search.\"\n )\n return []\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models_attr}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models_attr and isinstance(available_models_attr, dict):\n logger.info(\n f\"Embedding object {idx} provides {len(available_models_attr)} models via available_models dict\"\n )\n for model_name_key, dedicated_embedding in available_models_attr.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model '{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. 
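The identifier bookkeeping above (deployment, model, model_id, model_name, plus a combined deployment:model key) can be condensed into one helper. A sketch under the same assumptions, with the same first-mapping-wins conflict handling:

from typing import Any

def build_identifier_map(embeddings: list[Any]) -> dict[str, Any]:
    """Map every known identifier of an embedding object back to that object."""
    by_id: dict[str, Any] = {}
    for emb in embeddings:
        deployment = getattr(emb, "deployment", None)
        model = getattr(emb, "model", None)
        names = [str(v) for v in (deployment, model, getattr(emb, "model_id", None),
                                  getattr(emb, "model_name", None)) if v]
        # The combined key disambiguates objects that share a deployment but differ in model.
        if deployment and model and deployment != model:
            names.append(f"{deployment}:{model}")
        for name in names:
            by_id.setdefault(name, emb)  # first mapping wins; later ones are conflicts
    return by_id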
\"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n self.log(f\"[SEARCH] Models detected in index: {available_models}\")\n self.log(f\"[SEARCH] Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n # Track matching status for debugging\n matched_models = []\n unmatched_models = []\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n matched_models.append(model_name)\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n self.log(f\"[MATCH] Model '{model_name}' - generated {len(vec)}-dim embedding\")\n else:\n # No matching embedding found for this model\n unmatched_models.append(model_name)\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. \"\n f\"This model will be skipped. Available identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[NO MATCH] Model '{model_name}' - available: {list(embedding_by_model.keys())}\")\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n self.log(f\"[ERROR] Embedding generation failed for '{model_name}': {e}\")\n\n # Log summary of model matching\n logger.info(f\"Model matching summary: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n self.log(f\"[SUMMARY] Model matching: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n if unmatched_models:\n self.log(f\"[WARN] Unmatched models in index: {unmatched_models}\")\n\n if not query_embeddings:\n msg = (\n f\"Failed to generate embeddings for any model. \"\n f\"Index has models: {available_models}, but no matching embedding objects found. 
\"\n f\"Available embedding identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[FAIL] Search failed: {msg}\")\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n self.log(f\"[SKIP] Field '{selected_field}' not a knn_vector - skipping model '{model_name}'\")\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n self.log(f\"[DIM MISMATCH] Model '{model_name}': query={vector_dim} vs field={field_dim} - skipping\")\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n self.log(\n f\"[WARN] No valid KNN queries could be built. 
\"\n f\"Query embeddings generated: {list(query_embeddings.keys())}, \"\n f\"but no matching knn_vector fields found in index.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(\n f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models: \"\n f\"{list(query_embeddings.keys())}\"\n )\n self.log(f\"[EXEC] Executing search with {len(knn_queries_with_candidates)} KNN queries, limit={limit}\")\n self.log(f\"[EXEC] Embedding models used: {list(query_embeddings.keys())}\")\n self.log(f\"[EXEC] KNN fields being queried: {embedding_fields}\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if 
use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n self.log(f\"[RESULT] Search complete: {len(hits)} results found\")\n\n if len(hits) == 0:\n self.log(\n f\"[EMPTY] Debug info: \"\n f\"models_in_index={available_models}, \"\n f\"matched_models={matched_models}, \"\n f\"knn_fields={embedding_fields}, \"\n f\"filters={len(filter_clauses)} clauses\"\n )\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Always builds the vector store (triggering ingestion if needed), then performs\n search only if a query is provided.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n # Always build/cache the vector store to ensure ingestion happens\n logger.info(f\"Search query: {self.search_query}\")\n if self._cached_vector_store is None:\n self.build_vector_store()\n\n # Only perform search if query is provided\n search_query = (self.search_query or \"\").strip()\n if not search_query:\n self.log(\"No search query provided - ingestion completed, returning empty results\")\n return []\n\n # Perform search with the provided query\n raw = self.search(search_query)\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return 
build_config\n" }, "docs_metadata": { "_input_type": "TableInput", diff --git a/flows/openrag_nudges.json b/flows/openrag_nudges.json index f7f38d2a..d9d79e60 100644 --- a/flows/openrag_nudges.json +++ b/flows/openrag_nudges.json @@ -2519,7 +2519,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. 
It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
\"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
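Both filter_expression formats described above normalize to the same bool-filter clauses. A sketch of the expected coercion (hypothetical filenames and owners):

import json

explicit = json.loads('{"filter": [{"term": {"filename": "doc.pdf"}}], "limit": 10, "score_threshold": 1.6}')
context = json.loads('{"data_sources": ["file.pdf"], "owners": ["user123"]}')

# explicit -> [{"term": {"filename": "doc.pdf"}}]   (limit/score_threshold are read separately)
# context  -> [{"term": {"filename": "file.pdf"}}, {"term": {"owner": "user123"}}]
# and {"owners": []} -> [{"term": {"owner": "__IMPOSSIBLE_VALUE__"}}]  (match-nothing placeholder)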
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n logger.aerror(msg)\n raise ValueError(msg)\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. 
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n client = self.build_client()\n\n # Check if we're in ingestion-only mode (no search query)\n has_search_query = bool((self.search_query or \"\").strip())\n if not has_search_query:\n logger.debug(\"Ingestion-only mode activated: search 
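Each bulk action is a flat document: the vector under the dynamic field, the chunk under the text field, and embedding_model riding along as a keyword tag. A minimal sketch of one action as handed to opensearchpy.helpers.bulk (hypothetical values; the target index must already map the vector field as knn_vector with a matching dimension):

import uuid
from opensearchpy import OpenSearch, helpers

client = OpenSearch(hosts=["http://localhost:9200"], http_auth=("admin", "admin"))
action = {
    "_op_type": "index",
    "_index": "langflow",
    "_id": str(uuid.uuid4()),  # AOSS expects "id" instead of "_id"
    "chunk_embedding_text_embedding_3_small": [0.1] * 1536,  # dynamic vector field
    "text": "example chunk",
    "embedding_model": "text-embedding-3-small",  # enables per-model detection at query time
    "filename": "doc.pdf",
}
helpers.bulk(client, [action], max_chunk_bytes=1 * 1024 * 1024)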
operations will be skipped\")\n logger.debug(\"Starting ingestion mode...\")\n\n logger.warning(f\"Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n logger.debug(\"[INGESTION] _add_documents_to_vector_store called\")\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n logger.debug(\n f\"[INGESTION] ingest_data type: \"\n f\"{type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}\"\n )\n logger.debug(\n f\"[INGESTION] ingest_data content: \"\n f\"{self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}\"\n )\n\n docs = self.ingest_data or []\n if not docs:\n logger.debug(\"Ingestion complete: No documents provided\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Normalize embedding to list first\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n # Filter out None values (fail-safe mode) - do this BEFORE checking if empty\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n # NOW check if we have any valid embeddings left after filtering\n if not embeddings_list:\n logger.warning(\"All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.\")\n self.log(\"Embedding returned None (fail-safe mode enabled). 
Skipping document ingestion.\")\n return\n\n logger.debug(f\"[INGESTION] Valid embeddings after filtering: {len(embeddings_list)}\")\n self.log(f\"Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = []\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n 
identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n logger.info(f\"Selected embedding model for ingestion: '{embedding_model}'\")\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n logger.debug(f\"[LF] Docs metadata {self.docs_metadata}\")\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings (threaded for concurrency) with retries\n def 
embed_chunk(chunk_text: str) -> list[float]:\n return selected_embedding.embed_documents([chunk_text])[0]\n\n vectors: list[list[float]] | None = None\n last_exception: Exception | None = None\n delay = 1.0\n attempts = 0\n max_attempts = 3\n\n while attempts < max_attempts:\n attempts += 1\n try:\n # Restrict concurrency for IBM/Watsonx models to avoid rate limits\n is_ibm = (embedding_model and \"ibm\" in str(embedding_model).lower()) or (\n selected_embedding and \"watsonx\" in type(selected_embedding).__name__.lower()\n )\n logger.debug(f\"Is IBM: {is_ibm}\")\n max_workers = 1 if is_ibm else min(max(len(texts), 1), 8)\n\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n vectors = [None] * len(texts)\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n break\n except Exception as exc:\n last_exception = exc\n if attempts >= max_attempts:\n logger.error(\n f\"Embedding generation failed for model {embedding_model} after retries\",\n error=str(exc),\n )\n raise\n logger.warning(\n \"Threaded embedding generation failed for model %s (attempt %s/%s), retrying in %.1fs\",\n embedding_model,\n attempts,\n max_attempts,\n delay,\n )\n time.sleep(delay)\n delay = min(delay * 2, 8.0)\n\n if vectors is None:\n raise RuntimeError(\n f\"Embedding generation failed for {embedding_model}: {last_exception}\"\n if last_exception\n else f\"Embedding generation failed for {embedding_model}\"\n )\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n 
embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n logger.info(\n f\"Ingestion complete: Successfully indexed {len(return_ids)} documents with model '{embedding_model}'\"\n )\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": 
{\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n logger.debug(f\"Model detection query: {agg_query}\")\n result = client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n # Log detailed bucket info for debugging\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n if not models:\n total_hits = result.get(\"hits\", {}).get(\"total\", {})\n total_count = total_hits.get(\"value\", 0) if isinstance(total_hits, dict) else total_hits\n logger.warning(\n f\"No embedding_model values found in index '{self.index_name}'. \"\n f\"Total docs in index: {total_count}. \"\n f\"This may indicate documents were indexed without the embedding_model field.\"\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n fallback_model = self._get_embedding_model_name()\n logger.info(f\"Using fallback model: {fallback_model}\")\n return [fallback_model]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. 
    def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:
        """Retrieve flattened mapping properties for the current index."""
        try:
            mapping = client.indices.get_mapping(index=self.index_name)
        except OpenSearchException as e:
            logger.warning(
                f"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata."
            )
            return None

        properties: dict[str, Any] = {}
        for index_data in mapping.values():
            props = index_data.get("mappings", {}).get("properties", {})
            if isinstance(props, dict):
                properties.update(props)
        return properties

    def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:
        """Check whether the field is mapped as a knn_vector."""
        if not field_name:
            return False
        if properties is None:
            logger.warning(f"Mapping metadata unavailable; assuming field '{field_name}' is usable.")
            return True
        field_def = properties.get(field_name)
        if not isinstance(field_def, dict):
            return False
        if field_def.get("type") == "knn_vector":
            return True

        nested_props = field_def.get("properties")
        return bool(isinstance(nested_props, dict) and nested_props.get("type") == "knn_vector")

    def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:
        """Get the dimension of a knn_vector field from the index mapping.

        Args:
            properties: Index properties from mapping
            field_name: Name of the vector field

        Returns:
            Dimension of the field, or None if not found
        """
        if not field_name or properties is None:
            return None

        field_def = properties.get(field_name)
        if not isinstance(field_def, dict):
            return None

        # Check direct knn_vector field
        if field_def.get("type") == "knn_vector":
            return field_def.get("dimension")

        # Check nested properties
        nested_props = field_def.get("properties")
        if isinstance(nested_props, dict) and nested_props.get("type") == "knn_vector":
            return nested_props.get("dimension")

        return None
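    # Illustrative mapping fragment (hypothetical field/dimension): given properties like
    #     {"chunk_embedding_text_embedding_3_small": {"type": "knn_vector", "dimension": 1536}}
    # _is_knn_vector_field(...) returns True and _get_field_dimension(...) returns 1536,
    # which is later compared against the length of each query vector before a KNN
    # clause is built for that model.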
    # ---------- search (multi-model hybrid) ----------
    def search(self, query: str | None = None) -> list[dict[str, Any]]:
        """Perform multi-model hybrid search combining multiple vector similarities and keyword matching.

        This method executes a sophisticated search that:
        1. Auto-detects all embedding models present in the index
        2. Generates query embeddings for ALL detected models
        3. Combines multiple KNN queries using dis_max (picks best match)
        4. Adds keyword search with fuzzy matching (30% weight)
        5. Applies optional filtering and score thresholds
        6. Returns aggregations for faceted search

        Search weights:
        - Semantic search (dis_max across all models): 70%
        - Keyword search: 30%

        Args:
            query: Search query string (used for both vector embedding and keyword search)

        Returns:
            List of search results with page_content, metadata, and relevance scores

        Raises:
            ValueError: If embedding component is not provided or filter JSON is invalid
        """
        logger.debug(f"Ingest data: {self.ingest_data}")
        client = self.build_client()
        q = (query or "").strip()

        # Parse optional filter expression
        filter_obj = None
        if getattr(self, "filter_expression", "") and self.filter_expression.strip():
            try:
                filter_obj = json.loads(self.filter_expression)
            except json.JSONDecodeError as e:
                msg = f"Invalid filter_expression JSON: {e}"
                raise ValueError(msg) from e

        if not self.embedding:
            msg = "Embedding is required to run hybrid search (KNN + keyword)."
            raise ValueError(msg)

        # Check if embedding is None (fail-safe mode)
        if self.embedding is None or (isinstance(self.embedding, list) and all(e is None for e in self.embedding)):
            logger.error("Embedding returned None (fail-safe mode enabled). Cannot perform search.")
            return []

        # Build filter clauses first so we can use them in model detection
        filter_clauses = self._coerce_filter_clauses(filter_obj)

        # Detect available embedding models in the index (scoped by filters)
        available_models = self._detect_available_models(client, filter_clauses)

        if not available_models:
            logger.warning("No embedding models found in index, using current model")
            available_models = [self._get_embedding_model_name()]

        # Generate embeddings for ALL detected models
        query_embeddings = {}

        # Normalize embedding to list
        embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]
        # Filter out None values (fail-safe mode)
        embeddings_list = [e for e in embeddings_list if e is not None]

        if not embeddings_list:
            logger.error(
                "No valid embeddings available after filtering None values (fail-safe mode). Cannot perform search."
            )
            return []

        # Create a comprehensive map of model names to embedding objects.
        # Check all possible identifiers (deployment, model, model_id, model_name).
        # Also leverage the available_models dict from EmbeddingsWithModels.
        # Handle duplicate identifiers by creating combined keys.
        embedding_by_model = {}
        identifier_conflicts = {}  # Track which identifiers have conflicts

        for idx, emb_obj in enumerate(embeddings_list):
            # Get all possible identifiers for this embedding
            identifiers = []
            deployment = getattr(emb_obj, "deployment", None)
            model = getattr(emb_obj, "model", None)
            model_id = getattr(emb_obj, "model_id", None)
            model_name = getattr(emb_obj, "model_name", None)
            dimensions = getattr(emb_obj, "dimensions", None)
            available_models_attr = getattr(emb_obj, "available_models", None)

            logger.info(
                f"Embedding object {idx}: deployment={deployment}, model={model}, "
                f"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, "
                f"available_models={available_models_attr}"
            )

            # If this embedding has an available_models dict, map each model to its dedicated instance
            if available_models_attr and isinstance(available_models_attr, dict):
                logger.info(
                    f"Embedding object {idx} provides {len(available_models_attr)} models via available_models dict"
                )
                for model_name_key, dedicated_embedding in available_models_attr.items():
                    if model_name_key and str(model_name_key).strip():
                        model_str = str(model_name_key).strip()
                        if model_str not in embedding_by_model:
                            # Use the dedicated embedding instance from the dict
                            embedding_by_model[model_str] = dedicated_embedding
                            logger.info(f"Mapped available model '{model_str}' to dedicated embedding instance")
                        else:
                            # Conflict detected - track it
                            if model_str not in identifier_conflicts:
                                identifier_conflicts[model_str] = [embedding_by_model[model_str]]
                            identifier_conflicts[model_str].append(dedicated_embedding)
                            logger.warning(f"Available model '{model_str}' has conflict - used by multiple embeddings")

            # Also map traditional identifiers (for backward compatibility)
            if deployment:
                identifiers.append(str(deployment))
            if model:
                identifiers.append(str(model))
            if model_id:
                identifiers.append(str(model_id))
            if model_name:
                identifiers.append(str(model_name))

            # Map all identifiers to this embedding object
            for identifier in identifiers:
                if identifier not in embedding_by_model:
                    embedding_by_model[identifier] = emb_obj
                    logger.info(f"Mapped identifier '{identifier}' to embedding object {idx}")
                else:
                    # Conflict detected - track it
                    if identifier not in identifier_conflicts:
                        identifier_conflicts[identifier] = [embedding_by_model[identifier]]
                    identifier_conflicts[identifier].append(emb_obj)
                    logger.warning(f"Identifier '{identifier}' has conflict - used by multiple embeddings")

            # For embeddings with model+deployment, create a combined identifier.
            # This helps when deployment is the same but model differs
            if deployment and model and deployment != model:
                combined_id = f"{deployment}:{model}"
                if combined_id not in embedding_by_model:
                    embedding_by_model[combined_id] = emb_obj
                    logger.info(f"Created combined identifier '{combined_id}' for embedding object {idx}")
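        # Illustrative result (hypothetical model names): two embedding objects with
        # deployment="ada-002" / model="text-embedding-ada-002" and model="granite-embedding-107m"
        # produce an identifier map like
        #     {"ada-002": <emb 0>, "text-embedding-ada-002": <emb 0>,
        #      "ada-002:text-embedding-ada-002": <emb 0>, "granite-embedding-107m": <emb 1>}
        # so each model detected in the index can be matched to the object that embeds for it.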
        # Log conflicts
        if identifier_conflicts:
            logger.warning(
                f"Found {len(identifier_conflicts)} conflicting identifiers. "
                f"Consider using combined format 'deployment:model' or specifying unique model names."
            )
            for conflict_id, emb_list in identifier_conflicts.items():
                logger.warning(f"  Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier")

        logger.info(f"Generating embeddings for {len(available_models)} models in index")
        logger.info(f"Available embedding identifiers: {list(embedding_by_model.keys())}")
        self.log(f"[SEARCH] Models detected in index: {available_models}")
        self.log(f"[SEARCH] Available embedding identifiers: {list(embedding_by_model.keys())}")

        # Track matching status for debugging
        matched_models = []
        unmatched_models = []

        for model_name in available_models:
            try:
                # Check if we have an embedding object for this model
                if model_name in embedding_by_model:
                    # Use the matching embedding object directly
                    emb_obj = embedding_by_model[model_name]
                    emb_deployment = getattr(emb_obj, "deployment", None)
                    emb_model = getattr(emb_obj, "model", None)
                    emb_model_id = getattr(emb_obj, "model_id", None)
                    emb_dimensions = getattr(emb_obj, "dimensions", None)
                    emb_available_models = getattr(emb_obj, "available_models", None)

                    logger.info(
                        f"Using embedding object for model '{model_name}': "
                        f"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, "
                        f"dimensions={emb_dimensions}"
                    )

                    # Check if this is a dedicated instance from the available_models dict
                    if emb_available_models and isinstance(emb_available_models, dict):
                        logger.info(
                            f"Model '{model_name}' using dedicated instance from available_models dict "
                            f"(pre-configured with correct model and dimensions)"
                        )

                    # Use the embedding instance directly - no model switching needed
                    vec = emb_obj.embed_query(q)
                    query_embeddings[model_name] = vec
                    matched_models.append(model_name)
                    logger.info(f"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})")
                    self.log(f"[MATCH] Model '{model_name}' - generated {len(vec)}-dim embedding")
                else:
                    # No matching embedding found for this model
                    unmatched_models.append(model_name)
                    logger.warning(
                        f"No matching embedding found for model '{model_name}'. "
                        f"This model will be skipped. Available identifiers: {list(embedding_by_model.keys())}"
                    )
                    self.log(f"[NO MATCH] Model '{model_name}' - available: {list(embedding_by_model.keys())}")
            except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:
                logger.warning(f"Failed to generate embedding for {model_name}: {e}")
                self.log(f"[ERROR] Embedding generation failed for '{model_name}': {e}")
        # Log summary of model matching
        logger.info(f"Model matching summary: {len(matched_models)} matched, {len(unmatched_models)} unmatched")
        self.log(f"[SUMMARY] Model matching: {len(matched_models)} matched, {len(unmatched_models)} unmatched")
        if unmatched_models:
            self.log(f"[WARN] Unmatched models in index: {unmatched_models}")

        if not query_embeddings:
            msg = (
                f"Failed to generate embeddings for any model. "
                f"Index has models: {available_models}, but no matching embedding objects found. "
                f"Available embedding identifiers: {list(embedding_by_model.keys())}"
            )
            self.log(f"[FAIL] Search failed: {msg}")
            raise ValueError(msg)

        index_properties = self._get_index_properties(client)
        legacy_vector_field = getattr(self, "vector_field", "chunk_embedding")

        # Build KNN queries for each model
        embedding_fields: list[str] = []
        knn_queries_with_candidates = []
        knn_queries_without_candidates = []

        raw_num_candidates = getattr(self, "num_candidates", 1000)
        try:
            num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0
        except (TypeError, ValueError):
            num_candidates = 0
        use_num_candidates = num_candidates > 0

        for model_name, embedding_vector in query_embeddings.items():
            field_name = get_embedding_field_name(model_name)
            selected_field = field_name
            vector_dim = len(embedding_vector)

            # Only use the expected dynamic field - no legacy fallback.
            # This prevents dimension mismatches between models
            if not self._is_knn_vector_field(index_properties, selected_field):
                logger.warning(
                    f"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. "
                    f"Documents must be indexed with this embedding model before querying."
                )
                self.log(f"[SKIP] Field '{selected_field}' not a knn_vector - skipping model '{model_name}'")
                continue

            # Validate that query vector dimensions match the field dimensions
            field_dim = self._get_field_dimension(index_properties, selected_field)
            if field_dim is not None and field_dim != vector_dim:
                logger.error(
                    f"Dimension mismatch for model '{model_name}': "
                    f"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. "
                    f"Skipping this model to prevent search errors."
                )
                self.log(f"[DIM MISMATCH] Model '{model_name}': query={vector_dim} vs field={field_dim} - skipping")
                continue

            logger.info(
                f"Adding KNN query for model '{model_name}': field='{selected_field}', "
                f"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}"
            )
            embedding_fields.append(selected_field)

            base_query = {
                "knn": {
                    selected_field: {
                        "vector": embedding_vector,
                        "k": 50,
                    }
                }
            }

            if use_num_candidates:
                query_with_candidates = copy.deepcopy(base_query)
                query_with_candidates["knn"][selected_field]["num_candidates"] = num_candidates
            else:
                query_with_candidates = base_query

            knn_queries_with_candidates.append(query_with_candidates)
            knn_queries_without_candidates.append(base_query)
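        # Illustrative clause (hypothetical 1536-dim model): each entry in
        # knn_queries_with_candidates is shaped like
        #     {"knn": {"chunk_embedding_text_embedding_3_small":
        #         {"vector": [...1536 floats...], "k": 50, "num_candidates": 1000}}}
        # while knn_queries_without_candidates keeps the same clause minus num_candidates,
        # for clusters that reject that parameter.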
        if not knn_queries_with_candidates:
            # No valid fields found - this can happen when:
            # 1. Index is empty (no documents yet)
            # 2. Embedding model has changed and field doesn't exist yet
            # Return empty results instead of failing
            logger.warning(
                "No valid knn_vector fields found for embedding models. "
                "This may indicate an empty index or missing field mappings. "
                "Returning empty search results."
            )
            self.log(
                f"[WARN] No valid KNN queries could be built. "
                f"Query embeddings generated: {list(query_embeddings.keys())}, "
                f"but no matching knn_vector fields found in index."
            )
            return []

        # Build exists filter - document must have at least one embedding field
        exists_any_embedding = {
            "bool": {"should": [{"exists": {"field": f}} for f in set(embedding_fields)], "minimum_should_match": 1}
        }

        # Combine user filters with exists filter
        all_filters = [*filter_clauses, exists_any_embedding]

        # Get limit and score threshold
        limit = (filter_obj or {}).get("limit", self.number_of_results)
        score_threshold = (filter_obj or {}).get("score_threshold", 0)

        # Build multi-model hybrid query
        body = {
            "query": {
                "bool": {
                    "should": [
                        {
                            "dis_max": {
                                "tie_breaker": 0.0,  # Take only the best match, no blending
                                "boost": 0.7,  # 70% weight for semantic search
                                "queries": knn_queries_with_candidates,
                            }
                        },
                        {
                            "multi_match": {
                                "query": q,
                                "fields": ["text^2", "filename^1.5"],
                                "type": "best_fields",
                                "fuzziness": "AUTO",
                                "boost": 0.3,  # 30% weight for keyword search
                            }
                        },
                    ],
                    "minimum_should_match": 1,
                    "filter": all_filters,
                }
            },
            "aggs": {
                "data_sources": {"terms": {"field": "filename", "size": 20}},
                "document_types": {"terms": {"field": "mimetype", "size": 10}},
                "owners": {"terms": {"field": "owner", "size": 10}},
                "embedding_models": {"terms": {"field": "embedding_model", "size": 10}},
            },
            "_source": [
                "filename",
                "mimetype",
                "page",
                "text",
                "source_url",
                "owner",
                "embedding_model",
                "allowed_users",
                "allowed_groups",
            ],
            "size": limit,
        }

        if isinstance(score_threshold, (int, float)) and score_threshold > 0:
            body["min_score"] = score_threshold

        logger.info(
            f"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models: "
            f"{list(query_embeddings.keys())}"
        )
        self.log(f"[EXEC] Executing search with {len(knn_queries_with_candidates)} KNN queries, limit={limit}")
        self.log(f"[EXEC] Embedding models used: {list(query_embeddings.keys())}")
        self.log(f"[EXEC] KNN fields being queried: {embedding_fields}")
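        # Worked scoring sketch (hypothetical scores): with tie_breaker=0.0 the dis_max
        # clause contributes only its best KNN subquery score, so if two models score
        # 0.82 and 0.64 and the keyword clause scores 0.50, a document matching both
        # should-clauses scores roughly
        #     0.82 * 0.7 + 0.50 * 0.3 = 0.724
        # (boosts multiply per clause; bool "should" scores are summed).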
        try:
            resp = client.search(index=self.index_name, body=body, params={"terminate_after": 0})
        except RequestError as e:
            error_message = str(e)
            lowered = error_message.lower()
            if use_num_candidates and "num_candidates" in lowered:
                logger.warning(
                    "Retrying search without num_candidates parameter due to cluster capabilities",
                    error=error_message,
                )
                fallback_body = copy.deepcopy(body)
                try:
                    fallback_body["query"]["bool"]["should"][0]["dis_max"]["queries"] = knn_queries_without_candidates
                except (KeyError, IndexError, TypeError) as inner_err:
                    raise e from inner_err
                resp = client.search(
                    index=self.index_name,
                    body=fallback_body,
                    params={"terminate_after": 0},
                )
            elif "knn_vector" in lowered or ("field" in lowered and "knn" in lowered):
                fallback_vector = next(iter(query_embeddings.values()), None)
                if fallback_vector is None:
                    raise
                fallback_field = legacy_vector_field or "chunk_embedding"
                logger.warning(
                    "KNN search failed for dynamic fields; falling back to legacy field '%s'.",
                    fallback_field,
                )
                fallback_body = copy.deepcopy(body)
                fallback_body["query"]["bool"]["filter"] = filter_clauses
                knn_fallback = {
                    "knn": {
                        fallback_field: {
                            "vector": fallback_vector,
                            "k": 50,
                        }
                    }
                }
                if use_num_candidates:
                    knn_fallback["knn"][fallback_field]["num_candidates"] = num_candidates
                fallback_body["query"]["bool"]["should"][0]["dis_max"]["queries"] = [knn_fallback]
                resp = client.search(
                    index=self.index_name,
                    body=fallback_body,
                    params={"terminate_after": 0},
                )
            else:
                raise

        hits = resp.get("hits", {}).get("hits", [])

        logger.info(f"Found {len(hits)} results")
        self.log(f"[RESULT] Search complete: {len(hits)} results found")

        if len(hits) == 0:
            self.log(
                f"[EMPTY] Debug info: "
                f"models_in_index={available_models}, "
                f"matched_models={matched_models}, "
                f"knn_fields={embedding_fields}, "
                f"filters={len(filter_clauses)} clauses"
            )

        return [
            {
                "page_content": hit["_source"].get("text", ""),
                "metadata": {k: v for k, v in hit["_source"].items() if k != "text"},
                "score": hit.get("_score"),
            }
            for hit in hits
        ]
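    # Illustrative result entry (hypothetical document): search() returns items shaped like
    #     {"page_content": "...chunk text...",
    #      "metadata": {"filename": "report.pdf", "owner": "user1",
    #                   "embedding_model": "text-embedding-3-small", ...},
    #      "score": 0.724}
    # which search_documents() below re-wraps as Data objects.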
build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n" + "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. 
It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
\"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n logger.aerror(msg)\n raise ValueError(msg)\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. 
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n client = self.build_client()\n\n # Check if we're in ingestion-only mode (no search query)\n has_search_query = bool((self.search_query or \"\").strip())\n if not has_search_query:\n logger.debug(\"Ingestion-only mode activated: search 
operations will be skipped\")\n logger.debug(\"Starting ingestion mode...\")\n\n logger.warning(f\"Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n logger.debug(\"[INGESTION] _add_documents_to_vector_store called\")\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n logger.debug(\n f\"[INGESTION] ingest_data type: \"\n f\"{type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}\"\n )\n logger.debug(\n f\"[INGESTION] ingest_data content: \"\n f\"{self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}\"\n )\n\n docs = self.ingest_data or []\n if not docs:\n logger.debug(\"Ingestion complete: No documents provided\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Normalize embedding to list first\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n # Filter out None values (fail-safe mode) - do this BEFORE checking if empty\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n # NOW check if we have any valid embeddings left after filtering\n if not embeddings_list:\n logger.warning(\"All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.\")\n self.log(\"Embedding returned None (fail-safe mode enabled). 
Skipping document ingestion.\")\n return\n\n logger.debug(f\"[INGESTION] Valid embeddings after filtering: {len(embeddings_list)}\")\n self.log(f\"Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = []\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n 
identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n logger.info(f\"Selected embedding model for ingestion: '{embedding_model}'\")\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n logger.debug(f\"[LF] Docs metadata {self.docs_metadata}\")\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings with rate-limit-aware retry logic using 
tenacity\n from tenacity import (\n retry,\n retry_if_exception,\n stop_after_attempt,\n wait_exponential,\n )\n\n def is_rate_limit_error(exception: Exception) -> bool:\n \"\"\"Check if exception is a rate limit error (429).\"\"\"\n error_str = str(exception).lower()\n return \"429\" in error_str or \"rate_limit\" in error_str or \"rate limit\" in error_str\n\n def is_other_retryable_error(exception: Exception) -> bool:\n \"\"\"Check if exception is retryable but not a rate limit error.\"\"\"\n # Retry on most exceptions except for specific non-retryable ones\n # Add other non-retryable exceptions here if needed\n return not is_rate_limit_error(exception)\n\n # Create retry decorator for rate limit errors (longer backoff)\n retry_on_rate_limit = retry(\n retry=retry_if_exception(is_rate_limit_error),\n stop=stop_after_attempt(5),\n wait=wait_exponential(multiplier=2, min=2, max=30),\n reraise=True,\n before_sleep=lambda retry_state: logger.warning(\n f\"Rate limit hit for chunk (attempt {retry_state.attempt_number}/5), \"\n f\"backing off for {retry_state.next_action.sleep:.1f}s\"\n ),\n )\n\n # Create retry decorator for other errors (shorter backoff)\n retry_on_other_errors = retry(\n retry=retry_if_exception(is_other_retryable_error),\n stop=stop_after_attempt(3),\n wait=wait_exponential(multiplier=1, min=1, max=8),\n reraise=True,\n before_sleep=lambda retry_state: logger.warning(\n f\"Error embedding chunk (attempt {retry_state.attempt_number}/3), \"\n f\"retrying in {retry_state.next_action.sleep:.1f}s: {retry_state.outcome.exception()}\"\n ),\n )\n\n def embed_chunk_with_retry(chunk_text: str, chunk_idx: int) -> list[float]:\n \"\"\"Embed a single chunk with rate-limit-aware retry logic.\"\"\"\n\n @retry_on_rate_limit\n @retry_on_other_errors\n def _embed(text: str) -> list[float]:\n return selected_embedding.embed_documents([text])[0]\n\n try:\n return _embed(chunk_text)\n except Exception as e:\n logger.error(\n f\"Failed to embed chunk {chunk_idx} after all retries: {e}\",\n error=str(e),\n )\n raise\n\n # Restrict concurrency for IBM/Watsonx models to avoid rate limits\n is_ibm = (embedding_model and \"ibm\" in str(embedding_model).lower()) or (\n selected_embedding and \"watsonx\" in type(selected_embedding).__name__.lower()\n )\n logger.debug(f\"Is IBM: {is_ibm}\")\n\n # For IBM models, use sequential processing with rate limiting\n # For other models, use parallel processing\n vectors: list[list[float]] = [None] * len(texts)\n\n if is_ibm:\n # Sequential processing with inter-request delay for IBM models\n inter_request_delay = 0.6 # ~1.67 req/s, safely under 2 req/s limit\n logger.info(\n f\"Using sequential processing for IBM model with {inter_request_delay}s delay between requests\"\n )\n\n for idx, chunk in enumerate(texts):\n if idx > 0:\n # Add delay between requests (but not before the first one)\n time.sleep(inter_request_delay)\n vectors[idx] = embed_chunk_with_retry(chunk, idx)\n else:\n # Parallel processing for non-IBM models\n max_workers = min(max(len(texts), 1), 8)\n logger.debug(f\"Using parallel processing with {max_workers} workers\")\n\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk_with_retry, chunk, idx): idx for idx, chunk in enumerate(texts)}\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = 
len(vectors[0])  # vectors is guaranteed non-empty here (the empty case returned above)\n\n        # Check for AOSS\n        auth_kwargs = self._build_auth_kwargs()\n        is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n        # Validate engine with AOSS\n        engine = getattr(self, \"engine\", \"jvector\")\n        self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n        # Create mapping with proper KNN settings\n        space_type = getattr(self, \"space_type\", \"l2\")\n        ef_construction = getattr(self, \"ef_construction\", 512)\n        m = getattr(self, \"m\", 16)\n\n        mapping = self._default_text_mapping(\n            dim=dim,\n            engine=engine,\n            space_type=space_type,\n            ef_construction=ef_construction,\n            m=m,\n            vector_field=dynamic_field_name,  # Use dynamic field name\n        )\n\n        # Ensure index exists with baseline mapping\n        try:\n            if not client.indices.exists(index=self.index_name):\n                self.log(f\"Creating index '{self.index_name}' with base mapping\")\n                client.indices.create(index=self.index_name, body=mapping)\n        except RequestError as creation_error:\n            if creation_error.error != \"resource_already_exists_exception\":\n                logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n        # Ensure the dynamic field exists in the index\n        self._ensure_embedding_field_mapping(\n            client=client,\n            index_name=self.index_name,\n            field_name=dynamic_field_name,\n            dim=dim,\n            engine=engine,\n            space_type=space_type,\n            ef_construction=ef_construction,\n            m=m,\n        )\n\n        self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n        logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n        logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n        # Use the bulk ingestion with model tracking\n        return_ids = self._bulk_ingest_embeddings(\n            client=client,\n            index_name=self.index_name,\n            embeddings=vectors,\n            texts=texts,\n            metadatas=metadatas,\n            vector_field=dynamic_field_name,  # Use dynamic field name\n            text_field=\"text\",\n            embedding_model=embedding_model,  # Track the model\n            mapping=mapping,\n            is_aoss=is_aoss,\n        )\n\n        logger.info(\n            f\"Ingestion complete: Successfully indexed {len(return_ids)} documents with model '{embedding_model}'\"\n        )\n        self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n    # ---------- helpers for filters ----------\n    def _is_placeholder_term(self, term_obj: dict) -> bool:\n        # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n        return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n    def _coerce_filter_clauses(self, filter_obj: dict | str | None) -> list[dict]:\n        \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n        This method accepts two filter formats and converts them to standardized\n        OpenSearch query clauses:\n\n        Format A - Explicit filters:\n            {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n             \"limit\": 10, \"score_threshold\": 1.5}\n\n        Format B - Context-style mapping:\n            {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n        Args:\n            filter_obj: Filter configuration as a dict, a JSON string, or None\n\n        Returns:\n            List of OpenSearch filter clauses (term/terms objects)\n            Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n        \"\"\"\n        if not filter_obj:\n            return []\n\n        # If it is a string, try to parse it once\n        if isinstance(filter_obj, str):\n            try:\n                filter_obj = json.loads(filter_obj)\n            except json.JSONDecodeError:\n                # Not valid JSON - treat as no 
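The stacked tenacity policies defined earlier can be exercised in isolation. A minimal sketch, assuming only that tenacity is installed; the flaky embed function, call counter, and shortened wait times are fabricated for the demo:

from tenacity import retry, retry_if_exception, stop_after_attempt, wait_exponential

def is_rate_limit(exc: Exception) -> bool:
    return "429" in str(exc).lower()

calls = {"n": 0}

@retry(retry=retry_if_exception(is_rate_limit), stop=stop_after_attempt(5),
       wait=wait_exponential(multiplier=2, min=0.01, max=0.05), reraise=True)
@retry(retry=retry_if_exception(lambda e: not is_rate_limit(e)), stop=stop_after_attempt(3),
       wait=wait_exponential(multiplier=1, min=0.01, max=0.05), reraise=True)
def flaky_embed(text: str) -> list[float]:
    # Fails twice with a synthetic 429, then succeeds: the inner (other-errors)
    # policy declines to retry it, the outer (rate-limit) policy backs off and retries.
    calls["n"] += 1
    if calls["n"] < 3:
        raise RuntimeError("429 Too Many Requests")
    return [0.0, 1.0]

print(flaky_embed("hello"), "after", calls["n"], "attempts")  # [0.0, 1.0] after 3 attempts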
filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": {\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n logger.debug(f\"Model detection query: {agg_query}\")\n result = client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n # Log detailed bucket info for debugging\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n if not models:\n total_hits = result.get(\"hits\", {}).get(\"total\", {})\n total_count = total_hits.get(\"value\", 0) if isinstance(total_hits, dict) else total_hits\n logger.warning(\n f\"No embedding_model values found in index '{self.index_name}'. \"\n f\"Total docs in index: {total_count}. 
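For reference, the expected behaviour of _coerce_filter_clauses for the two documented formats, with invented values:

# Format A - explicit clauses pass through; placeholder terms are dropped:
#   {"filter": [{"term": {"filename": "doc.pdf"}},
#               {"term": {"owner": "__IMPOSSIBLE_VALUE__"}}]}
#   -> [{"term": {"filename": "doc.pdf"}}]
#
# Format B - context-style keys are renamed via field_mapping:
#   {"data_sources": ["a.pdf", "b.pdf"], "owners": ["user1"], "document_types": []}
#   -> [{"terms": {"filename": ["a.pdf", "b.pdf"]}},     # multi-value list -> terms
#       {"term": {"owner": "user1"}},                    # single value -> term
#       {"term": {"mimetype": "__IMPOSSIBLE_VALUE__"}}]  # empty list -> match nothing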
\"\n f\"This may indicate documents were indexed without the embedding_model field.\"\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n fallback_model = self._get_embedding_model_name()\n logger.info(f\"Using fallback model: {fallback_model}\")\n return [fallback_model]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. 
Returns aggregations for faceted search\n\n        Search weights:\n        - Semantic search (dis_max across all models): 70%\n        - Keyword search: 30%\n\n        Args:\n            query: Search query string (used for both vector embedding and keyword search)\n\n        Returns:\n            List of search results with page_content, metadata, and relevance scores\n\n        Raises:\n            ValueError: If embedding component is not provided or filter JSON is invalid\n        \"\"\"\n        logger.debug(f\"Search invoked; ingest_data: {self.ingest_data}\")\n        client = self.build_client()\n        q = (query or \"\").strip()\n\n        # Parse optional filter expression\n        filter_obj = None\n        if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n            try:\n                filter_obj = json.loads(self.filter_expression)\n            except json.JSONDecodeError as e:\n                msg = f\"Invalid filter_expression JSON: {e}\"\n                raise ValueError(msg) from e\n\n        if not self.embedding:\n            msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n            raise ValueError(msg)\n\n        # Check if every provided embedding is None (fail-safe mode); a bare None was rejected above\n        if isinstance(self.embedding, list) and all(e is None for e in self.embedding):\n            logger.error(\"Embedding returned None (fail-safe mode enabled). Cannot perform search.\")\n            return []\n\n        # Build filter clauses first so we can use them in model detection\n        filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n        # Detect available embedding models in the index (scoped by filters)\n        available_models = self._detect_available_models(client, filter_clauses)\n\n        if not available_models:\n            logger.warning(\"No embedding models found in index, using current model\")\n            available_models = [self._get_embedding_model_name()]\n\n        # Generate embeddings for ALL detected models\n        query_embeddings = {}\n\n        # Normalize embedding to list\n        embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n        # Filter out None values (fail-safe mode)\n        embeddings_list = [e for e in embeddings_list if e is not None]\n\n        if not embeddings_list:\n            logger.error(\n                \"No valid embeddings available after filtering None values (fail-safe mode). 
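Once dis_max has picked the best per-model KNN score, the 70/30 weighting described above reduces to simple arithmetic. A toy approximation with invented scores (a bool should clause sums its boosted sub-scores, so this is indicative, not an exact reproduction of OpenSearch scoring):

knn_scores = {"model_a": 0.82, "model_b": 0.91}  # one KNN score per embedding model
keyword_score = 0.40                             # multi_match score

semantic = max(knn_scores.values())  # dis_max with tie_breaker=0.0 keeps only the best
combined = 0.7 * semantic + 0.3 * keyword_score  # boosts applied by the bool should
print(round(combined, 3))  # 0.757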
Cannot perform search.\"\n )\n return []\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models_attr}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models_attr and isinstance(available_models_attr, dict):\n logger.info(\n f\"Embedding object {idx} provides {len(available_models_attr)} models via available_models dict\"\n )\n for model_name_key, dedicated_embedding in available_models_attr.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model '{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. 
\"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n self.log(f\"[SEARCH] Models detected in index: {available_models}\")\n self.log(f\"[SEARCH] Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n # Track matching status for debugging\n matched_models = []\n unmatched_models = []\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n matched_models.append(model_name)\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n self.log(f\"[MATCH] Model '{model_name}' - generated {len(vec)}-dim embedding\")\n else:\n # No matching embedding found for this model\n unmatched_models.append(model_name)\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. \"\n f\"This model will be skipped. Available identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[NO MATCH] Model '{model_name}' - available: {list(embedding_by_model.keys())}\")\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n self.log(f\"[ERROR] Embedding generation failed for '{model_name}': {e}\")\n\n # Log summary of model matching\n logger.info(f\"Model matching summary: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n self.log(f\"[SUMMARY] Model matching: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n if unmatched_models:\n self.log(f\"[WARN] Unmatched models in index: {unmatched_models}\")\n\n if not query_embeddings:\n msg = (\n f\"Failed to generate embeddings for any model. \"\n f\"Index has models: {available_models}, but no matching embedding objects found. 
\"\n f\"Available embedding identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[FAIL] Search failed: {msg}\")\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n self.log(f\"[SKIP] Field '{selected_field}' not a knn_vector - skipping model '{model_name}'\")\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n self.log(f\"[DIM MISMATCH] Model '{model_name}': query={vector_dim} vs field={field_dim} - skipping\")\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n self.log(\n f\"[WARN] No valid KNN queries could be built. 
\"\n f\"Query embeddings generated: {list(query_embeddings.keys())}, \"\n f\"but no matching knn_vector fields found in index.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(\n f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models: \"\n f\"{list(query_embeddings.keys())}\"\n )\n self.log(f\"[EXEC] Executing search with {len(knn_queries_with_candidates)} KNN queries, limit={limit}\")\n self.log(f\"[EXEC] Embedding models used: {list(query_embeddings.keys())}\")\n self.log(f\"[EXEC] KNN fields being queried: {embedding_fields}\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if 
use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n self.log(f\"[RESULT] Search complete: {len(hits)} results found\")\n\n if len(hits) == 0:\n self.log(\n f\"[EMPTY] Debug info: \"\n f\"models_in_index={available_models}, \"\n f\"matched_models={matched_models}, \"\n f\"knn_fields={embedding_fields}, \"\n f\"filters={len(filter_clauses)} clauses\"\n )\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Always builds the vector store (triggering ingestion if needed), then performs\n search only if a query is provided.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n # Always build/cache the vector store to ensure ingestion happens\n logger.info(f\"Search query: {self.search_query}\")\n if self._cached_vector_store is None:\n self.build_vector_store()\n\n # Only perform search if query is provided\n search_query = (self.search_query or \"\").strip()\n if not search_query:\n self.log(\"No search query provided - ingestion completed, returning empty results\")\n return []\n\n # Perform search with the provided query\n raw = self.search(search_query)\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return 
build_config\n" }, "docs_metadata": { "_input_type": "TableInput", diff --git a/flows/openrag_url_mcp.json b/flows/openrag_url_mcp.json index 008e6abd..29d4d12d 100644 --- a/flows/openrag_url_mcp.json +++ b/flows/openrag_url_mcp.json @@ -3723,7 +3723,7 @@ "show": true, "title_case": false, "type": "code", - "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. 
It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
\"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n logger.aerror(msg)\n raise ValueError(msg)\n\n def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n Amazon OpenSearch Serverless has restrictions on which vector engines\n can be used. This method ensures the selected engine is compatible.\n\n Args:\n is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n engine: The selected vector search engine\n\n Raises:\n ValueError: If AOSS is used with an incompatible engine\n \"\"\"\n if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n raise ValueError(msg)\n\n def _is_aoss_enabled(self, http_auth: Any) -> bool:\n \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n Args:\n http_auth: The HTTP authentication object\n\n Returns:\n True if AOSS is enabled, False otherwise\n \"\"\"\n return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n def _bulk_ingest_embeddings(\n self,\n client: OpenSearch,\n index_name: str,\n embeddings: list[list[float]],\n texts: list[str],\n metadatas: list[dict] | None = None,\n ids: list[str] | None = None,\n vector_field: str = \"vector_field\",\n text_field: str = \"text\",\n embedding_model: str = \"unknown\",\n mapping: dict | None = None,\n max_chunk_bytes: int | None = 1 * 1024 * 1024,\n *,\n is_aoss: bool = False,\n ) -> list[str]:\n \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n This method uses bulk operations to insert documents with their vector\n embeddings and metadata into the specified OpenSearch index. 
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n client = self.build_client()\n\n # Check if we're in ingestion-only mode (no search query)\n has_search_query = bool((self.search_query or \"\").strip())\n if not has_search_query:\n logger.debug(\"Ingestion-only mode activated: search 
operations will be skipped\")\n logger.debug(\"Starting ingestion mode...\")\n\n logger.warning(f\"Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n logger.debug(\"[INGESTION] _add_documents_to_vector_store called\")\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n logger.debug(\n f\"[INGESTION] ingest_data type: \"\n f\"{type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}\"\n )\n logger.debug(\n f\"[INGESTION] ingest_data content: \"\n f\"{self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}\"\n )\n\n docs = self.ingest_data or []\n if not docs:\n logger.debug(\"Ingestion complete: No documents provided\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Normalize embedding to list first\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n # Filter out None values (fail-safe mode) - do this BEFORE checking if empty\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n # NOW check if we have any valid embeddings left after filtering\n if not embeddings_list:\n logger.warning(\"All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.\")\n self.log(\"Embedding returned None (fail-safe mode enabled). 
Skipping document ingestion.\")\n return\n\n logger.debug(f\"[INGESTION] Valid embeddings after filtering: {len(embeddings_list)}\")\n self.log(f\"Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = []\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n 
identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n logger.info(f\"Selected embedding model for ingestion: '{embedding_model}'\")\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n logger.debug(f\"[LF] Docs metadata {self.docs_metadata}\")\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings (threaded for concurrency) with retries\n def 
embed_chunk(chunk_text: str) -> list[float]:\n return selected_embedding.embed_documents([chunk_text])[0]\n\n vectors: list[list[float]] | None = None\n last_exception: Exception | None = None\n delay = 1.0\n attempts = 0\n max_attempts = 3\n\n while attempts < max_attempts:\n attempts += 1\n try:\n # Restrict concurrency for IBM/Watsonx models to avoid rate limits\n is_ibm = (embedding_model and \"ibm\" in str(embedding_model).lower()) or (\n selected_embedding and \"watsonx\" in type(selected_embedding).__name__.lower()\n )\n logger.debug(f\"Is IBM: {is_ibm}\")\n max_workers = 1 if is_ibm else min(max(len(texts), 1), 8)\n\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk, chunk): idx for idx, chunk in enumerate(texts)}\n vectors = [None] * len(texts)\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n break\n except Exception as exc:\n last_exception = exc\n if attempts >= max_attempts:\n logger.error(\n f\"Embedding generation failed for model {embedding_model} after retries\",\n error=str(exc),\n )\n raise\n logger.warning(\n \"Threaded embedding generation failed for model %s (attempt %s/%s), retrying in %.1fs\",\n embedding_model,\n attempts,\n max_attempts,\n delay,\n )\n time.sleep(delay)\n delay = min(delay * 2, 8.0)\n\n if vectors is None:\n raise RuntimeError(\n f\"Embedding generation failed for {embedding_model}: {last_exception}\"\n if last_exception\n else f\"Embedding generation failed for {embedding_model}\"\n )\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n 
embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n logger.info(\n f\"Ingestion complete: Successfully indexed {len(return_ids)} documents with model '{embedding_model}'\"\n )\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": 
{\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n logger.debug(f\"Model detection query: {agg_query}\")\n result = client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n # Log detailed bucket info for debugging\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n if not models:\n total_hits = result.get(\"hits\", {}).get(\"total\", {})\n total_count = total_hits.get(\"value\", 0) if isinstance(total_hits, dict) else total_hits\n logger.warning(\n f\"No embedding_model values found in index '{self.index_name}'. \"\n f\"Total docs in index: {total_count}. \"\n f\"This may indicate documents were indexed without the embedding_model field.\"\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n fallback_model = self._get_embedding_model_name()\n logger.info(f\"Using fallback model: {fallback_model}\")\n return [fallback_model]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. 
Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Check if embedding is None (fail-safe mode)\n if self.embedding is None or (isinstance(self.embedding, list) and all(e is None for e in self.embedding)):\n logger.error(\"Embedding returned None (fail-safe mode enabled). 
Cannot perform search.\")\n return []\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n # Filter out None values (fail-safe mode)\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n if not embeddings_list:\n logger.error(\n \"No valid embeddings available after filtering None values (fail-safe mode). Cannot perform search.\"\n )\n return []\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models_attr}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models_attr and isinstance(available_models_attr, dict):\n logger.info(\n f\"Embedding object {idx} provides {len(available_models_attr)} models via available_models dict\"\n )\n for model_name_key, dedicated_embedding in available_models_attr.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model '{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - 
track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. \"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n self.log(f\"[SEARCH] Models detected in index: {available_models}\")\n self.log(f\"[SEARCH] Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n # Track matching status for debugging\n matched_models = []\n unmatched_models = []\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n matched_models.append(model_name)\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n self.log(f\"[MATCH] Model '{model_name}' - generated {len(vec)}-dim embedding\")\n else:\n # No matching embedding found for this model\n unmatched_models.append(model_name)\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. \"\n f\"This model will be skipped. 
Available identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[NO MATCH] Model '{model_name}' - available: {list(embedding_by_model.keys())}\")\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n self.log(f\"[ERROR] Embedding generation failed for '{model_name}': {e}\")\n\n # Log summary of model matching\n logger.info(f\"Model matching summary: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n self.log(f\"[SUMMARY] Model matching: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n if unmatched_models:\n self.log(f\"[WARN] Unmatched models in index: {unmatched_models}\")\n\n if not query_embeddings:\n msg = (\n f\"Failed to generate embeddings for any model. \"\n f\"Index has models: {available_models}, but no matching embedding objects found. \"\n f\"Available embedding identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[FAIL] Search failed: {msg}\")\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n self.log(f\"[SKIP] Field '{selected_field}' not a knn_vector - skipping model '{model_name}'\")\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n self.log(f\"[DIM MISMATCH] Model '{model_name}': query={vector_dim} vs field={field_dim} - skipping\")\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. 
Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n self.log(\n f\"[WARN] No valid KNN queries could be built. \"\n f\"Query embeddings generated: {list(query_embeddings.keys())}, \"\n f\"but no matching knn_vector fields found in index.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(\n f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models: \"\n f\"{list(query_embeddings.keys())}\"\n )\n self.log(f\"[EXEC] Executing search with {len(knn_queries_with_candidates)} KNN queries, limit={limit}\")\n self.log(f\"[EXEC] Embedding models used: {list(query_embeddings.keys())}\")\n self.log(f\"[EXEC] KNN fields being queried: {embedding_fields}\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n 
fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n self.log(f\"[RESULT] Search complete: {len(hits)} results found\")\n\n if len(hits) == 0:\n self.log(\n f\"[EMPTY] Debug info: \"\n f\"models_in_index={available_models}, \"\n f\"matched_models={matched_models}, \"\n f\"knn_fields={embedding_fields}, \"\n f\"filters={len(filter_clauses)} clauses\"\n )\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Always builds the vector store (triggering ingestion if needed), then performs\n search only if a query is provided.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n # Always build/cache the vector store to ensure ingestion happens\n logger.info(f\"Search query: {self.search_query}\")\n if self._cached_vector_store is None:\n self.build_vector_store()\n\n # Only perform search if query is provided\n search_query = (self.search_query or \"\").strip()\n if not search_query:\n self.log(\"No search query provided - ingestion completed, returning empty results\")\n return []\n\n # Perform search with the provided query\n raw = self.search(search_query)\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n 
build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return build_config\n" + "value": "from __future__ import annotations\n\nimport copy\nimport json\nimport time\nimport uuid\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nfrom typing import Any\n\nfrom opensearchpy import OpenSearch, helpers\nfrom opensearchpy.exceptions import OpenSearchException, RequestError\n\nfrom lfx.base.vectorstores.model import LCVectorStoreComponent, check_cached_vector_store\nfrom lfx.base.vectorstores.vector_store_connection_decorator import vector_store_connection\nfrom lfx.io import BoolInput, DropdownInput, HandleInput, IntInput, MultilineInput, SecretStrInput, StrInput, TableInput\nfrom lfx.log import logger\nfrom lfx.schema.data import Data\n\n\ndef normalize_model_name(model_name: str) -> str:\n \"\"\"Normalize embedding model name for use as field suffix.\n\n Converts model names to valid OpenSearch field names by replacing\n special characters and ensuring alphanumeric format.\n\n Args:\n model_name: Original embedding model name (e.g., \"text-embedding-3-small\")\n\n Returns:\n Normalized field suffix (e.g., \"text_embedding_3_small\")\n \"\"\"\n normalized = model_name.lower()\n # Replace common separators with underscores\n normalized = normalized.replace(\"-\", \"_\").replace(\":\", \"_\").replace(\"/\", \"_\").replace(\".\", \"_\")\n # Remove any non-alphanumeric characters except underscores\n normalized = \"\".join(c if c.isalnum() or c == \"_\" else \"_\" for c in normalized)\n # Remove duplicate underscores\n while \"__\" in normalized:\n normalized = normalized.replace(\"__\", \"_\")\n return normalized.strip(\"_\")\n\n\ndef get_embedding_field_name(model_name: str) -> str:\n \"\"\"Get the dynamic embedding field name for a model.\n\n Args:\n model_name: Embedding model name\n\n Returns:\n Field name in format: chunk_embedding_{normalized_model_name}\n \"\"\"\n logger.info(f\"chunk_embedding_{normalize_model_name(model_name)}\")\n return f\"chunk_embedding_{normalize_model_name(model_name)}\"\n\n\n@vector_store_connection\nclass OpenSearchVectorStoreComponentMultimodalMultiEmbedding(LCVectorStoreComponent):\n \"\"\"OpenSearch Vector Store Component with Multi-Model Hybrid Search Capabilities.\n\n This component provides vector storage and retrieval using OpenSearch, combining semantic\n similarity search (KNN) with keyword-based search for optimal results. 
It supports:\n - Multiple embedding models per index with dynamic field names\n - Automatic detection and querying of all available embedding models\n - Parallel embedding generation for multi-model search\n - Document ingestion with model tracking\n - Advanced filtering and aggregations\n - Flexible authentication options\n\n Features:\n - Multi-model vector storage with dynamic fields (chunk_embedding_{model_name})\n - Hybrid search combining multiple KNN queries (dis_max) + keyword matching\n - Auto-detection of available models in the index\n - Parallel query embedding generation for all detected models\n - Vector storage with configurable engines (jvector, nmslib, faiss, lucene)\n - Flexible authentication (Basic auth, JWT tokens)\n\n Model Name Resolution:\n - Priority: deployment > model > model_name attributes\n - This ensures correct matching between embedding objects and index fields\n - When multiple embeddings are provided, specify embedding_model_name to select which one to use\n - During search, each detected model in the index is matched to its corresponding embedding object\n \"\"\"\n\n display_name: str = \"OpenSearch (Multi-Model Multi-Embedding)\"\n icon: str = \"OpenSearch\"\n description: str = (\n \"Store and search documents using OpenSearch with multi-model hybrid semantic and keyword search.\"\n )\n\n # Keys we consider baseline\n default_keys: list[str] = [\n \"opensearch_url\",\n \"index_name\",\n *[i.name for i in LCVectorStoreComponent.inputs], # search_query, add_documents, etc.\n \"embedding\",\n \"embedding_model_name\",\n \"vector_field\",\n \"number_of_results\",\n \"auth_mode\",\n \"username\",\n \"password\",\n \"jwt_token\",\n \"jwt_header\",\n \"bearer_prefix\",\n \"use_ssl\",\n \"verify_certs\",\n \"filter_expression\",\n \"engine\",\n \"space_type\",\n \"ef_construction\",\n \"m\",\n \"num_candidates\",\n \"docs_metadata\",\n ]\n\n inputs = [\n TableInput(\n name=\"docs_metadata\",\n display_name=\"Document Metadata\",\n info=(\n \"Additional metadata key-value pairs to be added to all ingested documents. \"\n \"Useful for tagging documents with source information, categories, or other custom attributes.\"\n ),\n table_schema=[\n {\n \"name\": \"key\",\n \"display_name\": \"Key\",\n \"type\": \"str\",\n \"description\": \"Key name\",\n },\n {\n \"name\": \"value\",\n \"display_name\": \"Value\",\n \"type\": \"str\",\n \"description\": \"Value of the metadata\",\n },\n ],\n value=[],\n input_types=[\"Data\"],\n ),\n StrInput(\n name=\"opensearch_url\",\n display_name=\"OpenSearch URL\",\n value=\"http://localhost:9200\",\n info=(\n \"The connection URL for your OpenSearch cluster \"\n \"(e.g., http://localhost:9200 for local development or your cloud endpoint).\"\n ),\n ),\n StrInput(\n name=\"index_name\",\n display_name=\"Index Name\",\n value=\"langflow\",\n info=(\n \"The OpenSearch index name where documents will be stored and searched. \"\n \"Will be created automatically if it doesn't exist.\"\n ),\n ),\n DropdownInput(\n name=\"engine\",\n display_name=\"Vector Engine\",\n options=[\"jvector\", \"nmslib\", \"faiss\", \"lucene\"],\n value=\"jvector\",\n info=(\n \"Vector search engine for similarity calculations. 'jvector' is recommended for most use cases. 
\"\n \"Note: Amazon OpenSearch Serverless only supports 'nmslib' or 'faiss'.\"\n ),\n advanced=True,\n ),\n DropdownInput(\n name=\"space_type\",\n display_name=\"Distance Metric\",\n options=[\"l2\", \"l1\", \"cosinesimil\", \"linf\", \"innerproduct\"],\n value=\"l2\",\n info=(\n \"Distance metric for calculating vector similarity. 'l2' (Euclidean) is most common, \"\n \"'cosinesimil' for cosine similarity, 'innerproduct' for dot product.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"ef_construction\",\n display_name=\"EF Construction\",\n value=512,\n info=(\n \"Size of the dynamic candidate list during index construction. \"\n \"Higher values improve recall but increase indexing time and memory usage.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"m\",\n display_name=\"M Parameter\",\n value=16,\n info=(\n \"Number of bidirectional connections for each vector in the HNSW graph. \"\n \"Higher values improve search quality but increase memory usage and indexing time.\"\n ),\n advanced=True,\n ),\n IntInput(\n name=\"num_candidates\",\n display_name=\"Candidate Pool Size\",\n value=1000,\n info=(\n \"Number of approximate neighbors to consider for each KNN query. \"\n \"Some OpenSearch deployments do not support this parameter; set to 0 to disable.\"\n ),\n advanced=True,\n ),\n *LCVectorStoreComponent.inputs, # includes search_query, add_documents, etc.\n HandleInput(name=\"embedding\", display_name=\"Embedding\", input_types=[\"Embeddings\"], is_list=True),\n StrInput(\n name=\"embedding_model_name\",\n display_name=\"Embedding Model Name\",\n value=\"\",\n info=(\n \"Name of the embedding model to use for ingestion. This selects which embedding from the list \"\n \"will be used to embed documents. Matches on deployment, model, model_id, or model_name. \"\n \"For duplicate deployments, use combined format: 'deployment:model' \"\n \"(e.g., 'text-embedding-ada-002:text-embedding-3-large'). \"\n \"Leave empty to use the first embedding. Error message will show all available identifiers.\"\n ),\n advanced=False,\n ),\n StrInput(\n name=\"vector_field\",\n display_name=\"Legacy Vector Field Name\",\n value=\"chunk_embedding\",\n advanced=True,\n info=(\n \"Legacy field name for backward compatibility. 
New documents use dynamic fields \"\n \"(chunk_embedding_{model_name}) based on the embedding_model_name.\"\n ),\n ),\n IntInput(\n name=\"number_of_results\",\n display_name=\"Default Result Limit\",\n value=10,\n advanced=True,\n info=(\n \"Default maximum number of search results to return when no limit is \"\n \"specified in the filter expression.\"\n ),\n ),\n MultilineInput(\n name=\"filter_expression\",\n display_name=\"Search Filters (JSON)\",\n value=\"\",\n info=(\n \"Optional JSON configuration for search filtering, result limits, and score thresholds.\\n\\n\"\n \"Format 1 - Explicit filters:\\n\"\n '{\"filter\": [{\"term\": {\"filename\":\"doc.pdf\"}}, '\n '{\"terms\":{\"owner\":[\"user1\",\"user2\"]}}], \"limit\": 10, \"score_threshold\": 1.6}\\n\\n'\n \"Format 2 - Context-style mapping:\\n\"\n '{\"data_sources\":[\"file.pdf\"], \"document_types\":[\"application/pdf\"], \"owners\":[\"user123\"]}\\n\\n'\n \"Use __IMPOSSIBLE_VALUE__ as placeholder to ignore specific filters.\"\n ),\n ),\n # ----- Auth controls (dynamic) -----\n DropdownInput(\n name=\"auth_mode\",\n display_name=\"Authentication Mode\",\n value=\"basic\",\n options=[\"basic\", \"jwt\"],\n info=(\n \"Authentication method: 'basic' for username/password authentication, \"\n \"or 'jwt' for JSON Web Token (Bearer) authentication.\"\n ),\n real_time_refresh=True,\n advanced=False,\n ),\n StrInput(\n name=\"username\",\n display_name=\"Username\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"password\",\n display_name=\"OpenSearch Password\",\n value=\"admin\",\n show=True,\n ),\n SecretStrInput(\n name=\"jwt_token\",\n display_name=\"JWT Token\",\n value=\"JWT\",\n load_from_db=False,\n show=False,\n info=(\n \"Valid JSON Web Token for authentication. \"\n \"Will be sent in the Authorization header (with optional 'Bearer ' prefix).\"\n ),\n ),\n StrInput(\n name=\"jwt_header\",\n display_name=\"JWT Header Name\",\n value=\"Authorization\",\n show=False,\n advanced=True,\n ),\n BoolInput(\n name=\"bearer_prefix\",\n display_name=\"Prefix 'Bearer '\",\n value=True,\n show=False,\n advanced=True,\n ),\n # ----- TLS -----\n BoolInput(\n name=\"use_ssl\",\n display_name=\"Use SSL/TLS\",\n value=True,\n advanced=True,\n info=\"Enable SSL/TLS encryption for secure connections to OpenSearch.\",\n ),\n BoolInput(\n name=\"verify_certs\",\n display_name=\"Verify SSL Certificates\",\n value=False,\n advanced=True,\n info=(\n \"Verify SSL certificates when connecting. 
\"\n \"Disable for self-signed certificates in development environments.\"\n ),\n ),\n ]\n\n def _get_embedding_model_name(self, embedding_obj=None) -> str:\n \"\"\"Get the embedding model name from component config or embedding object.\n\n Priority: deployment > model > model_id > model_name\n This ensures we use the actual model being deployed, not just the configured model.\n Supports multiple embedding providers (OpenAI, Watsonx, Cohere, etc.)\n\n Args:\n embedding_obj: Specific embedding object to get name from (optional)\n\n Returns:\n Embedding model name\n\n Raises:\n ValueError: If embedding model name cannot be determined\n \"\"\"\n # First try explicit embedding_model_name input\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name:\n return self.embedding_model_name.strip()\n\n # Try to get from provided embedding object\n if embedding_obj:\n # Priority: deployment > model > model_id > model_name\n if hasattr(embedding_obj, \"deployment\") and embedding_obj.deployment:\n return str(embedding_obj.deployment)\n if hasattr(embedding_obj, \"model\") and embedding_obj.model:\n return str(embedding_obj.model)\n if hasattr(embedding_obj, \"model_id\") and embedding_obj.model_id:\n return str(embedding_obj.model_id)\n if hasattr(embedding_obj, \"model_name\") and embedding_obj.model_name:\n return str(embedding_obj.model_name)\n\n # Try to get from embedding component (legacy single embedding)\n if hasattr(self, \"embedding\") and self.embedding:\n # Handle list of embeddings\n if isinstance(self.embedding, list) and len(self.embedding) > 0:\n first_emb = self.embedding[0]\n if hasattr(first_emb, \"deployment\") and first_emb.deployment:\n return str(first_emb.deployment)\n if hasattr(first_emb, \"model\") and first_emb.model:\n return str(first_emb.model)\n if hasattr(first_emb, \"model_id\") and first_emb.model_id:\n return str(first_emb.model_id)\n if hasattr(first_emb, \"model_name\") and first_emb.model_name:\n return str(first_emb.model_name)\n # Handle single embedding\n elif not isinstance(self.embedding, list):\n if hasattr(self.embedding, \"deployment\") and self.embedding.deployment:\n return str(self.embedding.deployment)\n if hasattr(self.embedding, \"model\") and self.embedding.model:\n return str(self.embedding.model)\n if hasattr(self.embedding, \"model_id\") and self.embedding.model_id:\n return str(self.embedding.model_id)\n if hasattr(self.embedding, \"model_name\") and self.embedding.model_name:\n return str(self.embedding.model_name)\n\n msg = (\n \"Could not determine embedding model name. 
\"\n \"Please set the 'embedding_model_name' field or ensure the embedding component \"\n \"has a 'deployment', 'model', 'model_id', or 'model_name' attribute.\"\n )\n raise ValueError(msg)\n\n # ---------- helper functions for index management ----------\n def _default_text_mapping(\n self,\n dim: int,\n engine: str = \"jvector\",\n space_type: str = \"l2\",\n ef_search: int = 512,\n ef_construction: int = 100,\n m: int = 16,\n vector_field: str = \"vector_field\",\n ) -> dict[str, Any]:\n \"\"\"Create the default OpenSearch index mapping for vector search.\n\n This method generates the index configuration with k-NN settings optimized\n for approximate nearest neighbor search using the specified vector engine.\n Includes the embedding_model keyword field for tracking which model was used.\n\n Args:\n dim: Dimensionality of the vector embeddings\n engine: Vector search engine (jvector, nmslib, faiss, lucene)\n space_type: Distance metric for similarity calculation\n ef_search: Size of dynamic list used during search\n ef_construction: Size of dynamic list used during index construction\n m: Number of bidirectional links for each vector\n vector_field: Name of the field storing vector embeddings\n\n Returns:\n Dictionary containing OpenSearch index mapping configuration\n \"\"\"\n return {\n \"settings\": {\"index\": {\"knn\": True, \"knn.algo_param.ef_search\": ef_search}},\n \"mappings\": {\n \"properties\": {\n vector_field: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n \"embedding_model\": {\"type\": \"keyword\"}, # Track which model was used\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n },\n }\n\n def _ensure_embedding_field_mapping(\n self,\n client: OpenSearch,\n index_name: str,\n field_name: str,\n dim: int,\n engine: str,\n space_type: str,\n ef_construction: int,\n m: int,\n ) -> None:\n \"\"\"Lazily add a dynamic embedding field to the index if it doesn't exist.\n\n This allows adding new embedding models without recreating the entire index.\n Also ensures the embedding_model tracking field exists.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index name\n field_name: Dynamic field name for this embedding model\n dim: Vector dimensionality\n engine: Vector search engine\n space_type: Distance metric\n ef_construction: Construction parameter\n m: HNSW parameter\n \"\"\"\n try:\n mapping = {\n \"properties\": {\n field_name: {\n \"type\": \"knn_vector\",\n \"dimension\": dim,\n \"method\": {\n \"name\": \"disk_ann\",\n \"space_type\": space_type,\n \"engine\": engine,\n \"parameters\": {\"ef_construction\": ef_construction, \"m\": m},\n },\n },\n # Also ensure the embedding_model tracking field exists as keyword\n \"embedding_model\": {\"type\": \"keyword\"},\n \"embedding_dimensions\": {\"type\": \"integer\"},\n }\n }\n client.indices.put_mapping(index=index_name, body=mapping)\n logger.info(f\"Added/updated embedding field mapping: {field_name}\")\n except Exception as e:\n logger.warning(f\"Could not add embedding field mapping for {field_name}: {e}\")\n raise\n\n properties = self._get_index_properties(client)\n if not self._is_knn_vector_field(properties, field_name):\n msg = f\"Field '{field_name}' is not mapped as knn_vector. 
Current mapping: {properties.get(field_name)}\"\n            logger.error(msg)\n            raise ValueError(msg)\n\n    def _validate_aoss_with_engines(self, *, is_aoss: bool, engine: str) -> None:\n        \"\"\"Validate engine compatibility with Amazon OpenSearch Serverless (AOSS).\n\n        Amazon OpenSearch Serverless has restrictions on which vector engines\n        can be used. This method ensures the selected engine is compatible.\n\n        Args:\n            is_aoss: Whether the connection is to Amazon OpenSearch Serverless\n            engine: The selected vector search engine\n\n        Raises:\n            ValueError: If AOSS is used with an incompatible engine\n        \"\"\"\n        if is_aoss and engine not in {\"nmslib\", \"faiss\"}:\n            msg = \"Amazon OpenSearch Service Serverless only supports `nmslib` or `faiss` engines\"\n            raise ValueError(msg)\n\n    def _is_aoss_enabled(self, http_auth: Any) -> bool:\n        \"\"\"Determine if Amazon OpenSearch Serverless (AOSS) is being used.\n\n        Args:\n            http_auth: The HTTP authentication object\n\n        Returns:\n            True if AOSS is enabled, False otherwise\n        \"\"\"\n        return http_auth is not None and hasattr(http_auth, \"service\") and http_auth.service == \"aoss\"\n\n    def _bulk_ingest_embeddings(\n        self,\n        client: OpenSearch,\n        index_name: str,\n        embeddings: list[list[float]],\n        texts: list[str],\n        metadatas: list[dict] | None = None,\n        ids: list[str] | None = None,\n        vector_field: str = \"vector_field\",\n        text_field: str = \"text\",\n        embedding_model: str = \"unknown\",\n        mapping: dict | None = None,\n        max_chunk_bytes: int | None = 1 * 1024 * 1024,\n        *,\n        is_aoss: bool = False,\n    ) -> list[str]:\n        \"\"\"Efficiently ingest multiple documents with embeddings into OpenSearch.\n\n        This method uses bulk operations to insert documents with their vector\n        embeddings and metadata into the specified OpenSearch index. 
Each document\n is tagged with the embedding_model name for tracking.\n\n Args:\n client: OpenSearch client instance\n index_name: Target index for document storage\n embeddings: List of vector embeddings for each document\n texts: List of document texts\n metadatas: Optional metadata dictionaries for each document\n ids: Optional document IDs (UUIDs generated if not provided)\n vector_field: Field name for storing vector embeddings\n text_field: Field name for storing document text\n embedding_model: Name of the embedding model used\n mapping: Optional index mapping configuration\n max_chunk_bytes: Maximum size per bulk request chunk\n is_aoss: Whether using Amazon OpenSearch Serverless\n\n Returns:\n List of document IDs that were successfully ingested\n \"\"\"\n if not mapping:\n mapping = {}\n\n requests = []\n return_ids = []\n vector_dimensions = len(embeddings[0]) if embeddings else None\n\n for i, text in enumerate(texts):\n metadata = metadatas[i] if metadatas else {}\n if vector_dimensions is not None and \"embedding_dimensions\" not in metadata:\n metadata = {**metadata, \"embedding_dimensions\": vector_dimensions}\n _id = ids[i] if ids else str(uuid.uuid4())\n request = {\n \"_op_type\": \"index\",\n \"_index\": index_name,\n vector_field: embeddings[i],\n text_field: text,\n \"embedding_model\": embedding_model, # Track which model was used\n **metadata,\n }\n if is_aoss:\n request[\"id\"] = _id\n else:\n request[\"_id\"] = _id\n requests.append(request)\n return_ids.append(_id)\n if metadatas:\n self.log(f\"Sample metadata: {metadatas[0] if metadatas else {}}\")\n helpers.bulk(client, requests, max_chunk_bytes=max_chunk_bytes)\n return return_ids\n\n # ---------- auth / client ----------\n def _build_auth_kwargs(self) -> dict[str, Any]:\n \"\"\"Build authentication configuration for OpenSearch client.\n\n Constructs the appropriate authentication parameters based on the\n selected auth mode (basic username/password or JWT token).\n\n Returns:\n Dictionary containing authentication configuration\n\n Raises:\n ValueError: If required authentication parameters are missing\n \"\"\"\n mode = (self.auth_mode or \"basic\").strip().lower()\n if mode == \"jwt\":\n token = (self.jwt_token or \"\").strip()\n if not token:\n msg = \"Auth Mode is 'jwt' but no jwt_token was provided.\"\n raise ValueError(msg)\n header_name = (self.jwt_header or \"Authorization\").strip()\n header_value = f\"Bearer {token}\" if self.bearer_prefix else token\n return {\"headers\": {header_name: header_value}}\n user = (self.username or \"\").strip()\n pwd = (self.password or \"\").strip()\n if not user or not pwd:\n msg = \"Auth Mode is 'basic' but username/password are missing.\"\n raise ValueError(msg)\n return {\"http_auth\": (user, pwd)}\n\n def build_client(self) -> OpenSearch:\n \"\"\"Create and configure an OpenSearch client instance.\n\n Returns:\n Configured OpenSearch client ready for operations\n \"\"\"\n auth_kwargs = self._build_auth_kwargs()\n return OpenSearch(\n hosts=[self.opensearch_url],\n use_ssl=self.use_ssl,\n verify_certs=self.verify_certs,\n ssl_assert_hostname=False,\n ssl_show_warn=False,\n **auth_kwargs,\n )\n\n @check_cached_vector_store\n def build_vector_store(self) -> OpenSearch:\n # Return raw OpenSearch client as our \"vector store.\"\n client = self.build_client()\n\n # Check if we're in ingestion-only mode (no search query)\n has_search_query = bool((self.search_query or \"\").strip())\n if not has_search_query:\n logger.debug(\"Ingestion-only mode activated: search 
operations will be skipped\")\n logger.debug(\"Starting ingestion mode...\")\n\n logger.warning(f\"Embedding: {self.embedding}\")\n self._add_documents_to_vector_store(client=client)\n return client\n\n # ---------- ingest ----------\n def _add_documents_to_vector_store(self, client: OpenSearch) -> None:\n \"\"\"Process and ingest documents into the OpenSearch vector store.\n\n This method handles the complete document ingestion pipeline:\n - Prepares document data and metadata\n - Generates vector embeddings using the selected model\n - Creates appropriate index mappings with dynamic field names\n - Bulk inserts documents with vectors and model tracking\n\n Args:\n client: OpenSearch client for performing operations\n \"\"\"\n logger.debug(\"[INGESTION] _add_documents_to_vector_store called\")\n # Convert DataFrame to Data if needed using parent's method\n self.ingest_data = self._prepare_ingest_data()\n\n logger.debug(\n f\"[INGESTION] ingest_data type: \"\n f\"{type(self.ingest_data)}, length: {len(self.ingest_data) if self.ingest_data else 0}\"\n )\n logger.debug(\n f\"[INGESTION] ingest_data content: \"\n f\"{self.ingest_data[:2] if self.ingest_data and len(self.ingest_data) > 0 else 'empty'}\"\n )\n\n docs = self.ingest_data or []\n if not docs:\n logger.debug(\"Ingestion complete: No documents provided\")\n return\n\n if not self.embedding:\n msg = \"Embedding handle is required to embed documents.\"\n raise ValueError(msg)\n\n # Normalize embedding to list first\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n\n # Filter out None values (fail-safe mode) - do this BEFORE checking if empty\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n # NOW check if we have any valid embeddings left after filtering\n if not embeddings_list:\n logger.warning(\"All embeddings returned None (fail-safe mode enabled). Skipping document ingestion.\")\n self.log(\"Embedding returned None (fail-safe mode enabled). 
Skipping document ingestion.\")\n return\n\n logger.debug(f\"[INGESTION] Valid embeddings after filtering: {len(embeddings_list)}\")\n self.log(f\"Available embedding models: {len(embeddings_list)}\")\n\n # Select the embedding to use for ingestion\n selected_embedding = None\n embedding_model = None\n\n # If embedding_model_name is specified, find matching embedding\n if hasattr(self, \"embedding_model_name\") and self.embedding_model_name and self.embedding_model_name.strip():\n target_model_name = self.embedding_model_name.strip()\n self.log(f\"Looking for embedding model: {target_model_name}\")\n\n for emb_obj in embeddings_list:\n # Check all possible model identifiers (deployment, model, model_id, model_name)\n # Also check available_models list from EmbeddingsWithModels\n possible_names = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n if deployment:\n possible_names.append(str(deployment))\n if model:\n possible_names.append(str(model))\n if model_id:\n possible_names.append(str(model_id))\n if model_name:\n possible_names.append(str(model_name))\n\n # Also add combined identifier\n if deployment and model and deployment != model:\n possible_names.append(f\"{deployment}:{model}\")\n\n # Add all models from available_models dict\n if available_models_attr and isinstance(available_models_attr, dict):\n possible_names.extend(\n str(model_key).strip()\n for model_key in available_models_attr\n if model_key and str(model_key).strip()\n )\n\n # Match if target matches any of the possible names\n if target_model_name in possible_names:\n # Check if target is in available_models dict - use dedicated instance\n if (\n available_models_attr\n and isinstance(available_models_attr, dict)\n and target_model_name in available_models_attr\n ):\n # Use the dedicated embedding instance from the dict\n selected_embedding = available_models_attr[target_model_name]\n embedding_model = target_model_name\n self.log(f\"Found dedicated embedding instance for '{embedding_model}' in available_models dict\")\n else:\n # Traditional identifier match\n selected_embedding = emb_obj\n embedding_model = self._get_embedding_model_name(emb_obj)\n self.log(f\"Found matching embedding model: {embedding_model} (matched on: {target_model_name})\")\n break\n\n if not selected_embedding:\n # Build detailed list of available embeddings with all their identifiers\n available_info = []\n for idx, emb in enumerate(embeddings_list):\n emb_type = type(emb).__name__\n identifiers = []\n deployment = getattr(emb, \"deployment\", None)\n model = getattr(emb, \"model\", None)\n model_id = getattr(emb, \"model_id\", None)\n model_name = getattr(emb, \"model_name\", None)\n available_models_attr = getattr(emb, \"available_models\", None)\n\n if deployment:\n identifiers.append(f\"deployment='{deployment}'\")\n if model:\n identifiers.append(f\"model='{model}'\")\n if model_id:\n identifiers.append(f\"model_id='{model_id}'\")\n if model_name:\n identifiers.append(f\"model_name='{model_name}'\")\n\n # Add combined identifier as an option\n if deployment and model and deployment != model:\n identifiers.append(f\"combined='{deployment}:{model}'\")\n\n # Add available_models dict if present\n if available_models_attr and isinstance(available_models_attr, dict):\n 
identifiers.append(f\"available_models={list(available_models_attr.keys())}\")\n\n available_info.append(\n f\" [{idx}] {emb_type}: {', '.join(identifiers) if identifiers else 'No identifiers'}\"\n )\n\n msg = (\n f\"Embedding model '{target_model_name}' not found in available embeddings.\\n\\n\"\n f\"Available embeddings:\\n\" + \"\\n\".join(available_info) + \"\\n\\n\"\n \"Please set 'embedding_model_name' to one of the identifier values shown above \"\n \"(use the value after the '=' sign, without quotes).\\n\"\n \"For duplicate deployments, use the 'combined' format.\\n\"\n \"Or leave it empty to use the first embedding.\"\n )\n raise ValueError(msg)\n else:\n # Use first embedding if no model name specified\n selected_embedding = embeddings_list[0]\n embedding_model = self._get_embedding_model_name(selected_embedding)\n self.log(f\"No embedding_model_name specified, using first embedding: {embedding_model}\")\n\n dynamic_field_name = get_embedding_field_name(embedding_model)\n\n logger.info(f\"Selected embedding model for ingestion: '{embedding_model}'\")\n self.log(f\"Using embedding model for ingestion: {embedding_model}\")\n self.log(f\"Dynamic vector field: {dynamic_field_name}\")\n\n # Log embedding details for debugging\n if hasattr(selected_embedding, \"deployment\"):\n logger.info(f\"Embedding deployment: {selected_embedding.deployment}\")\n if hasattr(selected_embedding, \"model\"):\n logger.info(f\"Embedding model: {selected_embedding.model}\")\n if hasattr(selected_embedding, \"model_id\"):\n logger.info(f\"Embedding model_id: {selected_embedding.model_id}\")\n if hasattr(selected_embedding, \"dimensions\"):\n logger.info(f\"Embedding dimensions: {selected_embedding.dimensions}\")\n if hasattr(selected_embedding, \"available_models\"):\n logger.info(f\"Embedding available_models: {selected_embedding.available_models}\")\n\n # No model switching needed - each model in available_models has its own dedicated instance\n # The selected_embedding is already configured correctly for the target model\n logger.info(f\"Using embedding instance for '{embedding_model}' - pre-configured and ready to use\")\n\n # Extract texts and metadata from documents\n texts = []\n metadatas = []\n # Process docs_metadata table input into a dict\n additional_metadata = {}\n logger.debug(f\"[LF] Docs metadata {self.docs_metadata}\")\n if hasattr(self, \"docs_metadata\") and self.docs_metadata:\n logger.info(f\"[LF] Docs metadata {self.docs_metadata}\")\n if isinstance(self.docs_metadata[-1], Data):\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n self.docs_metadata = self.docs_metadata[-1].data\n logger.info(f\"[LF] Docs metadata is a Data object {self.docs_metadata}\")\n additional_metadata.update(self.docs_metadata)\n else:\n for item in self.docs_metadata:\n if isinstance(item, dict) and \"key\" in item and \"value\" in item:\n additional_metadata[item[\"key\"]] = item[\"value\"]\n # Replace string \"None\" values with actual None\n for key, value in additional_metadata.items():\n if value == \"None\":\n additional_metadata[key] = None\n logger.info(f\"[LF] Additional metadata {additional_metadata}\")\n for doc_obj in docs:\n data_copy = json.loads(doc_obj.model_dump_json())\n text = data_copy.pop(doc_obj.text_key, doc_obj.default_value)\n texts.append(text)\n\n # Merge additional metadata from table input\n data_copy.update(additional_metadata)\n\n metadatas.append(data_copy)\n self.log(metadatas)\n\n # Generate embeddings with rate-limit-aware retry logic using 
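# Note on the dynamic field: dynamic_field_name is derived from the selected
# model name, giving each embedding model its own knn_vector field (the name
# is assumed to be a sanitized form along the lines of
# "chunk_embedding_<model>"). One field per model is what lets documents
# embedded by different models coexist in a single index without dimension
# clashes.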
tenacity\n from tenacity import (\n retry,\n retry_if_exception,\n stop_after_attempt,\n wait_exponential,\n )\n\n def is_rate_limit_error(exception: Exception) -> bool:\n \"\"\"Check if exception is a rate limit error (429).\"\"\"\n error_str = str(exception).lower()\n return \"429\" in error_str or \"rate_limit\" in error_str or \"rate limit\" in error_str\n\n def is_other_retryable_error(exception: Exception) -> bool:\n \"\"\"Check if exception is retryable but not a rate limit error.\"\"\"\n # Retry on most exceptions except for specific non-retryable ones\n # Add other non-retryable exceptions here if needed\n return not is_rate_limit_error(exception)\n\n # Create retry decorator for rate limit errors (longer backoff)\n retry_on_rate_limit = retry(\n retry=retry_if_exception(is_rate_limit_error),\n stop=stop_after_attempt(5),\n wait=wait_exponential(multiplier=2, min=2, max=30),\n reraise=True,\n before_sleep=lambda retry_state: logger.warning(\n f\"Rate limit hit for chunk (attempt {retry_state.attempt_number}/5), \"\n f\"backing off for {retry_state.next_action.sleep:.1f}s\"\n ),\n )\n\n # Create retry decorator for other errors (shorter backoff)\n retry_on_other_errors = retry(\n retry=retry_if_exception(is_other_retryable_error),\n stop=stop_after_attempt(3),\n wait=wait_exponential(multiplier=1, min=1, max=8),\n reraise=True,\n before_sleep=lambda retry_state: logger.warning(\n f\"Error embedding chunk (attempt {retry_state.attempt_number}/3), \"\n f\"retrying in {retry_state.next_action.sleep:.1f}s: {retry_state.outcome.exception()}\"\n ),\n )\n\n def embed_chunk_with_retry(chunk_text: str, chunk_idx: int) -> list[float]:\n \"\"\"Embed a single chunk with rate-limit-aware retry logic.\"\"\"\n\n @retry_on_rate_limit\n @retry_on_other_errors\n def _embed(text: str) -> list[float]:\n return selected_embedding.embed_documents([text])[0]\n\n try:\n return _embed(chunk_text)\n except Exception as e:\n logger.error(\n f\"Failed to embed chunk {chunk_idx} after all retries: {e}\",\n error=str(e),\n )\n raise\n\n # Restrict concurrency for IBM/Watsonx models to avoid rate limits\n is_ibm = (embedding_model and \"ibm\" in str(embedding_model).lower()) or (\n selected_embedding and \"watsonx\" in type(selected_embedding).__name__.lower()\n )\n logger.debug(f\"Is IBM: {is_ibm}\")\n\n # For IBM models, use sequential processing with rate limiting\n # For other models, use parallel processing\n vectors: list[list[float]] = [None] * len(texts)\n\n if is_ibm:\n # Sequential processing with inter-request delay for IBM models\n inter_request_delay = 0.6 # ~1.67 req/s, safely under 2 req/s limit\n logger.info(\n f\"Using sequential processing for IBM model with {inter_request_delay}s delay between requests\"\n )\n\n for idx, chunk in enumerate(texts):\n if idx > 0:\n # Add delay between requests (but not before the first one)\n time.sleep(inter_request_delay)\n vectors[idx] = embed_chunk_with_retry(chunk, idx)\n else:\n # Parallel processing for non-IBM models\n max_workers = min(max(len(texts), 1), 8)\n logger.debug(f\"Using parallel processing with {max_workers} workers\")\n\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n futures = {executor.submit(embed_chunk_with_retry, chunk, idx): idx for idx, chunk in enumerate(texts)}\n for future in as_completed(futures):\n idx = futures[future]\n vectors[idx] = future.result()\n\n if not vectors:\n self.log(f\"No vectors generated from documents for model {embedding_model}.\")\n return\n\n # Get vector dimension for mapping\n dim = 
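# A standalone equivalent of the stacked retry policy above, for reference
# (predicates and limits copied from the decorators; embed_fn is a
# placeholder). Because the two decorators compose, a chunk that alternates
# between 429s and transient errors is retried by each policy independently:
#
#     from tenacity import retry, retry_if_exception, stop_after_attempt, wait_exponential
#
#     @retry(retry=retry_if_exception(is_rate_limit_error),
#            stop=stop_after_attempt(5),
#            wait=wait_exponential(multiplier=2, min=2, max=30), reraise=True)
#     @retry(retry=retry_if_exception(lambda e: not is_rate_limit_error(e)),
#            stop=stop_after_attempt(3),
#            wait=wait_exponential(multiplier=1, min=1, max=8), reraise=True)
#     def embed_fn(text: str) -> list[float]: ...
#
# The 0.6s inter-request delay for IBM models caps throughput at roughly
# 1 / 0.6 ≈ 1.67 requests per second, below the assumed 2 req/s limit.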
len(vectors[0]) if vectors else 768 # default fallback\n\n # Check for AOSS\n auth_kwargs = self._build_auth_kwargs()\n is_aoss = self._is_aoss_enabled(auth_kwargs.get(\"http_auth\"))\n\n # Validate engine with AOSS\n engine = getattr(self, \"engine\", \"jvector\")\n self._validate_aoss_with_engines(is_aoss=is_aoss, engine=engine)\n\n # Create mapping with proper KNN settings\n space_type = getattr(self, \"space_type\", \"l2\")\n ef_construction = getattr(self, \"ef_construction\", 512)\n m = getattr(self, \"m\", 16)\n\n mapping = self._default_text_mapping(\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n vector_field=dynamic_field_name, # Use dynamic field name\n )\n\n # Ensure index exists with baseline mapping\n try:\n if not client.indices.exists(index=self.index_name):\n self.log(f\"Creating index '{self.index_name}' with base mapping\")\n client.indices.create(index=self.index_name, body=mapping)\n except RequestError as creation_error:\n if creation_error.error != \"resource_already_exists_exception\":\n logger.warning(f\"Failed to create index '{self.index_name}': {creation_error}\")\n\n # Ensure the dynamic field exists in the index\n self._ensure_embedding_field_mapping(\n client=client,\n index_name=self.index_name,\n field_name=dynamic_field_name,\n dim=dim,\n engine=engine,\n space_type=space_type,\n ef_construction=ef_construction,\n m=m,\n )\n\n self.log(f\"Indexing {len(texts)} documents into '{self.index_name}' with model '{embedding_model}'...\")\n logger.info(f\"Will store embeddings in field: {dynamic_field_name}\")\n logger.info(f\"Will tag documents with embedding_model: {embedding_model}\")\n\n # Use the bulk ingestion with model tracking\n return_ids = self._bulk_ingest_embeddings(\n client=client,\n index_name=self.index_name,\n embeddings=vectors,\n texts=texts,\n metadatas=metadatas,\n vector_field=dynamic_field_name, # Use dynamic field name\n text_field=\"text\",\n embedding_model=embedding_model, # Track the model\n mapping=mapping,\n is_aoss=is_aoss,\n )\n self.log(metadatas)\n\n logger.info(\n f\"Ingestion complete: Successfully indexed {len(return_ids)} documents with model '{embedding_model}'\"\n )\n self.log(f\"Successfully indexed {len(return_ids)} documents with model {embedding_model}.\")\n\n # ---------- helpers for filters ----------\n def _is_placeholder_term(self, term_obj: dict) -> bool:\n # term_obj like {\"filename\": \"__IMPOSSIBLE_VALUE__\"}\n return any(v == \"__IMPOSSIBLE_VALUE__\" for v in term_obj.values())\n\n def _coerce_filter_clauses(self, filter_obj: dict | None) -> list[dict]:\n \"\"\"Convert filter expressions into OpenSearch-compatible filter clauses.\n\n This method accepts two filter formats and converts them to standardized\n OpenSearch query clauses:\n\n Format A - Explicit filters:\n {\"filter\": [{\"term\": {\"field\": \"value\"}}, {\"terms\": {\"field\": [\"val1\", \"val2\"]}}],\n \"limit\": 10, \"score_threshold\": 1.5}\n\n Format B - Context-style mapping:\n {\"data_sources\": [\"file1.pdf\"], \"document_types\": [\"pdf\"], \"owners\": [\"user1\"]}\n\n Args:\n filter_obj: Filter configuration dictionary or None\n\n Returns:\n List of OpenSearch filter clauses (term/terms objects)\n Placeholder values with \"__IMPOSSIBLE_VALUE__\" are ignored\n \"\"\"\n if not filter_obj:\n return []\n\n # If it is a string, try to parse it once\n if isinstance(filter_obj, str):\n try:\n filter_obj = json.loads(filter_obj)\n except json.JSONDecodeError:\n # Not valid JSON - treat as no 
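# Worked example of the Format B coercion implemented below (illustrative
# values): context keys map onto index fields, term/terms is chosen by arity,
# and an empty list becomes a match-nothing placeholder:
#
#     {"data_sources": ["report.pdf"],
#      "document_types": ["pdf", "docx"],
#      "owners": []}
#     -> [{"term": {"filename": "report.pdf"}},
#         {"terms": {"mimetype": ["pdf", "docx"]}},
#         {"term": {"owner": "__IMPOSSIBLE_VALUE__"}}]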
filters\n return []\n\n # Case A: already an explicit list/dict under \"filter\"\n if \"filter\" in filter_obj:\n raw = filter_obj[\"filter\"]\n if isinstance(raw, dict):\n raw = [raw]\n explicit_clauses: list[dict] = []\n for f in raw or []:\n if \"term\" in f and isinstance(f[\"term\"], dict) and not self._is_placeholder_term(f[\"term\"]):\n explicit_clauses.append(f)\n elif \"terms\" in f and isinstance(f[\"terms\"], dict):\n field, vals = next(iter(f[\"terms\"].items()))\n if isinstance(vals, list) and len(vals) > 0:\n explicit_clauses.append(f)\n return explicit_clauses\n\n # Case B: convert context-style maps into clauses\n field_mapping = {\n \"data_sources\": \"filename\",\n \"document_types\": \"mimetype\",\n \"owners\": \"owner\",\n }\n context_clauses: list[dict] = []\n for k, values in filter_obj.items():\n if not isinstance(values, list):\n continue\n field = field_mapping.get(k, k)\n if len(values) == 0:\n # Match-nothing placeholder (kept to mirror your tool semantics)\n context_clauses.append({\"term\": {field: \"__IMPOSSIBLE_VALUE__\"}})\n elif len(values) == 1:\n if values[0] != \"__IMPOSSIBLE_VALUE__\":\n context_clauses.append({\"term\": {field: values[0]}})\n else:\n context_clauses.append({\"terms\": {field: values}})\n return context_clauses\n\n def _detect_available_models(self, client: OpenSearch, filter_clauses: list[dict] | None = None) -> list[str]:\n \"\"\"Detect which embedding models have documents in the index.\n\n Uses aggregation to find all unique embedding_model values, optionally\n filtered to only documents matching the user's filter criteria.\n\n Args:\n client: OpenSearch client instance\n filter_clauses: Optional filter clauses to scope model detection\n\n Returns:\n List of embedding model names found in the index\n \"\"\"\n try:\n agg_query = {\"size\": 0, \"aggs\": {\"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}}}}\n\n # Apply filters to model detection if any exist\n if filter_clauses:\n agg_query[\"query\"] = {\"bool\": {\"filter\": filter_clauses}}\n\n logger.debug(f\"Model detection query: {agg_query}\")\n result = client.search(\n index=self.index_name,\n body=agg_query,\n params={\"terminate_after\": 0},\n )\n buckets = result.get(\"aggregations\", {}).get(\"embedding_models\", {}).get(\"buckets\", [])\n models = [b[\"key\"] for b in buckets if b[\"key\"]]\n\n # Log detailed bucket info for debugging\n logger.info(\n f\"Detected embedding models in corpus: {models}\"\n + (f\" (with {len(filter_clauses)} filters)\" if filter_clauses else \"\")\n )\n if not models:\n total_hits = result.get(\"hits\", {}).get(\"total\", {})\n total_count = total_hits.get(\"value\", 0) if isinstance(total_hits, dict) else total_hits\n logger.warning(\n f\"No embedding_model values found in index '{self.index_name}'. \"\n f\"Total docs in index: {total_count}. 
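# Illustrative response shape consumed here - one bucket per distinct
# embedding_model value (model names hypothetical):
#
#     {"aggregations": {"embedding_models": {"buckets": [
#         {"key": "text-embedding-3-small", "doc_count": 120},
#         {"key": "granite-embedding-107m", "doc_count": 80}]}}}
#
# yielding models == ["text-embedding-3-small", "granite-embedding-107m"].
# Scoping the aggregation by the caller's filters keeps the search path from
# embedding the query with models that no filtered document actually uses.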
\"\n f\"This may indicate documents were indexed without the embedding_model field.\"\n )\n except (OpenSearchException, KeyError, ValueError) as e:\n logger.warning(f\"Failed to detect embedding models: {e}\")\n # Fallback to current model\n fallback_model = self._get_embedding_model_name()\n logger.info(f\"Using fallback model: {fallback_model}\")\n return [fallback_model]\n else:\n return models\n\n def _get_index_properties(self, client: OpenSearch) -> dict[str, Any] | None:\n \"\"\"Retrieve flattened mapping properties for the current index.\"\"\"\n try:\n mapping = client.indices.get_mapping(index=self.index_name)\n except OpenSearchException as e:\n logger.warning(\n f\"Failed to fetch mapping for index '{self.index_name}': {e}. Proceeding without mapping metadata.\"\n )\n return None\n\n properties: dict[str, Any] = {}\n for index_data in mapping.values():\n props = index_data.get(\"mappings\", {}).get(\"properties\", {})\n if isinstance(props, dict):\n properties.update(props)\n return properties\n\n def _is_knn_vector_field(self, properties: dict[str, Any] | None, field_name: str) -> bool:\n \"\"\"Check whether the field is mapped as a knn_vector.\"\"\"\n if not field_name:\n return False\n if properties is None:\n logger.warning(f\"Mapping metadata unavailable; assuming field '{field_name}' is usable.\")\n return True\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return False\n if field_def.get(\"type\") == \"knn_vector\":\n return True\n\n nested_props = field_def.get(\"properties\")\n return bool(isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\")\n\n def _get_field_dimension(self, properties: dict[str, Any] | None, field_name: str) -> int | None:\n \"\"\"Get the dimension of a knn_vector field from the index mapping.\n\n Args:\n properties: Index properties from mapping\n field_name: Name of the vector field\n\n Returns:\n Dimension of the field, or None if not found\n \"\"\"\n if not field_name or properties is None:\n return None\n\n field_def = properties.get(field_name)\n if not isinstance(field_def, dict):\n return None\n\n # Check direct knn_vector field\n if field_def.get(\"type\") == \"knn_vector\":\n return field_def.get(\"dimension\")\n\n # Check nested properties\n nested_props = field_def.get(\"properties\")\n if isinstance(nested_props, dict) and nested_props.get(\"type\") == \"knn_vector\":\n return nested_props.get(\"dimension\")\n\n return None\n\n # ---------- search (multi-model hybrid) ----------\n def search(self, query: str | None = None) -> list[dict[str, Any]]:\n \"\"\"Perform multi-model hybrid search combining multiple vector similarities and keyword matching.\n\n This method executes a sophisticated search that:\n 1. Auto-detects all embedding models present in the index\n 2. Generates query embeddings for ALL detected models in parallel\n 3. Combines multiple KNN queries using dis_max (picks best match)\n 4. Adds keyword search with fuzzy matching (30% weight)\n 5. Applies optional filtering and score thresholds\n 6. 
Returns aggregations for faceted search\n\n Search weights:\n - Semantic search (dis_max across all models): 70%\n - Keyword search: 30%\n\n Args:\n query: Search query string (used for both vector embedding and keyword search)\n\n Returns:\n List of search results with page_content, metadata, and relevance scores\n\n Raises:\n ValueError: If embedding component is not provided or filter JSON is invalid\n \"\"\"\n logger.info(self.ingest_data)\n client = self.build_client()\n q = (query or \"\").strip()\n\n # Parse optional filter expression\n filter_obj = None\n if getattr(self, \"filter_expression\", \"\") and self.filter_expression.strip():\n try:\n filter_obj = json.loads(self.filter_expression)\n except json.JSONDecodeError as e:\n msg = f\"Invalid filter_expression JSON: {e}\"\n raise ValueError(msg) from e\n\n if not self.embedding:\n msg = \"Embedding is required to run hybrid search (KNN + keyword).\"\n raise ValueError(msg)\n\n # Check if embedding is None (fail-safe mode)\n if self.embedding is None or (isinstance(self.embedding, list) and all(e is None for e in self.embedding)):\n logger.error(\"Embedding returned None (fail-safe mode enabled). Cannot perform search.\")\n return []\n\n # Build filter clauses first so we can use them in model detection\n filter_clauses = self._coerce_filter_clauses(filter_obj)\n\n # Detect available embedding models in the index (scoped by filters)\n available_models = self._detect_available_models(client, filter_clauses)\n\n if not available_models:\n logger.warning(\"No embedding models found in index, using current model\")\n available_models = [self._get_embedding_model_name()]\n\n # Generate embeddings for ALL detected models\n query_embeddings = {}\n\n # Normalize embedding to list\n embeddings_list = self.embedding if isinstance(self.embedding, list) else [self.embedding]\n # Filter out None values (fail-safe mode)\n embeddings_list = [e for e in embeddings_list if e is not None]\n\n if not embeddings_list:\n logger.error(\n \"No valid embeddings available after filtering None values (fail-safe mode). 
Cannot perform search.\"\n )\n return []\n\n # Create a comprehensive map of model names to embedding objects\n # Check all possible identifiers (deployment, model, model_id, model_name)\n # Also leverage available_models list from EmbeddingsWithModels\n # Handle duplicate identifiers by creating combined keys\n embedding_by_model = {}\n identifier_conflicts = {} # Track which identifiers have conflicts\n\n for idx, emb_obj in enumerate(embeddings_list):\n # Get all possible identifiers for this embedding\n identifiers = []\n deployment = getattr(emb_obj, \"deployment\", None)\n model = getattr(emb_obj, \"model\", None)\n model_id = getattr(emb_obj, \"model_id\", None)\n model_name = getattr(emb_obj, \"model_name\", None)\n dimensions = getattr(emb_obj, \"dimensions\", None)\n available_models_attr = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Embedding object {idx}: deployment={deployment}, model={model}, \"\n f\"model_id={model_id}, model_name={model_name}, dimensions={dimensions}, \"\n f\"available_models={available_models_attr}\"\n )\n\n # If this embedding has available_models dict, map all models to their dedicated instances\n if available_models_attr and isinstance(available_models_attr, dict):\n logger.info(\n f\"Embedding object {idx} provides {len(available_models_attr)} models via available_models dict\"\n )\n for model_name_key, dedicated_embedding in available_models_attr.items():\n if model_name_key and str(model_name_key).strip():\n model_str = str(model_name_key).strip()\n if model_str not in embedding_by_model:\n # Use the dedicated embedding instance from the dict\n embedding_by_model[model_str] = dedicated_embedding\n logger.info(f\"Mapped available model '{model_str}' to dedicated embedding instance\")\n else:\n # Conflict detected - track it\n if model_str not in identifier_conflicts:\n identifier_conflicts[model_str] = [embedding_by_model[model_str]]\n identifier_conflicts[model_str].append(dedicated_embedding)\n logger.warning(f\"Available model '{model_str}' has conflict - used by multiple embeddings\")\n\n # Also map traditional identifiers (for backward compatibility)\n if deployment:\n identifiers.append(str(deployment))\n if model:\n identifiers.append(str(model))\n if model_id:\n identifiers.append(str(model_id))\n if model_name:\n identifiers.append(str(model_name))\n\n # Map all identifiers to this embedding object\n for identifier in identifiers:\n if identifier not in embedding_by_model:\n embedding_by_model[identifier] = emb_obj\n logger.info(f\"Mapped identifier '{identifier}' to embedding object {idx}\")\n else:\n # Conflict detected - track it\n if identifier not in identifier_conflicts:\n identifier_conflicts[identifier] = [embedding_by_model[identifier]]\n identifier_conflicts[identifier].append(emb_obj)\n logger.warning(f\"Identifier '{identifier}' has conflict - used by multiple embeddings\")\n\n # For embeddings with model+deployment, create combined identifier\n # This helps when deployment is the same but model differs\n if deployment and model and deployment != model:\n combined_id = f\"{deployment}:{model}\"\n if combined_id not in embedding_by_model:\n embedding_by_model[combined_id] = emb_obj\n logger.info(f\"Created combined identifier '{combined_id}' for embedding object {idx}\")\n\n # Log conflicts\n if identifier_conflicts:\n logger.warning(\n f\"Found {len(identifier_conflicts)} conflicting identifiers. 
\"\n f\"Consider using combined format 'deployment:model' or specifying unique model names.\"\n )\n for conflict_id, emb_list in identifier_conflicts.items():\n logger.warning(f\" Conflict on '{conflict_id}': {len(emb_list)} embeddings use this identifier\")\n\n logger.info(f\"Generating embeddings for {len(available_models)} models in index\")\n logger.info(f\"Available embedding identifiers: {list(embedding_by_model.keys())}\")\n self.log(f\"[SEARCH] Models detected in index: {available_models}\")\n self.log(f\"[SEARCH] Available embedding identifiers: {list(embedding_by_model.keys())}\")\n\n # Track matching status for debugging\n matched_models = []\n unmatched_models = []\n\n for model_name in available_models:\n try:\n # Check if we have an embedding object for this model\n if model_name in embedding_by_model:\n # Use the matching embedding object directly\n emb_obj = embedding_by_model[model_name]\n emb_deployment = getattr(emb_obj, \"deployment\", None)\n emb_model = getattr(emb_obj, \"model\", None)\n emb_model_id = getattr(emb_obj, \"model_id\", None)\n emb_dimensions = getattr(emb_obj, \"dimensions\", None)\n emb_available_models = getattr(emb_obj, \"available_models\", None)\n\n logger.info(\n f\"Using embedding object for model '{model_name}': \"\n f\"deployment={emb_deployment}, model={emb_model}, model_id={emb_model_id}, \"\n f\"dimensions={emb_dimensions}\"\n )\n\n # Check if this is a dedicated instance from available_models dict\n if emb_available_models and isinstance(emb_available_models, dict):\n logger.info(\n f\"Model '{model_name}' using dedicated instance from available_models dict \"\n f\"(pre-configured with correct model and dimensions)\"\n )\n\n # Use the embedding instance directly - no model switching needed!\n vec = emb_obj.embed_query(q)\n query_embeddings[model_name] = vec\n matched_models.append(model_name)\n logger.info(f\"Generated embedding for model: {model_name} (actual dimensions: {len(vec)})\")\n self.log(f\"[MATCH] Model '{model_name}' - generated {len(vec)}-dim embedding\")\n else:\n # No matching embedding found for this model\n unmatched_models.append(model_name)\n logger.warning(\n f\"No matching embedding found for model '{model_name}'. \"\n f\"This model will be skipped. Available identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[NO MATCH] Model '{model_name}' - available: {list(embedding_by_model.keys())}\")\n except (RuntimeError, ValueError, ConnectionError, TimeoutError, AttributeError, KeyError) as e:\n logger.warning(f\"Failed to generate embedding for {model_name}: {e}\")\n self.log(f\"[ERROR] Embedding generation failed for '{model_name}': {e}\")\n\n # Log summary of model matching\n logger.info(f\"Model matching summary: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n self.log(f\"[SUMMARY] Model matching: {len(matched_models)} matched, {len(unmatched_models)} unmatched\")\n if unmatched_models:\n self.log(f\"[WARN] Unmatched models in index: {unmatched_models}\")\n\n if not query_embeddings:\n msg = (\n f\"Failed to generate embeddings for any model. \"\n f\"Index has models: {available_models}, but no matching embedding objects found. 
\"\n f\"Available embedding identifiers: {list(embedding_by_model.keys())}\"\n )\n self.log(f\"[FAIL] Search failed: {msg}\")\n raise ValueError(msg)\n\n index_properties = self._get_index_properties(client)\n legacy_vector_field = getattr(self, \"vector_field\", \"chunk_embedding\")\n\n # Build KNN queries for each model\n embedding_fields: list[str] = []\n knn_queries_with_candidates = []\n knn_queries_without_candidates = []\n\n raw_num_candidates = getattr(self, \"num_candidates\", 1000)\n try:\n num_candidates = int(raw_num_candidates) if raw_num_candidates is not None else 0\n except (TypeError, ValueError):\n num_candidates = 0\n use_num_candidates = num_candidates > 0\n\n for model_name, embedding_vector in query_embeddings.items():\n field_name = get_embedding_field_name(model_name)\n selected_field = field_name\n vector_dim = len(embedding_vector)\n\n # Only use the expected dynamic field - no legacy fallback\n # This prevents dimension mismatches between models\n if not self._is_knn_vector_field(index_properties, selected_field):\n logger.warning(\n f\"Skipping model {model_name}: field '{field_name}' is not mapped as knn_vector. \"\n f\"Documents must be indexed with this embedding model before querying.\"\n )\n self.log(f\"[SKIP] Field '{selected_field}' not a knn_vector - skipping model '{model_name}'\")\n continue\n\n # Validate vector dimensions match the field dimensions\n field_dim = self._get_field_dimension(index_properties, selected_field)\n if field_dim is not None and field_dim != vector_dim:\n logger.error(\n f\"Dimension mismatch for model '{model_name}': \"\n f\"Query vector has {vector_dim} dimensions but field '{selected_field}' expects {field_dim}. \"\n f\"Skipping this model to prevent search errors.\"\n )\n self.log(f\"[DIM MISMATCH] Model '{model_name}': query={vector_dim} vs field={field_dim} - skipping\")\n continue\n\n logger.info(\n f\"Adding KNN query for model '{model_name}': field='{selected_field}', \"\n f\"query_dims={vector_dim}, field_dims={field_dim or 'unknown'}\"\n )\n embedding_fields.append(selected_field)\n\n base_query = {\n \"knn\": {\n selected_field: {\n \"vector\": embedding_vector,\n \"k\": 50,\n }\n }\n }\n\n if use_num_candidates:\n query_with_candidates = copy.deepcopy(base_query)\n query_with_candidates[\"knn\"][selected_field][\"num_candidates\"] = num_candidates\n else:\n query_with_candidates = base_query\n\n knn_queries_with_candidates.append(query_with_candidates)\n knn_queries_without_candidates.append(base_query)\n\n if not knn_queries_with_candidates:\n # No valid fields found - this can happen when:\n # 1. Index is empty (no documents yet)\n # 2. Embedding model has changed and field doesn't exist yet\n # Return empty results instead of failing\n logger.warning(\n \"No valid knn_vector fields found for embedding models. \"\n \"This may indicate an empty index or missing field mappings. \"\n \"Returning empty search results.\"\n )\n self.log(\n f\"[WARN] No valid KNN queries could be built. 
\"\n f\"Query embeddings generated: {list(query_embeddings.keys())}, \"\n f\"but no matching knn_vector fields found in index.\"\n )\n return []\n\n # Build exists filter - document must have at least one embedding field\n exists_any_embedding = {\n \"bool\": {\"should\": [{\"exists\": {\"field\": f}} for f in set(embedding_fields)], \"minimum_should_match\": 1}\n }\n\n # Combine user filters with exists filter\n all_filters = [*filter_clauses, exists_any_embedding]\n\n # Get limit and score threshold\n limit = (filter_obj or {}).get(\"limit\", self.number_of_results)\n score_threshold = (filter_obj or {}).get(\"score_threshold\", 0)\n\n # Build multi-model hybrid query\n body = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"dis_max\": {\n \"tie_breaker\": 0.0, # Take only the best match, no blending\n \"boost\": 0.7, # 70% weight for semantic search\n \"queries\": knn_queries_with_candidates,\n }\n },\n {\n \"multi_match\": {\n \"query\": q,\n \"fields\": [\"text^2\", \"filename^1.5\"],\n \"type\": \"best_fields\",\n \"fuzziness\": \"AUTO\",\n \"boost\": 0.3, # 30% weight for keyword search\n }\n },\n ],\n \"minimum_should_match\": 1,\n \"filter\": all_filters,\n }\n },\n \"aggs\": {\n \"data_sources\": {\"terms\": {\"field\": \"filename\", \"size\": 20}},\n \"document_types\": {\"terms\": {\"field\": \"mimetype\", \"size\": 10}},\n \"owners\": {\"terms\": {\"field\": \"owner\", \"size\": 10}},\n \"embedding_models\": {\"terms\": {\"field\": \"embedding_model\", \"size\": 10}},\n },\n \"_source\": [\n \"filename\",\n \"mimetype\",\n \"page\",\n \"text\",\n \"source_url\",\n \"owner\",\n \"embedding_model\",\n \"allowed_users\",\n \"allowed_groups\",\n ],\n \"size\": limit,\n }\n\n if isinstance(score_threshold, (int, float)) and score_threshold > 0:\n body[\"min_score\"] = score_threshold\n\n logger.info(\n f\"Executing multi-model hybrid search with {len(knn_queries_with_candidates)} embedding models: \"\n f\"{list(query_embeddings.keys())}\"\n )\n self.log(f\"[EXEC] Executing search with {len(knn_queries_with_candidates)} KNN queries, limit={limit}\")\n self.log(f\"[EXEC] Embedding models used: {list(query_embeddings.keys())}\")\n self.log(f\"[EXEC] KNN fields being queried: {embedding_fields}\")\n\n try:\n resp = client.search(index=self.index_name, body=body, params={\"terminate_after\": 0})\n except RequestError as e:\n error_message = str(e)\n lowered = error_message.lower()\n if use_num_candidates and \"num_candidates\" in lowered:\n logger.warning(\n \"Retrying search without num_candidates parameter due to cluster capabilities\",\n error=error_message,\n )\n fallback_body = copy.deepcopy(body)\n try:\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = knn_queries_without_candidates\n except (KeyError, IndexError, TypeError) as inner_err:\n raise e from inner_err\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n elif \"knn_vector\" in lowered or (\"field\" in lowered and \"knn\" in lowered):\n fallback_vector = next(iter(query_embeddings.values()), None)\n if fallback_vector is None:\n raise\n fallback_field = legacy_vector_field or \"chunk_embedding\"\n logger.warning(\n \"KNN search failed for dynamic fields; falling back to legacy field '%s'.\",\n fallback_field,\n )\n fallback_body = copy.deepcopy(body)\n fallback_body[\"query\"][\"bool\"][\"filter\"] = filter_clauses\n knn_fallback = {\n \"knn\": {\n fallback_field: {\n \"vector\": fallback_vector,\n \"k\": 50,\n }\n }\n }\n if 
use_num_candidates:\n knn_fallback[\"knn\"][fallback_field][\"num_candidates\"] = num_candidates\n fallback_body[\"query\"][\"bool\"][\"should\"][0][\"dis_max\"][\"queries\"] = [knn_fallback]\n resp = client.search(\n index=self.index_name,\n body=fallback_body,\n params={\"terminate_after\": 0},\n )\n else:\n raise\n hits = resp.get(\"hits\", {}).get(\"hits\", [])\n\n logger.info(f\"Found {len(hits)} results\")\n self.log(f\"[RESULT] Search complete: {len(hits)} results found\")\n\n if len(hits) == 0:\n self.log(\n f\"[EMPTY] Debug info: \"\n f\"models_in_index={available_models}, \"\n f\"matched_models={matched_models}, \"\n f\"knn_fields={embedding_fields}, \"\n f\"filters={len(filter_clauses)} clauses\"\n )\n\n return [\n {\n \"page_content\": hit[\"_source\"].get(\"text\", \"\"),\n \"metadata\": {k: v for k, v in hit[\"_source\"].items() if k != \"text\"},\n \"score\": hit.get(\"_score\"),\n }\n for hit in hits\n ]\n\n def search_documents(self) -> list[Data]:\n \"\"\"Search documents and return results as Data objects.\n\n This is the main interface method that performs the multi-model search using the\n configured search_query and returns results in Langflow's Data format.\n\n Always builds the vector store (triggering ingestion if needed), then performs\n search only if a query is provided.\n\n Returns:\n List of Data objects containing search results with text and metadata\n\n Raises:\n Exception: If search operation fails\n \"\"\"\n try:\n # Always build/cache the vector store to ensure ingestion happens\n logger.info(f\"Search query: {self.search_query}\")\n if self._cached_vector_store is None:\n self.build_vector_store()\n\n # Only perform search if query is provided\n search_query = (self.search_query or \"\").strip()\n if not search_query:\n self.log(\"No search query provided - ingestion completed, returning empty results\")\n return []\n\n # Perform search with the provided query\n raw = self.search(search_query)\n return [Data(text=hit[\"page_content\"], **hit[\"metadata\"]) for hit in raw]\n except Exception as e:\n self.log(f\"search_documents error: {e}\")\n raise\n\n # -------- dynamic UI handling (auth switch) --------\n async def update_build_config(self, build_config: dict, field_value: str, field_name: str | None = None) -> dict:\n \"\"\"Dynamically update component configuration based on field changes.\n\n This method handles real-time UI updates, particularly for authentication\n mode changes that show/hide relevant input fields.\n\n Args:\n build_config: Current component configuration\n field_value: New value for the changed field\n field_name: Name of the field that changed\n\n Returns:\n Updated build configuration with appropriate field visibility\n \"\"\"\n try:\n if field_name == \"auth_mode\":\n mode = (field_value or \"basic\").strip().lower()\n is_basic = mode == \"basic\"\n is_jwt = mode == \"jwt\"\n\n build_config[\"username\"][\"show\"] = is_basic\n build_config[\"password\"][\"show\"] = is_basic\n\n build_config[\"jwt_token\"][\"show\"] = is_jwt\n build_config[\"jwt_header\"][\"show\"] = is_jwt\n build_config[\"bearer_prefix\"][\"show\"] = is_jwt\n\n build_config[\"username\"][\"required\"] = is_basic\n build_config[\"password\"][\"required\"] = is_basic\n\n build_config[\"jwt_token\"][\"required\"] = is_jwt\n build_config[\"jwt_header\"][\"required\"] = is_jwt\n build_config[\"bearer_prefix\"][\"required\"] = False\n\n return build_config\n\n except (KeyError, ValueError) as e:\n self.log(f\"update_build_config error: {e}\")\n\n return 
build_config\n" }, "docs_metadata": { "_input_type": "TableInput", diff --git a/frontend/app/api/mutations/useOnboardingRollbackMutation.ts b/frontend/app/api/mutations/useOnboardingRollbackMutation.ts new file mode 100644 index 00000000..1b8a0754 --- /dev/null +++ b/frontend/app/api/mutations/useOnboardingRollbackMutation.ts @@ -0,0 +1,44 @@ +import { + type UseMutationOptions, + useMutation, + useQueryClient, +} from "@tanstack/react-query"; + +interface OnboardingRollbackResponse { + message: string; +} + +export const useOnboardingRollbackMutation = ( + options?: Omit< + UseMutationOptions, + "mutationFn" + >, +) => { + const queryClient = useQueryClient(); + + async function rollbackOnboarding(): Promise { + const response = await fetch("/api/onboarding/rollback", { + method: "POST", + headers: { + "Content-Type": "application/json", + }, + }); + + if (!response.ok) { + const error = await response.json(); + throw new Error(error.error || "Failed to rollback onboarding"); + } + + return response.json(); + } + + return useMutation({ + mutationFn: rollbackOnboarding, + onSettled: () => { + // Invalidate settings query to refetch updated data + queryClient.invalidateQueries({ queryKey: ["settings"] }); + }, + ...options, + }); +}; + diff --git a/frontend/app/api/queries/useGetConversationsQuery.ts b/frontend/app/api/queries/useGetConversationsQuery.ts index d77b7eff..66862605 100644 --- a/frontend/app/api/queries/useGetConversationsQuery.ts +++ b/frontend/app/api/queries/useGetConversationsQuery.ts @@ -4,6 +4,7 @@ import { useQueryClient, } from "@tanstack/react-query"; import type { EndpointType } from "@/contexts/chat-context"; +import { useChat } from "@/contexts/chat-context"; export interface RawConversation { response_id: string; @@ -50,6 +51,7 @@ export const useGetConversationsQuery = ( options?: Omit, ) => { const queryClient = useQueryClient(); + const { isOnboardingComplete } = useChat(); async function getConversations(context: { signal?: AbortSignal }): Promise { try { @@ -95,6 +97,11 @@ export const useGetConversationsQuery = ( } } + // Extract enabled from options and combine with onboarding completion check + // Query is only enabled if onboarding is complete AND the caller's enabled condition is met + const callerEnabled = options?.enabled ?? true; + const enabled = isOnboardingComplete && callerEnabled; + const queryResult = useQuery( { queryKey: ["conversations", endpoint, refreshTrigger], @@ -106,6 +113,7 @@ export const useGetConversationsQuery = ( refetchOnMount: false, // Don't refetch on every mount refetchOnWindowFocus: false, // Don't refetch when window regains focus ...options, + enabled, // Override enabled after spreading options to ensure onboarding check is applied }, queryClient, ); diff --git a/frontend/app/api/queries/useGetNudgesQuery.ts b/frontend/app/api/queries/useGetNudgesQuery.ts index 05c97bde..ac4c1fde 100644 --- a/frontend/app/api/queries/useGetNudgesQuery.ts +++ b/frontend/app/api/queries/useGetNudgesQuery.ts @@ -3,6 +3,8 @@ import { useQuery, useQueryClient, } from "@tanstack/react-query"; +import { useChat } from "@/contexts/chat-context"; +import { useProviderHealthQuery } from "./useProviderHealthQuery"; type Nudge = string; @@ -27,6 +29,13 @@ export const useGetNudgesQuery = ( ) => { const { chatId, filters, limit, scoreThreshold } = params ?? 
{}; const queryClient = useQueryClient(); + const { isOnboardingComplete } = useChat(); + + // Check if LLM provider is healthy + // If health data is not available yet, assume healthy (optimistic) + // Only disable if health data exists and shows LLM error + const { data: health } = useProviderHealthQuery(); + const isLLMHealthy = health === undefined || (health?.status === "healthy" && !health?.llm_error); function cancel() { queryClient.removeQueries({ @@ -77,6 +86,11 @@ export const useGetNudgesQuery = ( } } + // Extract enabled from options and combine with onboarding completion and LLM health checks + // Query is only enabled if onboarding is complete AND LLM provider is healthy AND the caller's enabled condition is met + const callerEnabled = options?.enabled ?? true; + const enabled = isOnboardingComplete && isLLMHealthy && callerEnabled; + const queryResult = useQuery( { queryKey: ["nudges", chatId, filters, limit, scoreThreshold], @@ -91,6 +105,7 @@ export const useGetNudgesQuery = ( return Array.isArray(data) && data.length === 0 ? 5000 : false; }, ...options, + enabled, // Override enabled after spreading options to ensure onboarding check is applied }, queryClient, ); diff --git a/frontend/app/api/queries/useProviderHealthQuery.ts b/frontend/app/api/queries/useProviderHealthQuery.ts index 6586e6dd..ab34e9b1 100644 --- a/frontend/app/api/queries/useProviderHealthQuery.ts +++ b/frontend/app/api/queries/useProviderHealthQuery.ts @@ -5,6 +5,7 @@ import { } from "@tanstack/react-query"; import { useChat } from "@/contexts/chat-context"; import { useGetSettingsQuery } from "./useGetSettingsQuery"; +import { useGetTasksQuery } from "./useGetTasksQuery"; export interface ProviderHealthDetails { llm_model: string; @@ -40,11 +41,20 @@ export const useProviderHealthQuery = ( ) => { const queryClient = useQueryClient(); - // Get chat error state from context (ChatProvider wraps the entire app in layout.tsx) - const { hasChatError, setChatError } = useChat(); + // Get chat error state and onboarding completion from context (ChatProvider wraps the entire app in layout.tsx) + const { hasChatError, setChatError, isOnboardingComplete } = useChat(); const { data: settings = {} } = useGetSettingsQuery(); + // Check if there are any active ingestion tasks + const { data: tasks = [] } = useGetTasksQuery(); + const hasActiveIngestion = tasks.some( + (task) => + task.status === "pending" || + task.status === "running" || + task.status === "processing", + ); + async function checkProviderHealth(): Promise { try { const url = new URL("/api/provider/health", window.location.origin); @@ -55,6 +65,7 @@ export const useProviderHealthQuery = ( } // Add test_completion query param if specified or if chat error exists + // Use the same testCompletion value that's in the queryKey const testCompletion = params?.test_completion ?? hasChatError; if (testCompletion) { url.searchParams.set("test_completion", "true"); @@ -101,7 +112,10 @@ export const useProviderHealthQuery = ( } } - const queryKey = ["provider", "health", params?.test_completion]; + // Include hasChatError in queryKey so React Query refetches when it changes + // This ensures the health check runs with test_completion=true when chat errors occur + const testCompletion = params?.test_completion ?? 
hasChatError; + const queryKey = ["provider", "health", testCompletion, hasChatError]; const failureCountKey = queryKey.join("-"); const queryResult = useQuery( @@ -143,7 +157,11 @@ export const useProviderHealthQuery = ( refetchOnWindowFocus: false, // Disabled to reduce unnecessary calls on tab switches refetchOnMount: true, staleTime: 30000, // Consider data stale after 30 seconds - enabled: !!settings?.edited && options?.enabled !== false, // Only run after onboarding is complete + enabled: + !!settings?.edited && + isOnboardingComplete && + !hasActiveIngestion && // Disable health checks when ingestion is happening + options?.enabled !== false, // Only run after onboarding is complete ...options, }, queryClient, diff --git a/frontend/app/chat/page.tsx b/frontend/app/chat/page.tsx index 87ae6b60..f15cf788 100644 --- a/frontend/app/chat/page.tsx +++ b/frontend/app/chat/page.tsx @@ -10,1240 +10,1271 @@ import { useTask } from "@/contexts/task-context"; import { useChatStreaming } from "@/hooks/useChatStreaming"; import { FILE_CONFIRMATION, FILES_REGEX } from "@/lib/constants"; import { useLoadingStore } from "@/stores/loadingStore"; +import { useGetConversationsQuery } from "../api/queries/useGetConversationsQuery"; import { useGetNudgesQuery } from "../api/queries/useGetNudgesQuery"; import { AssistantMessage } from "./_components/assistant-message"; import { ChatInput, type ChatInputHandle } from "./_components/chat-input"; import Nudges from "./_components/nudges"; import { UserMessage } from "./_components/user-message"; import type { - FunctionCall, - KnowledgeFilterData, - Message, - RequestBody, - SelectedFilters, - ToolCallResult, + FunctionCall, + KnowledgeFilterData, + Message, + RequestBody, + SelectedFilters, + ToolCallResult, } from "./_types/types"; function ChatPage() { - const isDebugMode = process.env.NEXT_PUBLIC_OPENRAG_DEBUG === "true"; - const { - endpoint, - setEndpoint, - currentConversationId, - conversationData, - setCurrentConversationId, - addConversationDoc, - forkFromResponse, - refreshConversations, - refreshConversationsSilent, - previousResponseIds, - setPreviousResponseIds, - placeholderConversation, - conversationFilter, - setConversationFilter, - } = useChat(); - const [messages, setMessages] = useState([ - { - role: "assistant", - content: "How can I assist?", - timestamp: new Date(), - }, - ]); - const [input, setInput] = useState(""); - const { loading, setLoading } = useLoadingStore(); - const { setChatError } = useChat(); - const [asyncMode, setAsyncMode] = useState(true); - const [expandedFunctionCalls, setExpandedFunctionCalls] = useState< - Set - >(new Set()); - // previousResponseIds now comes from useChat context - const [isUploading, setIsUploading] = useState(false); - const [isFilterHighlighted, setIsFilterHighlighted] = useState(false); - const [isUserInteracting, setIsUserInteracting] = useState(false); - const [isForkingInProgress, setIsForkingInProgress] = useState(false); - const [uploadedFile, setUploadedFile] = useState(null); - const [waitingTooLong, setWaitingTooLong] = useState(false); - - const chatInputRef = useRef(null); - - const { scrollToBottom } = useStickToBottomContext(); - - const lastLoadedConversationRef = useRef(null); - const { addTask } = useTask(); - - // Use conversation-specific filter instead of global filter - const selectedFilter = conversationFilter; - - // Parse the conversation filter data - const parsedFilterData = useMemo(() => { - if (!selectedFilter?.query_data) return null; - try { - return 
JSON.parse(selectedFilter.query_data); - } catch (error) { - console.error("Error parsing filter data:", error); - return null; - } - }, [selectedFilter]); - - // Use the chat streaming hook - const apiEndpoint = endpoint === "chat" ? "/api/chat" : "/api/langflow"; - const { - streamingMessage, - sendMessage: sendStreamingMessage, - abortStream, - isLoading: isStreamLoading, - } = useChatStreaming({ - endpoint: apiEndpoint, - onComplete: (message, responseId) => { - setMessages((prev) => [...prev, message]); - setLoading(false); - setWaitingTooLong(false); - if (responseId) { - cancelNudges(); - setPreviousResponseIds((prev) => ({ - ...prev, - [endpoint]: responseId, - })); - - if (!currentConversationId) { - setCurrentConversationId(responseId); - refreshConversations(true); - } else { - refreshConversationsSilent(); - } - - // Save filter association for this response - if (conversationFilter && typeof window !== "undefined") { - const newKey = `conversation_filter_${responseId}`; - localStorage.setItem(newKey, conversationFilter.id); - console.log("[CHAT] Saved filter association:", newKey, "=", conversationFilter.id); - } - } - }, - onError: (error) => { - console.error("Streaming error:", error); - setLoading(false); - setWaitingTooLong(false); - // Set chat error flag to trigger test_completion=true on health checks - setChatError(true); - const errorMessage: Message = { - role: "assistant", - content: - "Sorry, I couldn't connect to the chat service. Please try again.", - timestamp: new Date(), - }; - setMessages((prev) => [...prev, errorMessage]); - }, - }); - - // Show warning if waiting too long (20 seconds) - useEffect(() => { - let timeoutId: NodeJS.Timeout | null = null; - - if (isStreamLoading && !streamingMessage) { - timeoutId = setTimeout(() => { - setWaitingTooLong(true); - }, 20000); // 20 seconds - } else { - setWaitingTooLong(false); - } - - return () => { - if (timeoutId) clearTimeout(timeoutId); - }; - }, [isStreamLoading, streamingMessage]); - - const handleEndpointChange = (newEndpoint: EndpointType) => { - setEndpoint(newEndpoint); - // Clear the conversation when switching endpoints to avoid response ID conflicts - setMessages([]); - setPreviousResponseIds({ chat: null, langflow: null }); - }; - - const handleFileUpload = async (file: File) => { - console.log("handleFileUpload called with file:", file.name); - - if (isUploading) return; - - setIsUploading(true); - setLoading(true); - - try { - const formData = new FormData(); - formData.append("file", file); - formData.append("endpoint", endpoint); - - // Add previous_response_id if we have one for this endpoint - const currentResponseId = previousResponseIds[endpoint]; - if (currentResponseId) { - formData.append("previous_response_id", currentResponseId); - } - - const response = await fetch("/api/upload_context", { - method: "POST", - body: formData, - }); - - console.log("Upload response status:", response.status); - - if (!response.ok) { - const errorText = await response.text(); - console.error( - "Upload failed with status:", - response.status, - "Response:", - errorText, - ); - throw new Error("Failed to process document"); - } - - const result = await response.json(); - console.log("Upload result:", result); - - if (!response.ok) { - // Set chat error flag if upload fails - setChatError(true); - } - - if (response.status === 201) { - // New flow: Got task ID, start tracking with centralized system - const taskId = result.task_id || result.id; - - if (!taskId) { - console.error("No task ID in 201 
response:", result); - throw new Error("No task ID received from server"); - } - - // Add task to centralized tracking - addTask(taskId); - - return null; - } else if (response.ok) { - // Original flow: Direct response - - const uploadMessage: Message = { - role: "user", - content: `I'm uploading a document called "${result.filename}". Here is its content:`, - timestamp: new Date(), - }; - - const confirmationMessage: Message = { - role: "assistant", - content: `Confirmed`, - timestamp: new Date(), - }; - - setMessages((prev) => [...prev, uploadMessage, confirmationMessage]); - - // Add file to conversation docs - if (result.filename) { - addConversationDoc(result.filename); - } - - // Update the response ID for this endpoint - if (result.response_id) { - setPreviousResponseIds((prev) => ({ - ...prev, - [endpoint]: result.response_id, - })); - - // If this is a new conversation (no currentConversationId), set it now - if (!currentConversationId) { - setCurrentConversationId(result.response_id); - refreshConversations(true); - } else { - // For existing conversations, do a silent refresh to keep backend in sync - refreshConversationsSilent(); - } - - return result.response_id; - } - } else { - throw new Error(`Upload failed: ${response.status}`); - } - } catch (error) { - console.error("Upload failed:", error); - // Set chat error flag to trigger test_completion=true on health checks - setChatError(true); - const errorMessage: Message = { - role: "assistant", - content: `❌ Failed to process document. Please try again.`, - timestamp: new Date(), - }; - setMessages((prev) => [...prev.slice(0, -1), errorMessage]); - } finally { - setIsUploading(false); - setLoading(false); - } - }; - - const handleFilePickerClick = () => { - chatInputRef.current?.clickFileInput(); - }; - - const handleFilterSelect = (filter: KnowledgeFilterData | null) => { - // Update conversation-specific filter - setConversationFilter(filter); - setIsFilterHighlighted(false); - }; - - // Auto-focus the input on component mount - useEffect(() => { - chatInputRef.current?.focusInput(); - }, []); - - // Explicitly handle external new conversation trigger - useEffect(() => { - const handleNewConversation = () => { - // Abort any in-flight streaming so it doesn't bleed into new chat - abortStream(); - // Reset chat UI even if context state was already 'new' - setMessages([ - { - role: "assistant", - content: "How can I assist?", - timestamp: new Date(), - }, - ]); - setInput(""); - setExpandedFunctionCalls(new Set()); - setIsFilterHighlighted(false); - setLoading(false); - lastLoadedConversationRef.current = null; - - // Focus input after a short delay to ensure rendering is complete - setTimeout(() => { - chatInputRef.current?.focusInput(); - }, 100); - }; - - const handleFocusInput = () => { - chatInputRef.current?.focusInput(); - }; - - window.addEventListener("newConversation", handleNewConversation); - window.addEventListener("focusInput", handleFocusInput); - return () => { - window.removeEventListener("newConversation", handleNewConversation); - window.removeEventListener("focusInput", handleFocusInput); - }; - }, [abortStream, setLoading]); - - // Load conversation only when user explicitly selects a conversation - useEffect(() => { - // Only load conversation data when: - // 1. conversationData exists AND - // 2. It's different from the last loaded conversation AND - // 3. 
User is not in the middle of an interaction - if ( - conversationData?.messages && - lastLoadedConversationRef.current !== conversationData.response_id && - !isUserInteracting && - !isForkingInProgress - ) { - console.log( - "Loading conversation with", - conversationData.messages.length, - "messages", - ); - // Convert backend message format to frontend Message interface - const convertedMessages: Message[] = conversationData.messages.map( - (msg: { - role: string; - content: string; - timestamp?: string; - response_id?: string; - chunks?: Array<{ - item?: { - type?: string; - tool_name?: string; - id?: string; - inputs?: unknown; - results?: unknown; - status?: string; - }; - delta?: { - tool_calls?: Array<{ - id?: string; - function?: { name?: string; arguments?: string }; - type?: string; - }>; - }; - type?: string; - result?: unknown; - output?: unknown; - response?: unknown; - }>; - response_data?: unknown; - }) => { - const message: Message = { - role: msg.role as "user" | "assistant", - content: msg.content, - timestamp: new Date(msg.timestamp || new Date()), - }; - - // Extract function calls from chunks or response_data - if (msg.role === "assistant" && (msg.chunks || msg.response_data)) { - const functionCalls: FunctionCall[] = []; - console.log("Processing assistant message for function calls:", { - hasChunks: !!msg.chunks, - chunksLength: msg.chunks?.length, - hasResponseData: !!msg.response_data, - }); - - // Process chunks (streaming data) - if (msg.chunks && Array.isArray(msg.chunks)) { - for (const chunk of msg.chunks) { - // Handle Langflow format: chunks[].item.tool_call - if (chunk.item && chunk.item.type === "tool_call") { - const toolCall = chunk.item; - console.log("Found Langflow tool call:", toolCall); - functionCalls.push({ - id: toolCall.id || "", - name: toolCall.tool_name || "unknown", - arguments: - (toolCall.inputs as Record) || {}, - argumentsString: JSON.stringify(toolCall.inputs || {}), - result: toolCall.results as - | Record - | ToolCallResult[], - status: - (toolCall.status as "pending" | "completed" | "error") || - "completed", - type: "tool_call", - }); - } - // Handle OpenAI format: chunks[].delta.tool_calls - else if (chunk.delta?.tool_calls) { - for (const toolCall of chunk.delta.tool_calls) { - if (toolCall.function) { - functionCalls.push({ - id: toolCall.id || "", - name: toolCall.function.name || "unknown", - arguments: toolCall.function.arguments - ? JSON.parse(toolCall.function.arguments) - : {}, - argumentsString: toolCall.function.arguments || "", - status: "completed", - type: toolCall.type || "function", - }); - } - } - } - // Process tool call results from chunks - if ( - chunk.type === "response.tool_call.result" || - chunk.type === "tool_call_result" - ) { - const lastCall = functionCalls[functionCalls.length - 1]; - if (lastCall) { - lastCall.result = - (chunk.result as - | Record - | ToolCallResult[]) || - (chunk as Record); - lastCall.status = "completed"; - } - } - } - } - - // Process response_data (non-streaming data) - if (msg.response_data && typeof msg.response_data === "object") { - // Look for tool_calls in various places in the response data - const responseData = - typeof msg.response_data === "string" - ? 
JSON.parse(msg.response_data) - : msg.response_data; - - if ( - responseData.tool_calls && - Array.isArray(responseData.tool_calls) - ) { - for (const toolCall of responseData.tool_calls) { - functionCalls.push({ - id: toolCall.id, - name: toolCall.function?.name || toolCall.name, - arguments: - toolCall.function?.arguments || toolCall.arguments, - argumentsString: - typeof ( - toolCall.function?.arguments || toolCall.arguments - ) === "string" - ? toolCall.function?.arguments || toolCall.arguments - : JSON.stringify( - toolCall.function?.arguments || toolCall.arguments, - ), - result: toolCall.result, - status: "completed", - type: toolCall.type || "function", - }); - } - } - } - - if (functionCalls.length > 0) { - console.log("Setting functionCalls on message:", functionCalls); - message.functionCalls = functionCalls; - } else { - console.log("No function calls found in message"); - } - } - - return message; - }, - ); - - setMessages(convertedMessages); - lastLoadedConversationRef.current = conversationData.response_id; - - // Set the previous response ID for this conversation - setPreviousResponseIds((prev) => ({ - ...prev, - [conversationData.endpoint]: conversationData.response_id, - })); - - // Focus input when loading a conversation - setTimeout(() => { - chatInputRef.current?.focusInput(); - }, 100); - } - }, [ - conversationData, - isUserInteracting, - isForkingInProgress, - setPreviousResponseIds, - ]); - - // Handle new conversation creation - only reset messages when placeholderConversation is set - useEffect(() => { - if (placeholderConversation && currentConversationId === null) { - console.log("Starting new conversation"); - setMessages([ - { - role: "assistant", - content: "How can I assist?", - timestamp: new Date(), - }, - ]); - lastLoadedConversationRef.current = null; - - // Focus input when starting a new conversation - setTimeout(() => { - chatInputRef.current?.focusInput(); - }, 100); - } - }, [placeholderConversation, currentConversationId]); - - // Listen for file upload events from navigation - useEffect(() => { - const handleFileUploadStart = (event: CustomEvent) => { - const { filename } = event.detail; - console.log("Chat page received file upload start event:", filename); - - setLoading(true); - setIsUploading(true); - setUploadedFile(null); // Clear previous file - }; - - const handleFileUploaded = (event: CustomEvent) => { - const { result } = event.detail; - console.log("Chat page received file upload event:", result); - - setUploadedFile(null); // Clear file after upload - - // Update the response ID for this endpoint - if (result.response_id) { - setPreviousResponseIds((prev) => ({ - ...prev, - [endpoint]: result.response_id, - })); - } - }; - - const handleFileUploadComplete = () => { - console.log("Chat page received file upload complete event"); - setLoading(false); - setIsUploading(false); - }; - - const handleFileUploadError = (event: CustomEvent) => { - const { filename, error } = event.detail; - console.log( - "Chat page received file upload error event:", - filename, - error, - ); - - // Replace the last message with error message - const errorMessage: Message = { - role: "assistant", - content: `❌ Upload failed for **${filename}**: ${error}`, - timestamp: new Date(), - }; - setMessages((prev) => [...prev.slice(0, -1), errorMessage]); - setUploadedFile(null); // Clear file on error - }; - - window.addEventListener( - "fileUploadStart", - handleFileUploadStart as EventListener, - ); - window.addEventListener( - "fileUploaded", - handleFileUploaded 
as EventListener, - ); - window.addEventListener( - "fileUploadComplete", - handleFileUploadComplete as EventListener, - ); - window.addEventListener( - "fileUploadError", - handleFileUploadError as EventListener, - ); - - return () => { - window.removeEventListener( - "fileUploadStart", - handleFileUploadStart as EventListener, - ); - window.removeEventListener( - "fileUploaded", - handleFileUploaded as EventListener, - ); - window.removeEventListener( - "fileUploadComplete", - handleFileUploadComplete as EventListener, - ); - window.removeEventListener( - "fileUploadError", - handleFileUploadError as EventListener, - ); - }; - }, [endpoint, setPreviousResponseIds, setLoading]); - - // Check if onboarding is complete by looking at local storage - const [isOnboardingComplete, setIsOnboardingComplete] = useState(() => { - if (typeof window === "undefined") return false; - return localStorage.getItem("onboarding-step") === null; - }); - - // Listen for storage changes to detect when onboarding completes - useEffect(() => { - const checkOnboarding = () => { - if (typeof window !== "undefined") { - setIsOnboardingComplete( - localStorage.getItem("onboarding-step") === null, - ); - } - }; - - // Check periodically since storage events don't fire in the same tab - const interval = setInterval(checkOnboarding, 500); - - return () => clearInterval(interval); - }, []); - - // Prepare filters for nudges (same as chat) - const processedFiltersForNudges = parsedFilterData?.filters - ? (() => { - const filters = parsedFilterData.filters; - const processed: SelectedFilters = { - data_sources: [], - document_types: [], - owners: [], - }; - processed.data_sources = filters.data_sources.includes("*") - ? [] - : filters.data_sources; - processed.document_types = filters.document_types.includes("*") - ? [] - : filters.document_types; - processed.owners = filters.owners.includes("*") ? [] : filters.owners; - - const hasFilters = - processed.data_sources.length > 0 || - processed.document_types.length > 0 || - processed.owners.length > 0; - return hasFilters ? processed : undefined; - })() - : undefined; - - const { data: nudges = [], cancel: cancelNudges } = useGetNudgesQuery( - { - chatId: previousResponseIds[endpoint], - filters: processedFiltersForNudges, - limit: parsedFilterData?.limit ?? 3, - scoreThreshold: parsedFilterData?.scoreThreshold ?? 0, - }, - { - enabled: isOnboardingComplete, // Only fetch nudges after onboarding is complete - }, - ); - - const handleSSEStream = async ( - userMessage: Message, - previousResponseId?: string, - ) => { - // Prepare filters - const processedFilters = parsedFilterData?.filters - ? (() => { - const filters = parsedFilterData.filters; - const processed: SelectedFilters = { - data_sources: [], - document_types: [], - owners: [], - }; - processed.data_sources = filters.data_sources.includes("*") - ? [] - : filters.data_sources; - processed.document_types = filters.document_types.includes("*") - ? [] - : filters.document_types; - processed.owners = filters.owners.includes("*") ? [] : filters.owners; - - const hasFilters = - processed.data_sources.length > 0 || - processed.document_types.length > 0 || - processed.owners.length > 0; - return hasFilters ? 
processed : undefined; - })() - : undefined; - - // Use passed previousResponseId if available, otherwise fall back to state - const responseIdToUse = previousResponseId || previousResponseIds[endpoint]; - - console.log("[CHAT] Sending streaming message:", { - conversationFilter: conversationFilter?.id, - currentConversationId, - responseIdToUse, - }); - - // Use the hook to send the message - await sendStreamingMessage({ - prompt: userMessage.content, - previousResponseId: responseIdToUse || undefined, - filters: processedFilters, - filter_id: conversationFilter?.id, // ✅ Add filter_id for this conversation - limit: parsedFilterData?.limit ?? 10, - scoreThreshold: parsedFilterData?.scoreThreshold ?? 0, - }); - scrollToBottom({ - animation: "smooth", - duration: 1000, - }); - }; - - const handleSendMessage = async ( - inputMessage: string, - previousResponseId?: string, - ) => { - if (!inputMessage.trim() || loading) return; - - const userMessage: Message = { - role: "user", - content: inputMessage.trim(), - timestamp: new Date(), - }; - - if (messages.length === 1) { - setMessages([userMessage]); - } else { - setMessages((prev) => [...prev, userMessage]); - } - setInput(""); - setLoading(true); - setIsFilterHighlighted(false); - - scrollToBottom({ - animation: "smooth", - duration: 1000, - }); - - if (asyncMode) { - await handleSSEStream(userMessage, previousResponseId); - } else { - // Original non-streaming logic - try { - const apiEndpoint = endpoint === "chat" ? "/api/chat" : "/api/langflow"; - - const requestBody: RequestBody = { - prompt: userMessage.content, - ...(parsedFilterData?.filters && - (() => { - const filters = parsedFilterData.filters; - const processed: SelectedFilters = { - data_sources: [], - document_types: [], - owners: [], - }; - // Only copy non-wildcard arrays - processed.data_sources = filters.data_sources.includes("*") - ? [] - : filters.data_sources; - processed.document_types = filters.document_types.includes("*") - ? [] - : filters.document_types; - processed.owners = filters.owners.includes("*") - ? [] - : filters.owners; - - // Only include filters if any array has values - const hasFilters = - processed.data_sources.length > 0 || - processed.document_types.length > 0 || - processed.owners.length > 0; - return hasFilters ? { filters: processed } : {}; - })()), - limit: parsedFilterData?.limit ?? 10, - scoreThreshold: parsedFilterData?.scoreThreshold ?? 
0, - }; - - // Add previous_response_id if we have one for this endpoint - const currentResponseId = previousResponseIds[endpoint]; - if (currentResponseId) { - requestBody.previous_response_id = currentResponseId; - } - - // Add filter_id if a filter is selected for this conversation - if (conversationFilter) { - requestBody.filter_id = conversationFilter.id; - } - - // Debug logging - console.log("[DEBUG] Sending message with:", { - previous_response_id: requestBody.previous_response_id, - filter_id: requestBody.filter_id, - currentConversationId, - previousResponseIds, - }); - - const response = await fetch(apiEndpoint, { - method: "POST", - headers: { - "Content-Type": "application/json", - }, - body: JSON.stringify(requestBody), - }); - - const result = await response.json(); - - if (response.ok) { - const assistantMessage: Message = { - role: "assistant", - content: result.response, - timestamp: new Date(), - }; - setMessages((prev) => [...prev, assistantMessage]); - if (result.response_id) { - cancelNudges(); - } - - // Store the response ID if present for this endpoint - if (result.response_id) { - console.log("[DEBUG] Received response_id:", result.response_id, "currentConversationId:", currentConversationId); - - setPreviousResponseIds((prev) => ({ - ...prev, - [endpoint]: result.response_id, - })); - - // If this is a new conversation (no currentConversationId), set it now - if (!currentConversationId) { - console.log("[DEBUG] Setting currentConversationId to:", result.response_id); - setCurrentConversationId(result.response_id); - refreshConversations(true); - } else { - console.log("[DEBUG] Existing conversation, doing silent refresh"); - // For existing conversations, do a silent refresh to keep backend in sync - refreshConversationsSilent(); - } - - // Carry forward the filter association to the new response_id - if (conversationFilter && typeof window !== "undefined") { - const newKey = `conversation_filter_${result.response_id}`; - localStorage.setItem(newKey, conversationFilter.id); - console.log("[DEBUG] Saved filter association:", newKey, "=", conversationFilter.id); - } - } - } else { - console.error("Chat failed:", result.error); - // Set chat error flag to trigger test_completion=true on health checks - setChatError(true); - const errorMessage: Message = { - role: "assistant", - content: "Sorry, I encountered an error. Please try again.", - timestamp: new Date(), - }; - setMessages((prev) => [...prev, errorMessage]); - } - } catch (error) { - console.error("Chat error:", error); - // Set chat error flag to trigger test_completion=true on health checks - setChatError(true); - const errorMessage: Message = { - role: "assistant", - content: - "Sorry, I couldn't connect to the chat service. 
Please try again.", - timestamp: new Date(), - }; - setMessages((prev) => [...prev, errorMessage]); - } - } - - setLoading(false); - }; - - const handleSubmit = async (e: React.FormEvent) => { - e.preventDefault(); - - // Check if there's an uploaded file and upload it first - let uploadedResponseId: string | null = null; - if (uploadedFile) { - // Upload the file first - const responseId = await handleFileUpload(uploadedFile); - // Clear the file after upload - setUploadedFile(null); - - // If the upload resulted in a new conversation, store the response ID - if (responseId) { - uploadedResponseId = responseId; - setPreviousResponseIds((prev) => ({ - ...prev, - [endpoint]: responseId, - })); - } - } - - // Only send message if there's input text - if (input.trim() || uploadedFile) { - // Pass the responseId from upload (if any) to handleSendMessage - handleSendMessage( - !input.trim() ? FILE_CONFIRMATION : input, - uploadedResponseId || undefined, - ); - } - }; - - const toggleFunctionCall = (functionCallId: string) => { - setExpandedFunctionCalls((prev) => { - const newSet = new Set(prev); - if (newSet.has(functionCallId)) { - newSet.delete(functionCallId); - } else { - newSet.add(functionCallId); - } - return newSet; - }); - }; - - const handleForkConversation = ( - messageIndex: number, - event?: React.MouseEvent, - ) => { - // Prevent any default behavior and stop event propagation - if (event) { - event.preventDefault(); - event.stopPropagation(); - } - - // Set interaction state to prevent auto-scroll interference - setIsUserInteracting(true); - setIsForkingInProgress(true); - - console.log("Fork conversation called for message index:", messageIndex); - - // Get messages up to and including the selected assistant message - const messagesToKeep = messages.slice(0, messageIndex + 1); - - // The selected message should be an assistant message (since fork button is only on assistant messages) - const forkedMessage = messages[messageIndex]; - if (forkedMessage.role !== "assistant") { - console.error("Fork button should only be on assistant messages"); - setIsUserInteracting(false); - setIsForkingInProgress(false); - return; - } - - // For forking, we want to continue from the response_id of the assistant message we're forking from - // Since we don't store individual response_ids per message yet, we'll use the current conversation's response_id - // This means we're continuing the conversation thread from that point - const responseIdToForkFrom = - currentConversationId || previousResponseIds[endpoint]; - - // Create a new conversation by properly forking - setMessages(messagesToKeep); - - // Use the chat context's fork method which handles creating a new conversation properly - if (forkFromResponse) { - forkFromResponse(responseIdToForkFrom || ""); - } else { - // Fallback to manual approach - setCurrentConversationId(null); // This creates a new conversation thread - - // Set the response_id we want to continue from as the previous response ID - // This tells the backend to continue the conversation from this point - setPreviousResponseIds((prev) => ({ - ...prev, - [endpoint]: responseIdToForkFrom, - })); - } - - console.log("Forked conversation with", messagesToKeep.length, "messages"); - - // Reset interaction state after a longer delay to ensure all effects complete - setTimeout(() => { - setIsUserInteracting(false); - setIsForkingInProgress(false); - console.log("Fork interaction complete, re-enabling auto effects"); - }, 500); - - // The original conversation remains unchanged in 
the sidebar - // This new forked conversation will get its own response_id when the user sends the next message - }; - - const handleSuggestionClick = (suggestion: string) => { - handleSendMessage(suggestion); - }; - - return ( - <> - {/* Debug header - only show in debug mode */} - {isDebugMode && ( -
-
-
-              {/* Async Mode Toggle */}
-
-
-
-
-              {/* Endpoint Toggle */}
-
-
-
-
-
-          )}
-
-
-
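The debug header above (its JSX tags were lost in extraction; only the comments and closers survive) wires two toggles: the async-mode switch flips between streaming and plain request/response, and the endpoint switch swaps /api/chat for /api/langflow. A minimal sketch of the endpoint handler this hunk introduces on its "+" side; the `EndpointType` union is an assumption inferred from the `{ chat, langflow }` record used throughout the file:

type EndpointType = "chat" | "langflow";

function makeEndpointSwitcher(
  setEndpoint: (next: EndpointType) => void,
  setMessages: (messages: never[]) => void,
  setPreviousResponseIds: (ids: Record<EndpointType, string | null>) => void,
) {
  return (next: EndpointType): void => {
    setEndpoint(next);
    // Drop the visible thread: messages from one backend are not resumable on the other.
    setMessages([]);
    // Reset both IDs; a response_id minted by /api/chat is meaningless to /api/langflow.
    setPreviousResponseIds({ chat: null, langflow: null });
  };
}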
- {messages.length === 0 && !streamingMessage ? ( -
-
-            {isUploading ? (
-              <>
-
-
-                Processing your document...
-
-
-
-                This may take a few moments
-
-
-            ) : null}
-
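The `isUploading` flag rendered above is driven by window-level upload lifecycle events that this file subscribes to elsewhere (`fileUploadStart`, `fileUploaded`, `fileUploadComplete`, `fileUploadError`). The dispatching side is not part of this diff; a hedged sketch of what a producer (for example the navigation component) would emit, using the event names and `detail` shapes the listeners expect — the helper names are hypothetical:

function announceUploadStart(filename: string): void {
  window.dispatchEvent(
    new CustomEvent("fileUploadStart", { detail: { filename } }),
  );
}

function announceUploadResult(result: { response_id?: string }): void {
  // The "fileUploaded" listener reads result.response_id to re-thread the conversation.
  window.dispatchEvent(new CustomEvent("fileUploaded", { detail: { result } }));
  window.dispatchEvent(new CustomEvent("fileUploadComplete"));
}

function announceUploadError(filename: string, error: string): void {
  window.dispatchEvent(
    new CustomEvent("fileUploadError", { detail: { filename, error } }),
  );
}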
- ) : ( - <> - {messages.map((message, index) => - message.role === "user" - ? (messages[index]?.content.match(FILES_REGEX)?.[0] ?? - null) === null && ( -
- = 2 && - (messages[index - 2]?.content.match( - FILES_REGEX, - )?.[0] ?? - undefined) && - message.content === FILE_CONFIRMATION - ? undefined - : message.content - } - files={ - index >= 2 - ? (messages[index - 2]?.content.match( - FILES_REGEX, - )?.[0] ?? undefined) - : undefined - } - /> -
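The render logic above pairs an upload-marker message with the canned confirmation that follows it two positions later, hiding the raw marker text and showing a file chip instead. A sketch of that pairing as a standalone helper; `FILES_REGEX` and `FILE_CONFIRMATION` are defined elsewhere in the app, so the values below are placeholders, not the real ones:

const FILES_REGEX = /\[file:[^\]]+\]/; // hypothetical pattern
const FILE_CONFIRMATION = "File received."; // hypothetical canned text

function deriveDisplay(messages: { content: string }[], index: number) {
  // The upload marker, if any, sits two messages back.
  const files =
    index >= 2
      ? (messages[index - 2]?.content.match(FILES_REGEX)?.[0] ?? undefined)
      : undefined;
  // When this message is just the canned confirmation after an upload marker,
  // suppress the text and render only the attached file.
  const content =
    files && messages[index].content === FILE_CONFIRMATION
      ? undefined
      : messages[index].content;
  return { content, files };
}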
- ) - : message.role === "assistant" && - (index < 1 || - (messages[index - 1]?.content.match(FILES_REGEX)?.[0] ?? - null) === null) && ( -
- handleForkConversation(index, e)} - animate={false} - isInactive={index < messages.length - 1} - isInitialGreeting={ - index === 0 && - messages.length === 1 && - message.content === "How can I assist?" - } - /> -
- ), - )} - - {/* Streaming Message Display */} - {streamingMessage && ( - - )} - - {/* Waiting too long indicator */} - {waitingTooLong && !streamingMessage && loading && ( -
-
-
-                The server is taking longer than expected...
-
-
-
-                  This may be due to high server load. The request will
-                  timeout after 60 seconds.
-
-
- )} - - )} - {!streamingMessage && ( -
- -
- )} -
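The wildcard-stripping IIFE above is repeated three times in this file (nudges, the streaming path, and the non-streaming request body). A sketch of the shared shape, assuming the `SelectedFilters` type used throughout:

interface SelectedFilters {
  data_sources: string[];
  document_types: string[];
  owners: string[];
}

function processFilters(filters: SelectedFilters): SelectedFilters | undefined {
  // "*" means "no restriction", so a wildcard collapses to an empty array.
  const strip = (xs: string[]) => (xs.includes("*") ? [] : xs);
  const processed: SelectedFilters = {
    data_sources: strip(filters.data_sources),
    document_types: strip(filters.document_types),
    owners: strip(filters.owners),
  };
  const hasFilters =
    processed.data_sources.length > 0 ||
    processed.document_types.length > 0 ||
    processed.owners.length > 0;
  // Returning undefined omits the filters key from the request entirely.
  return hasFilters ? processed : undefined;
}

Extracting this would also keep the "omit `filters` entirely when everything is a wildcard" rule in one place instead of three.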
-
-
- {/* Input Area - Fixed at bottom */} - { - // Handle backspace for filter clearing - if ( - e.key === "Backspace" && - selectedFilter && - input.trim() === "" - ) { - e.preventDefault(); - if (isFilterHighlighted) { - // Second backspace - remove the filter - setConversationFilter(null); - setIsFilterHighlighted(false); - } else { - // First backspace - highlight the filter - setIsFilterHighlighted(true); - } - return; - } - - // Handle Enter key for form submission - if (e.key === "Enter" && !e.shiftKey) { - e.preventDefault(); - if (input.trim() && !loading) { - // Trigger form submission by finding the form and calling submit - const form = e.currentTarget.closest("form"); - if (form) { - form.requestSubmit(); - } - } - } - }} - onFilterSelect={handleFilterSelect} - onFilePickerClick={handleFilePickerClick} - onFileSelected={setUploadedFile} - setSelectedFilter={setConversationFilter} - setIsFilterHighlighted={setIsFilterHighlighted} - /> -
- - ); + const isDebugMode = process.env.NEXT_PUBLIC_OPENRAG_DEBUG === "true"; + const { + endpoint, + setEndpoint, + currentConversationId, + conversationData, + setCurrentConversationId, + addConversationDoc, + forkFromResponse, + refreshConversations, + refreshConversationsSilent, + refreshTrigger, + previousResponseIds, + setPreviousResponseIds, + placeholderConversation, + conversationFilter, + setConversationFilter, + } = useChat(); + const [messages, setMessages] = useState([ + { + role: "assistant", + content: "How can I assist?", + timestamp: new Date(), + }, + ]); + const [input, setInput] = useState(""); + const { loading, setLoading } = useLoadingStore(); + const { setChatError } = useChat(); + const [asyncMode, setAsyncMode] = useState(true); + const [expandedFunctionCalls, setExpandedFunctionCalls] = useState< + Set + >(new Set()); + // previousResponseIds now comes from useChat context + const [isUploading, setIsUploading] = useState(false); + const [isFilterHighlighted, setIsFilterHighlighted] = useState(false); + const [isUserInteracting, setIsUserInteracting] = useState(false); + const [isForkingInProgress, setIsForkingInProgress] = useState(false); + const [uploadedFile, setUploadedFile] = useState(null); + const [waitingTooLong, setWaitingTooLong] = useState(false); + + const chatInputRef = useRef(null); + + const { scrollToBottom } = useStickToBottomContext(); + + const lastLoadedConversationRef = useRef(null); + const { addTask } = useTask(); + + // Check if chat history is loading + const { isLoading: isConversationsLoading } = useGetConversationsQuery( + endpoint, + refreshTrigger, + ); + + // Use conversation-specific filter instead of global filter + const selectedFilter = conversationFilter; + + // Parse the conversation filter data + const parsedFilterData = useMemo(() => { + if (!selectedFilter?.query_data) return null; + try { + return JSON.parse(selectedFilter.query_data); + } catch (error) { + console.error("Error parsing filter data:", error); + return null; + } + }, [selectedFilter]); + + // Use the chat streaming hook + const apiEndpoint = endpoint === "chat" ? "/api/chat" : "/api/langflow"; + const { + streamingMessage, + sendMessage: sendStreamingMessage, + abortStream, + isLoading: isStreamLoading, + } = useChatStreaming({ + endpoint: apiEndpoint, + onComplete: (message, responseId) => { + setMessages((prev) => [...prev, message]); + setLoading(false); + setWaitingTooLong(false); + if (responseId) { + cancelNudges(); + setPreviousResponseIds((prev) => ({ + ...prev, + [endpoint]: responseId, + })); + + if (!currentConversationId) { + setCurrentConversationId(responseId); + refreshConversations(true); + } else { + refreshConversationsSilent(); + } + + // Save filter association for this response + if (conversationFilter && typeof window !== "undefined") { + const newKey = `conversation_filter_${responseId}`; + localStorage.setItem(newKey, conversationFilter.id); + console.log( + "[CHAT] Saved filter association:", + newKey, + "=", + conversationFilter.id, + ); + } + } + }, + onError: (error) => { + console.error("Streaming error:", error); + setLoading(false); + setWaitingTooLong(false); + // Set chat error flag to trigger test_completion=true on health checks + setChatError(true); + const errorMessage: Message = { + role: "assistant", + content: + "Sorry, I couldn't connect to the chat service. 
Please try again.", + timestamp: new Date(), + }; + setMessages((prev) => [...prev, errorMessage]); + }, + }); + + // Show warning if waiting too long (20 seconds) + useEffect(() => { + let timeoutId: NodeJS.Timeout | null = null; + + if (isStreamLoading && !streamingMessage) { + timeoutId = setTimeout(() => { + setWaitingTooLong(true); + }, 20000); // 20 seconds + } else { + setWaitingTooLong(false); + } + + return () => { + if (timeoutId) clearTimeout(timeoutId); + }; + }, [isStreamLoading, streamingMessage]); + + const handleEndpointChange = (newEndpoint: EndpointType) => { + setEndpoint(newEndpoint); + // Clear the conversation when switching endpoints to avoid response ID conflicts + setMessages([]); + setPreviousResponseIds({ chat: null, langflow: null }); + }; + + const handleFileUpload = async (file: File) => { + console.log("handleFileUpload called with file:", file.name); + + if (isUploading) return; + + setIsUploading(true); + setLoading(true); + + try { + const formData = new FormData(); + formData.append("file", file); + formData.append("endpoint", endpoint); + + // Add previous_response_id if we have one for this endpoint + const currentResponseId = previousResponseIds[endpoint]; + if (currentResponseId) { + formData.append("previous_response_id", currentResponseId); + } + + const response = await fetch("/api/upload_context", { + method: "POST", + body: formData, + }); + + console.log("Upload response status:", response.status); + + if (!response.ok) { + const errorText = await response.text(); + console.error( + "Upload failed with status:", + response.status, + "Response:", + errorText, + ); + throw new Error("Failed to process document"); + } + + const result = await response.json(); + console.log("Upload result:", result); + + if (!response.ok) { + // Set chat error flag if upload fails + setChatError(true); + } + + if (response.status === 201) { + // New flow: Got task ID, start tracking with centralized system + const taskId = result.task_id || result.id; + + if (!taskId) { + console.error("No task ID in 201 response:", result); + throw new Error("No task ID received from server"); + } + + // Add task to centralized tracking + addTask(taskId); + + return null; + } else if (response.ok) { + // Original flow: Direct response + + const uploadMessage: Message = { + role: "user", + content: `I'm uploading a document called "${result.filename}". 
Here is its content:`, + timestamp: new Date(), + }; + + const confirmationMessage: Message = { + role: "assistant", + content: `Confirmed`, + timestamp: new Date(), + }; + + setMessages((prev) => [...prev, uploadMessage, confirmationMessage]); + + // Add file to conversation docs + if (result.filename) { + addConversationDoc(result.filename); + } + + // Update the response ID for this endpoint + if (result.response_id) { + setPreviousResponseIds((prev) => ({ + ...prev, + [endpoint]: result.response_id, + })); + + // If this is a new conversation (no currentConversationId), set it now + if (!currentConversationId) { + setCurrentConversationId(result.response_id); + refreshConversations(true); + } else { + // For existing conversations, do a silent refresh to keep backend in sync + refreshConversationsSilent(); + } + + return result.response_id; + } + } else { + throw new Error(`Upload failed: ${response.status}`); + } + } catch (error) { + console.error("Upload failed:", error); + // Set chat error flag to trigger test_completion=true on health checks + setChatError(true); + const errorMessage: Message = { + role: "assistant", + content: `❌ Failed to process document. Please try again.`, + timestamp: new Date(), + }; + setMessages((prev) => [...prev.slice(0, -1), errorMessage]); + } finally { + setIsUploading(false); + setLoading(false); + } + }; + + const handleFilePickerClick = () => { + chatInputRef.current?.clickFileInput(); + }; + + const handleFilterSelect = (filter: KnowledgeFilterData | null) => { + // Update conversation-specific filter + setConversationFilter(filter); + setIsFilterHighlighted(false); + }; + + // Auto-focus the input on component mount + useEffect(() => { + chatInputRef.current?.focusInput(); + }, []); + + // Explicitly handle external new conversation trigger + useEffect(() => { + const handleNewConversation = () => { + // Abort any in-flight streaming so it doesn't bleed into new chat + abortStream(); + // Reset chat UI even if context state was already 'new' + setMessages([ + { + role: "assistant", + content: "How can I assist?", + timestamp: new Date(), + }, + ]); + setInput(""); + setExpandedFunctionCalls(new Set()); + setIsFilterHighlighted(false); + setLoading(false); + lastLoadedConversationRef.current = null; + + // Focus input after a short delay to ensure rendering is complete + setTimeout(() => { + chatInputRef.current?.focusInput(); + }, 100); + }; + + const handleFocusInput = () => { + chatInputRef.current?.focusInput(); + }; + + window.addEventListener("newConversation", handleNewConversation); + window.addEventListener("focusInput", handleFocusInput); + return () => { + window.removeEventListener("newConversation", handleNewConversation); + window.removeEventListener("focusInput", handleFocusInput); + }; + }, [abortStream, setLoading]); + + // Load conversation only when user explicitly selects a conversation + useEffect(() => { + // Only load conversation data when: + // 1. conversationData exists AND + // 2. It's different from the last loaded conversation AND + // 3. 
User is not in the middle of an interaction + if ( + conversationData?.messages && + lastLoadedConversationRef.current !== conversationData.response_id && + !isUserInteracting && + !isForkingInProgress + ) { + console.log( + "Loading conversation with", + conversationData.messages.length, + "messages", + ); + // Convert backend message format to frontend Message interface + const convertedMessages: Message[] = conversationData.messages.map( + (msg: { + role: string; + content: string; + timestamp?: string; + response_id?: string; + chunks?: Array<{ + item?: { + type?: string; + tool_name?: string; + id?: string; + inputs?: unknown; + results?: unknown; + status?: string; + }; + delta?: { + tool_calls?: Array<{ + id?: string; + function?: { name?: string; arguments?: string }; + type?: string; + }>; + }; + type?: string; + result?: unknown; + output?: unknown; + response?: unknown; + }>; + response_data?: unknown; + }) => { + const message: Message = { + role: msg.role as "user" | "assistant", + content: msg.content, + timestamp: new Date(msg.timestamp || new Date()), + }; + + // Extract function calls from chunks or response_data + if (msg.role === "assistant" && (msg.chunks || msg.response_data)) { + const functionCalls: FunctionCall[] = []; + console.log("Processing assistant message for function calls:", { + hasChunks: !!msg.chunks, + chunksLength: msg.chunks?.length, + hasResponseData: !!msg.response_data, + }); + + // Process chunks (streaming data) + if (msg.chunks && Array.isArray(msg.chunks)) { + for (const chunk of msg.chunks) { + // Handle Langflow format: chunks[].item.tool_call + if (chunk.item && chunk.item.type === "tool_call") { + const toolCall = chunk.item; + console.log("Found Langflow tool call:", toolCall); + functionCalls.push({ + id: toolCall.id || "", + name: toolCall.tool_name || "unknown", + arguments: + (toolCall.inputs as Record) || {}, + argumentsString: JSON.stringify(toolCall.inputs || {}), + result: toolCall.results as + | Record + | ToolCallResult[], + status: + (toolCall.status as "pending" | "completed" | "error") || + "completed", + type: "tool_call", + }); + } + // Handle OpenAI format: chunks[].delta.tool_calls + else if (chunk.delta?.tool_calls) { + for (const toolCall of chunk.delta.tool_calls) { + if (toolCall.function) { + functionCalls.push({ + id: toolCall.id || "", + name: toolCall.function.name || "unknown", + arguments: toolCall.function.arguments + ? JSON.parse(toolCall.function.arguments) + : {}, + argumentsString: toolCall.function.arguments || "", + status: "completed", + type: toolCall.type || "function", + }); + } + } + } + // Process tool call results from chunks + if ( + chunk.type === "response.tool_call.result" || + chunk.type === "tool_call_result" + ) { + const lastCall = functionCalls[functionCalls.length - 1]; + if (lastCall) { + lastCall.result = + (chunk.result as + | Record + | ToolCallResult[]) || + (chunk as Record); + lastCall.status = "completed"; + } + } + } + } + + // Process response_data (non-streaming data) + if (msg.response_data && typeof msg.response_data === "object") { + // Look for tool_calls in various places in the response data + const responseData = + typeof msg.response_data === "string" + ? 
JSON.parse(msg.response_data) + : msg.response_data; + + if ( + responseData.tool_calls && + Array.isArray(responseData.tool_calls) + ) { + for (const toolCall of responseData.tool_calls) { + functionCalls.push({ + id: toolCall.id, + name: toolCall.function?.name || toolCall.name, + arguments: + toolCall.function?.arguments || toolCall.arguments, + argumentsString: + typeof ( + toolCall.function?.arguments || toolCall.arguments + ) === "string" + ? toolCall.function?.arguments || toolCall.arguments + : JSON.stringify( + toolCall.function?.arguments || toolCall.arguments, + ), + result: toolCall.result, + status: "completed", + type: toolCall.type || "function", + }); + } + } + } + + if (functionCalls.length > 0) { + console.log("Setting functionCalls on message:", functionCalls); + message.functionCalls = functionCalls; + } else { + console.log("No function calls found in message"); + } + } + + return message; + }, + ); + + setMessages(convertedMessages); + lastLoadedConversationRef.current = conversationData.response_id; + + // Set the previous response ID for this conversation + setPreviousResponseIds((prev) => ({ + ...prev, + [conversationData.endpoint]: conversationData.response_id, + })); + + // Focus input when loading a conversation + setTimeout(() => { + chatInputRef.current?.focusInput(); + }, 100); + } else if (!conversationData) { + // No conversation selected (new conversation) + lastLoadedConversationRef.current = null; + } + }, [ + conversationData, + isUserInteracting, + isForkingInProgress, + setPreviousResponseIds, + ]); + + // Handle new conversation creation - only reset messages when placeholderConversation is set + useEffect(() => { + if (placeholderConversation && currentConversationId === null) { + console.log("Starting new conversation"); + setMessages([ + { + role: "assistant", + content: "How can I assist?", + timestamp: new Date(), + }, + ]); + lastLoadedConversationRef.current = null; + + // Focus input when starting a new conversation + setTimeout(() => { + chatInputRef.current?.focusInput(); + }, 100); + } + }, [placeholderConversation, currentConversationId]); + + // Listen for file upload events from navigation + useEffect(() => { + const handleFileUploadStart = (event: CustomEvent) => { + const { filename } = event.detail; + console.log("Chat page received file upload start event:", filename); + + setLoading(true); + setIsUploading(true); + setUploadedFile(null); // Clear previous file + }; + + const handleFileUploaded = (event: CustomEvent) => { + const { result } = event.detail; + console.log("Chat page received file upload event:", result); + + setUploadedFile(null); // Clear file after upload + + // Update the response ID for this endpoint + if (result.response_id) { + setPreviousResponseIds((prev) => ({ + ...prev, + [endpoint]: result.response_id, + })); + } + }; + + const handleFileUploadComplete = () => { + console.log("Chat page received file upload complete event"); + setLoading(false); + setIsUploading(false); + }; + + const handleFileUploadError = (event: CustomEvent) => { + const { filename, error } = event.detail; + console.log( + "Chat page received file upload error event:", + filename, + error, + ); + + // Replace the last message with error message + const errorMessage: Message = { + role: "assistant", + content: `❌ Upload failed for **${filename}**: ${error}`, + timestamp: new Date(), + }; + setMessages((prev) => [...prev.slice(0, -1), errorMessage]); + setUploadedFile(null); // Clear file on error + }; + + window.addEventListener( + 
"fileUploadStart", + handleFileUploadStart as EventListener, + ); + window.addEventListener( + "fileUploaded", + handleFileUploaded as EventListener, + ); + window.addEventListener( + "fileUploadComplete", + handleFileUploadComplete as EventListener, + ); + window.addEventListener( + "fileUploadError", + handleFileUploadError as EventListener, + ); + + return () => { + window.removeEventListener( + "fileUploadStart", + handleFileUploadStart as EventListener, + ); + window.removeEventListener( + "fileUploaded", + handleFileUploaded as EventListener, + ); + window.removeEventListener( + "fileUploadComplete", + handleFileUploadComplete as EventListener, + ); + window.removeEventListener( + "fileUploadError", + handleFileUploadError as EventListener, + ); + }; + }, [endpoint, setPreviousResponseIds, setLoading]); + + // Check if onboarding is complete by looking at local storage + const [isOnboardingComplete, setIsOnboardingComplete] = useState(() => { + if (typeof window === "undefined") return false; + return localStorage.getItem("onboarding-step") === null; + }); + + // Listen for storage changes to detect when onboarding completes + useEffect(() => { + const checkOnboarding = () => { + if (typeof window !== "undefined") { + setIsOnboardingComplete( + localStorage.getItem("onboarding-step") === null, + ); + } + }; + + // Check periodically since storage events don't fire in the same tab + const interval = setInterval(checkOnboarding, 500); + + return () => clearInterval(interval); + }, []); + + // Prepare filters for nudges (same as chat) + const processedFiltersForNudges = parsedFilterData?.filters + ? (() => { + const filters = parsedFilterData.filters; + const processed: SelectedFilters = { + data_sources: [], + document_types: [], + owners: [], + }; + processed.data_sources = filters.data_sources.includes("*") + ? [] + : filters.data_sources; + processed.document_types = filters.document_types.includes("*") + ? [] + : filters.document_types; + processed.owners = filters.owners.includes("*") ? [] : filters.owners; + + const hasFilters = + processed.data_sources.length > 0 || + processed.document_types.length > 0 || + processed.owners.length > 0; + return hasFilters ? processed : undefined; + })() + : undefined; + + const { data: nudges = [], cancel: cancelNudges } = useGetNudgesQuery( + { + chatId: previousResponseIds[endpoint], + filters: processedFiltersForNudges, + limit: parsedFilterData?.limit ?? 3, + scoreThreshold: parsedFilterData?.scoreThreshold ?? 0, + }, + { + enabled: isOnboardingComplete && !isConversationsLoading, // Only fetch nudges after onboarding is complete AND chat history is not loading + }, + ); + + const handleSSEStream = async ( + userMessage: Message, + previousResponseId?: string, + ) => { + // Prepare filters + const processedFilters = parsedFilterData?.filters + ? (() => { + const filters = parsedFilterData.filters; + const processed: SelectedFilters = { + data_sources: [], + document_types: [], + owners: [], + }; + processed.data_sources = filters.data_sources.includes("*") + ? [] + : filters.data_sources; + processed.document_types = filters.document_types.includes("*") + ? [] + : filters.document_types; + processed.owners = filters.owners.includes("*") ? [] : filters.owners; + + const hasFilters = + processed.data_sources.length > 0 || + processed.document_types.length > 0 || + processed.owners.length > 0; + return hasFilters ? 
processed : undefined; + })() + : undefined; + + // Use passed previousResponseId if available, otherwise fall back to state + const responseIdToUse = previousResponseId || previousResponseIds[endpoint]; + + console.log("[CHAT] Sending streaming message:", { + conversationFilter: conversationFilter?.id, + currentConversationId, + responseIdToUse, + }); + + // Use the hook to send the message + await sendStreamingMessage({ + prompt: userMessage.content, + previousResponseId: responseIdToUse || undefined, + filters: processedFilters, + filter_id: conversationFilter?.id, // ✅ Add filter_id for this conversation + limit: parsedFilterData?.limit ?? 10, + scoreThreshold: parsedFilterData?.scoreThreshold ?? 0, + }); + scrollToBottom({ + animation: "smooth", + duration: 1000, + }); + }; + + const handleSendMessage = async ( + inputMessage: string, + previousResponseId?: string, + ) => { + if (!inputMessage.trim() || loading) return; + + const userMessage: Message = { + role: "user", + content: inputMessage.trim(), + timestamp: new Date(), + }; + + if (messages.length === 1) { + setMessages([userMessage]); + } else { + setMessages((prev) => [...prev, userMessage]); + } + setInput(""); + setLoading(true); + setIsFilterHighlighted(false); + + scrollToBottom({ + animation: "smooth", + duration: 1000, + }); + + if (asyncMode) { + await handleSSEStream(userMessage, previousResponseId); + } else { + // Original non-streaming logic + try { + const apiEndpoint = endpoint === "chat" ? "/api/chat" : "/api/langflow"; + + const requestBody: RequestBody = { + prompt: userMessage.content, + ...(parsedFilterData?.filters && + (() => { + const filters = parsedFilterData.filters; + const processed: SelectedFilters = { + data_sources: [], + document_types: [], + owners: [], + }; + // Only copy non-wildcard arrays + processed.data_sources = filters.data_sources.includes("*") + ? [] + : filters.data_sources; + processed.document_types = filters.document_types.includes("*") + ? [] + : filters.document_types; + processed.owners = filters.owners.includes("*") + ? [] + : filters.owners; + + // Only include filters if any array has values + const hasFilters = + processed.data_sources.length > 0 || + processed.document_types.length > 0 || + processed.owners.length > 0; + return hasFilters ? { filters: processed } : {}; + })()), + limit: parsedFilterData?.limit ?? 10, + scoreThreshold: parsedFilterData?.scoreThreshold ?? 
0, + }; + + // Add previous_response_id if we have one for this endpoint + const currentResponseId = previousResponseIds[endpoint]; + if (currentResponseId) { + requestBody.previous_response_id = currentResponseId; + } + + // Add filter_id if a filter is selected for this conversation + if (conversationFilter) { + requestBody.filter_id = conversationFilter.id; + } + + // Debug logging + console.log("[DEBUG] Sending message with:", { + previous_response_id: requestBody.previous_response_id, + filter_id: requestBody.filter_id, + currentConversationId, + previousResponseIds, + }); + + const response = await fetch(apiEndpoint, { + method: "POST", + headers: { + "Content-Type": "application/json", + }, + body: JSON.stringify(requestBody), + }); + + const result = await response.json(); + + if (response.ok) { + const assistantMessage: Message = { + role: "assistant", + content: result.response, + timestamp: new Date(), + }; + setMessages((prev) => [...prev, assistantMessage]); + if (result.response_id) { + cancelNudges(); + } + + // Store the response ID if present for this endpoint + if (result.response_id) { + console.log( + "[DEBUG] Received response_id:", + result.response_id, + "currentConversationId:", + currentConversationId, + ); + + setPreviousResponseIds((prev) => ({ + ...prev, + [endpoint]: result.response_id, + })); + + // If this is a new conversation (no currentConversationId), set it now + if (!currentConversationId) { + console.log( + "[DEBUG] Setting currentConversationId to:", + result.response_id, + ); + setCurrentConversationId(result.response_id); + refreshConversations(true); + } else { + console.log( + "[DEBUG] Existing conversation, doing silent refresh", + ); + // For existing conversations, do a silent refresh to keep backend in sync + refreshConversationsSilent(); + } + + // Carry forward the filter association to the new response_id + if (conversationFilter && typeof window !== "undefined") { + const newKey = `conversation_filter_${result.response_id}`; + localStorage.setItem(newKey, conversationFilter.id); + console.log( + "[DEBUG] Saved filter association:", + newKey, + "=", + conversationFilter.id, + ); + } + } + } else { + console.error("Chat failed:", result.error); + // Set chat error flag to trigger test_completion=true on health checks + setChatError(true); + const errorMessage: Message = { + role: "assistant", + content: "Sorry, I encountered an error. Please try again.", + timestamp: new Date(), + }; + setMessages((prev) => [...prev, errorMessage]); + } + } catch (error) { + console.error("Chat error:", error); + // Set chat error flag to trigger test_completion=true on health checks + setChatError(true); + const errorMessage: Message = { + role: "assistant", + content: + "Sorry, I couldn't connect to the chat service. 
Please try again.", + timestamp: new Date(), + }; + setMessages((prev) => [...prev, errorMessage]); + } + } + + setLoading(false); + }; + + const handleSubmit = async (e: React.FormEvent) => { + e.preventDefault(); + + // Check if there's an uploaded file and upload it first + let uploadedResponseId: string | null = null; + if (uploadedFile) { + // Upload the file first + const responseId = await handleFileUpload(uploadedFile); + // Clear the file after upload + setUploadedFile(null); + + // If the upload resulted in a new conversation, store the response ID + if (responseId) { + uploadedResponseId = responseId; + setPreviousResponseIds((prev) => ({ + ...prev, + [endpoint]: responseId, + })); + } + } + + // Only send message if there's input text + if (input.trim() || uploadedFile) { + // Pass the responseId from upload (if any) to handleSendMessage + handleSendMessage( + !input.trim() ? FILE_CONFIRMATION : input, + uploadedResponseId || undefined, + ); + } + }; + + const toggleFunctionCall = (functionCallId: string) => { + setExpandedFunctionCalls((prev) => { + const newSet = new Set(prev); + if (newSet.has(functionCallId)) { + newSet.delete(functionCallId); + } else { + newSet.add(functionCallId); + } + return newSet; + }); + }; + + const handleForkConversation = ( + messageIndex: number, + event?: React.MouseEvent, + ) => { + // Prevent any default behavior and stop event propagation + if (event) { + event.preventDefault(); + event.stopPropagation(); + } + + // Set interaction state to prevent auto-scroll interference + setIsUserInteracting(true); + setIsForkingInProgress(true); + + console.log("Fork conversation called for message index:", messageIndex); + + // Get messages up to and including the selected assistant message + const messagesToKeep = messages.slice(0, messageIndex + 1); + + // The selected message should be an assistant message (since fork button is only on assistant messages) + const forkedMessage = messages[messageIndex]; + if (forkedMessage.role !== "assistant") { + console.error("Fork button should only be on assistant messages"); + setIsUserInteracting(false); + setIsForkingInProgress(false); + return; + } + + // For forking, we want to continue from the response_id of the assistant message we're forking from + // Since we don't store individual response_ids per message yet, we'll use the current conversation's response_id + // This means we're continuing the conversation thread from that point + const responseIdToForkFrom = + currentConversationId || previousResponseIds[endpoint]; + + // Create a new conversation by properly forking + setMessages(messagesToKeep); + + // Use the chat context's fork method which handles creating a new conversation properly + if (forkFromResponse) { + forkFromResponse(responseIdToForkFrom || ""); + } else { + // Fallback to manual approach + setCurrentConversationId(null); // This creates a new conversation thread + + // Set the response_id we want to continue from as the previous response ID + // This tells the backend to continue the conversation from this point + setPreviousResponseIds((prev) => ({ + ...prev, + [endpoint]: responseIdToForkFrom, + })); + } + + console.log("Forked conversation with", messagesToKeep.length, "messages"); + + // Reset interaction state after a longer delay to ensure all effects complete + setTimeout(() => { + setIsUserInteracting(false); + setIsForkingInProgress(false); + console.log("Fork interaction complete, re-enabling auto effects"); + }, 500); + + // The original conversation remains unchanged in 
the sidebar + // This new forked conversation will get its own response_id when the user sends the next message + }; + + const handleSuggestionClick = (suggestion: string) => { + handleSendMessage(suggestion); + }; + + return ( + <> + {/* Debug header - only show in debug mode */} + {isDebugMode && ( +
+
+
+              {/* Async Mode Toggle */}
+
+
+
+
+              {/* Endpoint Toggle */}
+
+
+
+
+
+          )}
+
+
+
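Restored above, the conversation loader normalizes tool calls from two wire formats: Langflow chunks carry `item.tool_name` / `item.inputs` / `item.results`, while OpenAI-style deltas carry `function.name` with `arguments` JSON-encoded as a string. A sketch of the argument normalization, with a parse guard added — the diff calls `JSON.parse` unguarded, so the try/catch here is a hardening suggestion, not the shipped behavior:

function normalizeArguments(args: unknown): {
  parsed: Record<string, unknown>;
  raw: string;
} {
  if (typeof args === "string") {
    // OpenAI deltas: arguments arrive as a JSON string.
    try {
      return { parsed: JSON.parse(args), raw: args };
    } catch {
      return { parsed: {}, raw: args };
    }
  }
  // Langflow items: inputs arrive as a plain object (or are absent).
  const obj = (args as Record<string, unknown>) ?? {};
  return { parsed: obj, raw: JSON.stringify(obj) };
}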
+ {messages.length === 0 && !streamingMessage ? ( +
+
+            {isUploading ? (
+              <>
+
+
+                Processing your document...
+
+
+
+                This may take a few moments
+
+
+            ) : null}
+
+ ) : ( + <> + {messages.map((message, index) => + message.role === "user" + ? (messages[index]?.content.match(FILES_REGEX)?.[0] ?? + null) === null && ( +
+ = 2 && + (messages[index - 2]?.content.match( + FILES_REGEX, + )?.[0] ?? + undefined) && + message.content === FILE_CONFIRMATION + ? undefined + : message.content + } + files={ + index >= 2 + ? (messages[index - 2]?.content.match( + FILES_REGEX, + )?.[0] ?? undefined) + : undefined + } + /> +
+ ) + : message.role === "assistant" && + (index < 1 || + (messages[index - 1]?.content.match(FILES_REGEX)?.[0] ?? + null) === null) && ( +
+ handleForkConversation(index, e)} + animate={false} + isInactive={index < messages.length - 1} + isInitialGreeting={ + index === 0 && + messages.length === 1 && + message.content === "How can I assist?" + } + /> +
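Forking keeps `messages[0..index]` and re-points the thread at the current conversation's `response_id`, preferring the context's `forkFromResponse` helper over the manual fallback. A condensed, hypothetical sketch of that flow:

function forkAt(
  messages: { role: string }[],
  index: number,
  currentConversationId: string | null,
  previousResponseId: string | null,
  forkFromResponse?: (responseId: string) => void,
): { role: string }[] {
  // Fork buttons only appear on assistant turns; bail out otherwise.
  if (messages[index]?.role !== "assistant") return messages;
  const kept = messages.slice(0, index + 1);
  // Individual response_ids per message aren't stored yet, so the fork
  // continues from the conversation-level id.
  const responseIdToForkFrom = currentConversationId || previousResponseId;
  // Prefer the context helper, which creates the new conversation properly;
  // the caller's fallback resets the conversation id and carries the
  // response_id forward by hand.
  forkFromResponse?.(responseIdToForkFrom || "");
  return kept;
}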
+ ), + )} + + {/* Streaming Message Display */} + {streamingMessage && ( + + )} + + {/* Waiting too long indicator */} + {waitingTooLong && !streamingMessage && loading && ( +
+
+
+                The server is taking longer than expected...
+
+
+
+                  This may be due to high server load. The request will
+                  timeout after 60 seconds.
+
+
+ )} + + )} + {!streamingMessage && ( +
+ +
+ )} +
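The "taking longer than expected" banner above is driven by a 20-second timer that arms while a stream is pending and disarms as soon as the first tokens arrive. A sketch of the effect factored into a standalone hook; the hook name is an assumption:

import { useEffect, useState } from "react";

function useSlowResponseWarning(
  isStreamLoading: boolean,
  hasFirstToken: boolean,
): boolean {
  const [waitingTooLong, setWaitingTooLong] = useState(false);

  useEffect(() => {
    let timeoutId: ReturnType<typeof setTimeout> | null = null;
    if (isStreamLoading && !hasFirstToken) {
      // Arm the warning 20 s after the request starts with no output yet.
      timeoutId = setTimeout(() => setWaitingTooLong(true), 20_000);
    } else {
      // Tokens arrived (or the request ended): clear the warning.
      setWaitingTooLong(false);
    }
    return () => {
      if (timeoutId) clearTimeout(timeoutId);
    };
  }, [isStreamLoading, hasFirstToken]);

  return waitingTooLong;
}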
+
+
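Filter associations are persisted per `response_id` under a `conversation_filter_${responseId}` localStorage key in several places in this file (the streaming `onComplete`, the non-streaming path, and the conversation loader). A sketch of the implied helpers; the names are hypothetical:

function saveFilterAssociation(responseId: string, filterId: string): void {
  if (typeof window === "undefined") return; // SSR guard, as in the diff
  localStorage.setItem(`conversation_filter_${responseId}`, filterId);
}

function loadFilterAssociation(responseId: string): string | null {
  if (typeof window === "undefined") return null;
  return localStorage.getItem(`conversation_filter_${responseId}`);
}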
+ {/* Input Area - Fixed at bottom */} + { + // Handle backspace for filter clearing + if ( + e.key === "Backspace" && + selectedFilter && + input.trim() === "" + ) { + e.preventDefault(); + if (isFilterHighlighted) { + // Second backspace - remove the filter + setConversationFilter(null); + setIsFilterHighlighted(false); + } else { + // First backspace - highlight the filter + setIsFilterHighlighted(true); + } + return; + } + + // Handle Enter key for form submission + if (e.key === "Enter" && !e.shiftKey) { + e.preventDefault(); + if (input.trim() && !loading) { + // Trigger form submission by finding the form and calling submit + const form = e.currentTarget.closest("form"); + if (form) { + form.requestSubmit(); + } + } + } + }} + onFilterSelect={handleFilterSelect} + onFilePickerClick={handleFilePickerClick} + onFileSelected={setUploadedFile} + setSelectedFilter={setConversationFilter} + setIsFilterHighlighted={setIsFilterHighlighted} + /> +
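The input above implements a two-stage backspace: with an empty prompt, the first press highlights the active filter chip and the second removes it, so a filter is never deleted by accident; the same handler also routes Enter (without Shift) to `form.requestSubmit()`. A sketch of the backspace half in isolation, with structural types standing in for React's keyboard event:

function handleFilterBackspace(
  e: { key: string; preventDefault: () => void },
  state: { input: string; hasFilter: boolean; highlighted: boolean },
  actions: { clearFilter: () => void; setHighlighted: (v: boolean) => void },
): boolean {
  if (e.key !== "Backspace" || !state.hasFilter || state.input.trim() !== "") {
    return false; // not handled; let the input behave normally
  }
  e.preventDefault();
  if (state.highlighted) {
    actions.clearFilter(); // second press: remove the filter
    actions.setHighlighted(false);
  } else {
    actions.setHighlighted(true); // first press: arm the removal
  }
  return true;
}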
+ + ); } export default function ProtectedChatPage() { - return ( - -
- - - -
-
- ); + return ( + +
+ + + +
+
+ ); } diff --git a/frontend/app/onboarding/_components/ibm-onboarding.tsx b/frontend/app/onboarding/_components/ibm-onboarding.tsx index 77fef020..023b3467 100644 --- a/frontend/app/onboarding/_components/ibm-onboarding.tsx +++ b/frontend/app/onboarding/_components/ibm-onboarding.tsx @@ -1,13 +1,13 @@ import type { Dispatch, SetStateAction } from "react"; import { useEffect, useState } from "react"; +import IBMLogo from "@/components/icons/ibm-logo"; import { LabelInput } from "@/components/label-input"; import { LabelWrapper } from "@/components/label-wrapper"; -import IBMLogo from "@/components/icons/ibm-logo"; import { Switch } from "@/components/ui/switch"; import { - Tooltip, - TooltipContent, - TooltipTrigger, + Tooltip, + TooltipContent, + TooltipTrigger, } from "@/components/ui/tooltip"; import { useDebouncedValue } from "@/lib/debounce"; import type { OnboardingVariables } from "../../api/mutations/useOnboardingMutation"; @@ -18,273 +18,273 @@ import { AdvancedOnboarding } from "./advanced"; import { ModelSelector } from "./model-selector"; export function IBMOnboarding({ - isEmbedding = false, - setSettings, - sampleDataset, - setSampleDataset, - setIsLoadingModels, - alreadyConfigured = false, - existingEndpoint, - existingProjectId, - hasEnvApiKey = false, + isEmbedding = false, + setSettings, + sampleDataset, + setSampleDataset, + setIsLoadingModels, + alreadyConfigured = false, + existingEndpoint, + existingProjectId, + hasEnvApiKey = false, }: { - isEmbedding?: boolean; - setSettings: Dispatch>; - sampleDataset: boolean; - setSampleDataset: (dataset: boolean) => void; - setIsLoadingModels?: (isLoading: boolean) => void; - alreadyConfigured?: boolean; - existingEndpoint?: string; - existingProjectId?: string; - hasEnvApiKey?: boolean; + isEmbedding?: boolean; + setSettings: Dispatch>; + sampleDataset: boolean; + setSampleDataset: (dataset: boolean) => void; + setIsLoadingModels?: (isLoading: boolean) => void; + alreadyConfigured?: boolean; + existingEndpoint?: string; + existingProjectId?: string; + hasEnvApiKey?: boolean; }) { - const [endpoint, setEndpoint] = useState( - alreadyConfigured ? "" : (existingEndpoint || "https://us-south.ml.cloud.ibm.com"), - ); - const [apiKey, setApiKey] = useState(""); - const [getFromEnv, setGetFromEnv] = useState( - hasEnvApiKey && !alreadyConfigured, - ); - const [projectId, setProjectId] = useState( - alreadyConfigured ? "" : (existingProjectId || ""), - ); + const [endpoint, setEndpoint] = useState( + alreadyConfigured + ? "" + : existingEndpoint || "https://us-south.ml.cloud.ibm.com", + ); + const [apiKey, setApiKey] = useState(""); + const [getFromEnv, setGetFromEnv] = useState( + hasEnvApiKey && !alreadyConfigured, + ); + const [projectId, setProjectId] = useState( + alreadyConfigured ? 
"" : existingProjectId || "", + ); - const options = [ - { - value: "https://us-south.ml.cloud.ibm.com", - label: "https://us-south.ml.cloud.ibm.com", - default: true, - }, - { - value: "https://eu-de.ml.cloud.ibm.com", - label: "https://eu-de.ml.cloud.ibm.com", - default: false, - }, - { - value: "https://eu-gb.ml.cloud.ibm.com", - label: "https://eu-gb.ml.cloud.ibm.com", - default: false, - }, - { - value: "https://au-syd.ml.cloud.ibm.com", - label: "https://au-syd.ml.cloud.ibm.com", - default: false, - }, - { - value: "https://jp-tok.ml.cloud.ibm.com", - label: "https://jp-tok.ml.cloud.ibm.com", - default: false, - }, - { - value: "https://ca-tor.ml.cloud.ibm.com", - label: "https://ca-tor.ml.cloud.ibm.com", - default: false, - }, - ]; - const debouncedEndpoint = useDebouncedValue(endpoint, 500); - const debouncedApiKey = useDebouncedValue(apiKey, 500); - const debouncedProjectId = useDebouncedValue(projectId, 500); + const options = [ + { + value: "https://us-south.ml.cloud.ibm.com", + label: "https://us-south.ml.cloud.ibm.com", + default: true, + }, + { + value: "https://eu-de.ml.cloud.ibm.com", + label: "https://eu-de.ml.cloud.ibm.com", + default: false, + }, + { + value: "https://eu-gb.ml.cloud.ibm.com", + label: "https://eu-gb.ml.cloud.ibm.com", + default: false, + }, + { + value: "https://au-syd.ml.cloud.ibm.com", + label: "https://au-syd.ml.cloud.ibm.com", + default: false, + }, + { + value: "https://jp-tok.ml.cloud.ibm.com", + label: "https://jp-tok.ml.cloud.ibm.com", + default: false, + }, + { + value: "https://ca-tor.ml.cloud.ibm.com", + label: "https://ca-tor.ml.cloud.ibm.com", + default: false, + }, + ]; + const debouncedEndpoint = useDebouncedValue(endpoint, 500); + const debouncedApiKey = useDebouncedValue(apiKey, 500); + const debouncedProjectId = useDebouncedValue(projectId, 500); - // Fetch models from API when all credentials are provided - const { - data: modelsData, - isLoading: isLoadingModels, - error: modelsError, - } = useGetIBMModelsQuery( - { - endpoint: debouncedEndpoint ? debouncedEndpoint : undefined, - apiKey: getFromEnv ? "" : (debouncedApiKey ? debouncedApiKey : undefined), - projectId: debouncedProjectId ? debouncedProjectId : undefined, - }, - { - enabled: - !!debouncedEndpoint || - !!debouncedApiKey || - !!debouncedProjectId || - getFromEnv || - alreadyConfigured, - }, - ); + // Fetch models from API when all credentials are provided + const { + data: modelsData, + isLoading: isLoadingModels, + error: modelsError, + } = useGetIBMModelsQuery( + { + endpoint: debouncedEndpoint ? debouncedEndpoint : undefined, + apiKey: getFromEnv ? "" : debouncedApiKey ? debouncedApiKey : undefined, + projectId: debouncedProjectId ? 
debouncedProjectId : undefined, + }, + { + enabled: + (!!debouncedEndpoint && !!debouncedApiKey && !!debouncedProjectId) || + getFromEnv || + alreadyConfigured, + }, + ); - // Use custom hook for model selection logic - const { - languageModel, - embeddingModel, - setLanguageModel, - setEmbeddingModel, - languageModels, - embeddingModels, - } = useModelSelection(modelsData, isEmbedding); + // Use custom hook for model selection logic + const { + languageModel, + embeddingModel, + setLanguageModel, + setEmbeddingModel, + languageModels, + embeddingModels, + } = useModelSelection(modelsData, isEmbedding); - const handleGetFromEnvChange = (fromEnv: boolean) => { - setGetFromEnv(fromEnv); - if (fromEnv) { - setApiKey(""); - } - setEmbeddingModel?.(""); - setLanguageModel?.(""); - }; + const handleGetFromEnvChange = (fromEnv: boolean) => { + setGetFromEnv(fromEnv); + if (fromEnv) { + setApiKey(""); + } + setEmbeddingModel?.(""); + setLanguageModel?.(""); + }; - const handleSampleDatasetChange = (dataset: boolean) => { - setSampleDataset(dataset); - }; + const handleSampleDatasetChange = (dataset: boolean) => { + setSampleDataset(dataset); + }; - useEffect(() => { - setIsLoadingModels?.(isLoadingModels); - }, [isLoadingModels, setIsLoadingModels]); + useEffect(() => { + setIsLoadingModels?.(isLoadingModels); + }, [isLoadingModels, setIsLoadingModels]); - // Update settings when values change - useUpdateSettings( - "watsonx", - { - endpoint, - apiKey, - projectId, - languageModel, - embeddingModel, - }, - setSettings, - isEmbedding, - ); + // Update settings when values change + useUpdateSettings( + "watsonx", + { + endpoint, + apiKey, + projectId, + languageModel, + embeddingModel, + }, + setSettings, + isEmbedding, + ); - return ( - <> -
- -
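The models query above debounces all three credentials for 500 ms and — after this fix — only fires once the endpoint, API key, and project ID are all present, rather than as soon as any one of them is. A sketch of the corrected gate:

function shouldFetchModels(opts: {
  endpoint?: string;
  apiKey?: string;
  projectId?: string;
  getFromEnv: boolean;
  alreadyConfigured: boolean;
}): boolean {
  // All three manual credentials must be present before validating...
  const allManualCredentials =
    !!opts.endpoint && !!opts.apiKey && !!opts.projectId;
  // ...unless the key comes from the environment or was configured earlier.
  return allManualCredentials || opts.getFromEnv || opts.alreadyConfigured;
}

Under the old OR predicate, typing the first character of the endpoint was enough to send a validation request that was guaranteed to fail.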
- {} : setEndpoint} - searchPlaceholder="Search endpoint..." - noOptionsPlaceholder={ - alreadyConfigured - ? "https://•••••••••••••••••••••••••••••••••••••••••" - : "No endpoints available" - } - placeholder="Select endpoint..." - /> - {alreadyConfigured && ( -

-              Reusing endpoint from model provider selection.
-
-          )}
-
-
+ return ( + <> +
+ +
+ {} : setEndpoint} + searchPlaceholder="Search endpoint..." + noOptionsPlaceholder={ + alreadyConfigured + ? "https://•••••••••••••••••••••••••••••••••••••••••" + : "No endpoints available" + } + placeholder="Select endpoint..." + /> + {alreadyConfigured && ( +

+              Reusing endpoint from model provider selection.
+
+          )}
+
+
-
- setProjectId(e.target.value)} - disabled={alreadyConfigured} - /> - {alreadyConfigured && ( -

-              Reusing project ID from model provider selection.
-
-          )}
-
- - - -
- -
-
- {!hasEnvApiKey && !alreadyConfigured && ( - - watsonx API key not detected in the environment. - - )} -
-
- {!getFromEnv && !alreadyConfigured && ( -
- setApiKey(e.target.value)} - /> - {isLoadingModels && ( -

- Validating API key... -

- )} - {modelsError && ( -

- Invalid watsonx API key. Verify or replace the key. -

- )} -
- )} - {alreadyConfigured && ( -
- setApiKey(e.target.value)} - disabled={true} - /> -

- Reusing API key from model provider selection. -

-
- )} - {getFromEnv && isLoadingModels && ( -

- Validating configuration... -

- )} - {getFromEnv && modelsError && ( -

- Connection failed. Check your configuration. -

- )} -
- } - languageModels={languageModels} - embeddingModels={embeddingModels} - languageModel={languageModel} - embeddingModel={embeddingModel} - sampleDataset={sampleDataset} - setLanguageModel={setLanguageModel} - setEmbeddingModel={setEmbeddingModel} - setSampleDataset={handleSampleDatasetChange} - /> - - ); +
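When "get from env" is enabled, the browser never holds the key: the query above is sent with an empty `apiKey` and the backend substitutes the environment value; toggling the switch also clears any selected models so they are re-derived under the new credentials. A sketch of the key resolution — the helper name is hypothetical:

function resolveApiKeyParam(
  getFromEnv: boolean,
  typedKey: string,
): string | undefined {
  // Empty string signals "use the server-side environment key";
  // undefined means "no key provided at all".
  if (getFromEnv) return "";
  return typedKey ? typedKey : undefined;
}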
+ setProjectId(e.target.value)} + disabled={alreadyConfigured} + /> + {alreadyConfigured && ( +

+ Reusing project ID from model provider selection. +

+ )} +
+ + + +
+ +
+
+ {!hasEnvApiKey && !alreadyConfigured && ( + + watsonx API key not detected in the environment. + + )} +
+
+ {!getFromEnv && !alreadyConfigured && ( +
+ setApiKey(e.target.value)} + /> + {isLoadingModels && ( +

+ Validating API key... +

+ )} + {modelsError && ( +

+ Invalid watsonx API key. Verify or replace the key. +

+ )} +
+ )} + {alreadyConfigured && ( +
+ setApiKey(e.target.value)} + disabled={true} + /> +

+ Reusing API key from model provider selection. +

+
+ )} + {getFromEnv && isLoadingModels && ( +

+ Validating configuration... +

+ )} + {getFromEnv && modelsError && ( +

+ Connection failed. Check your configuration. +

+ )} +
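The four status strings under the key field form a small matrix over (validating | failed) × (manual key | env key). A sketch of that selection, reusing the exact strings from the markup above:

function validationMessage(
  isLoading: boolean,
  hasError: boolean,
  getFromEnv: boolean,
): string | null {
  if (isLoading) {
    return getFromEnv ? "Validating configuration..." : "Validating API key...";
  }
  if (hasError) {
    return getFromEnv
      ? "Connection failed. Check your configuration."
      : "Invalid watsonx API key. Verify or replace the key.";
  }
  return null; // credentials look good; no status line rendered
}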
+ } + languageModels={languageModels} + embeddingModels={embeddingModels} + languageModel={languageModel} + embeddingModel={embeddingModel} + sampleDataset={sampleDataset} + setLanguageModel={setLanguageModel} + setEmbeddingModel={setEmbeddingModel} + setSampleDataset={handleSampleDatasetChange} + /> + + ); } diff --git a/frontend/app/onboarding/_components/onboarding-card.tsx b/frontend/app/onboarding/_components/onboarding-card.tsx index f82723f1..be7a25ef 100644 --- a/frontend/app/onboarding/_components/onboarding-card.tsx +++ b/frontend/app/onboarding/_components/onboarding-card.tsx @@ -3,12 +3,13 @@ import { useQueryClient } from "@tanstack/react-query"; import { AnimatePresence, motion } from "framer-motion"; import { X } from "lucide-react"; -import { useEffect, useState } from "react"; +import { useEffect, useRef, useState } from "react"; import { toast } from "sonner"; import { type OnboardingVariables, useOnboardingMutation, } from "@/app/api/mutations/useOnboardingMutation"; +import { useOnboardingRollbackMutation } from "@/app/api/mutations/useOnboardingRollbackMutation"; import { useGetSettingsQuery } from "@/app/api/queries/useGetSettingsQuery"; import { useGetTasksQuery } from "@/app/api/queries/useGetTasksQuery"; import type { ProviderHealthResponse } from "@/app/api/queries/useProviderHealthQuery"; @@ -170,12 +171,32 @@ const OnboardingCard = ({ const [error, setError] = useState(null); + // Track which tasks we've already handled to prevent infinite loops + const handledFailedTasksRef = useRef>(new Set()); + // Query tasks to track completion const { data: tasks } = useGetTasksQuery({ enabled: currentStep !== null, // Only poll when onboarding has started refetchInterval: currentStep !== null ? 1000 : false, // Poll every 1 second during onboarding }); + // Rollback mutation + const rollbackMutation = useOnboardingRollbackMutation({ + onSuccess: () => { + console.log("Onboarding rolled back successfully"); + // Reset to provider selection step + // Error message is already set before calling mutate + setCurrentStep(null); + }, + onError: (error) => { + console.error("Failed to rollback onboarding", error); + // Preserve existing error message if set, otherwise show rollback error + setError((prevError) => prevError || `Failed to rollback: ${error.message}`); + // Still reset to provider selection even if rollback fails + setCurrentStep(null); + }, + }); + // Monitor tasks and call onComplete when all tasks are done useEffect(() => { if (currentStep === null || !tasks || !isEmbedding) { @@ -190,11 +211,86 @@ const OnboardingCard = ({ task.status === "processing", ); + // Check if any file failed in completed tasks + const completedTasks = tasks.filter( + (task) => task.status === "completed" + ); + + // Check if any completed task has at least one failed file + const taskWithFailedFile = completedTasks.find((task) => { + // Must have files object + if (!task.files || typeof task.files !== "object") { + return false; + } + + const fileEntries = Object.values(task.files); + + // Must have at least one file + if (fileEntries.length === 0) { + return false; + } + + // Check if any file has failed status + const hasFailedFile = fileEntries.some( + (file) => file.status === "failed" || file.status === "error" + ); + + return hasFailedFile; + }); + + // If any file failed, show error and jump back one step (like onboardingMutation.onError) + // Only handle if we haven't already handled this task + if ( + taskWithFailedFile && + !rollbackMutation.isPending && + !isCompleted && + 
!handledFailedTasksRef.current.has(taskWithFailedFile.task_id) + ) { + console.error("File failed in task, jumping back one step", taskWithFailedFile); + + // Mark this task as handled to prevent infinite loops + handledFailedTasksRef.current.add(taskWithFailedFile.task_id); + + // Extract error messages from failed files + const errorMessages: string[] = []; + if (taskWithFailedFile.files) { + Object.values(taskWithFailedFile.files).forEach((file) => { + if ((file.status === "failed" || file.status === "error") && file.error) { + errorMessages.push(file.error); + } + }); + } + + // Also check task-level error + if (taskWithFailedFile.error) { + errorMessages.push(taskWithFailedFile.error); + } + + // Use the first error message, or a generic message if no errors found + const errorMessage = errorMessages.length > 0 + ? errorMessages[0] + : "Sample data file failed to ingest. Please try again with a different configuration."; + + // Set error message and jump back one step (exactly like onboardingMutation.onError) + setError(errorMessage); + setCurrentStep(totalSteps); + // Jump back one step after 1 second (go back to the step before ingestion) + // For embedding: totalSteps is 4, ingestion is step 3, so go back to step 2 + // For LLM: totalSteps is 3, ingestion is step 2, so go back to step 1 + setTimeout(() => { + // Go back to the step before the last step (which is ingestion) + const previousStep = totalSteps > 1 ? totalSteps - 2 : 0; + setCurrentStep(previousStep); + }, 1000); + return; + } + // If no active tasks and we've started onboarding, complete it if ( (!activeTasks || (activeTasks.processed_files ?? 0) > 0) && tasks.length > 0 && - !isCompleted + !isCompleted && + !taskWithFailedFile ) { // Set to final step to show "Done" setCurrentStep(totalSteps); @@ -203,7 +299,7 @@ const OnboardingCard = ({ onComplete(); }, 1000); } - }, [tasks, currentStep, onComplete, isCompleted, isEmbedding, totalSteps]); + }, [tasks, currentStep, onComplete, isCompleted, isEmbedding, totalSteps, rollbackMutation]); // Mutations const onboardingMutation = useOnboardingMutation({ @@ -507,7 +603,7 @@ const OnboardingCard = ({ hasEnvApiKey={ currentSettings?.providers?.openai?.has_api_key === true } - alreadyConfigured={providerAlreadyConfigured} + alreadyConfigured={providerAlreadyConfigured && modelProvider === "openai"} /> @@ -517,7 +613,7 @@ const OnboardingCard = ({ setSampleDataset={setSampleDataset} setIsLoadingModels={setIsLoadingModels} isEmbedding={isEmbedding} - alreadyConfigured={providerAlreadyConfigured} + alreadyConfigured={providerAlreadyConfigured && modelProvider === "watsonx"} existingEndpoint={currentSettings?.providers?.watsonx?.endpoint} existingProjectId={currentSettings?.providers?.watsonx?.project_id} hasEnvApiKey={currentSettings?.providers?.watsonx?.has_api_key === true} @@ -530,7 +626,7 @@ const OnboardingCard = ({ setSampleDataset={setSampleDataset} setIsLoadingModels={setIsLoadingModels} isEmbedding={isEmbedding} - alreadyConfigured={providerAlreadyConfigured} + alreadyConfigured={providerAlreadyConfigured && modelProvider === "ollama"} existingEndpoint={currentSettings?.providers?.ollama?.endpoint} /> diff --git a/frontend/app/onboarding/_components/onboarding-upload.tsx b/frontend/app/onboarding/_components/onboarding-upload.tsx index ce7a0f91..b434cce9 100644 --- a/frontend/app/onboarding/_components/onboarding-upload.tsx +++ b/frontend/app/onboarding/_components/onboarding-upload.tsx @@ -1,3 +1,4 @@ +import { X } from "lucide-react"; import { AnimatePresence, motion } 
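The failure check in the card effect above walks every completed task's `files` map inline. Factored out, it reduces to two small helpers — a sketch only, with the task and file shapes (`task_id`, `status`, an optional `files` record of `{ status, error? }`, and an optional task-level `error`) inferred from this effect rather than taken from a documented API:

```typescript
// Hedged sketch: shapes below are assumptions inferred from the effect above.
interface TaskFile {
  status: string;
  error?: string;
}

interface Task {
  task_id: string;
  status: string;
  files?: Record<string, TaskFile>;
  error?: string;
}

// Find the first completed task that contains at least one failed file.
function findFailedTask(tasks: Task[]): Task | undefined {
  return tasks
    .filter((t) => t.status === "completed")
    .find((t) => {
      const files = t.files ? Object.values(t.files) : [];
      return files.some((f) => f.status === "failed" || f.status === "error");
    });
}

// Prefer a file-level error message, falling back to the task-level one.
function firstIngestError(task: Task): string | null {
  const files = task.files ? Object.values(task.files) : [];
  const failed = files.find(
    (f) => (f.status === "failed" || f.status === "error") && f.error,
  );
  return failed?.error ?? task.error ?? null;
}
```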
from "motion/react"; import { type ChangeEvent, useEffect, useRef, useState } from "react"; import { toast } from "sonner"; @@ -7,13 +8,13 @@ import { useGetTasksQuery } from "@/app/api/queries/useGetTasksQuery"; import { AnimatedProviderSteps } from "@/app/onboarding/_components/animated-provider-steps"; import { Button } from "@/components/ui/button"; import { - ONBOARDING_UPLOAD_STEPS_KEY, - ONBOARDING_USER_DOC_FILTER_ID_KEY, + ONBOARDING_UPLOAD_STEPS_KEY, + ONBOARDING_USER_DOC_FILTER_ID_KEY, } from "@/lib/constants"; import { uploadFile } from "@/lib/upload-utils"; interface OnboardingUploadProps { - onComplete: () => void; + onComplete: () => void; } const OnboardingUpload = ({ onComplete }: OnboardingUploadProps) => { @@ -21,6 +22,7 @@ const OnboardingUpload = ({ onComplete }: OnboardingUploadProps) => { const [isUploading, setIsUploading] = useState(false); const [currentStep, setCurrentStep] = useState(null); const [uploadedFilename, setUploadedFilename] = useState(null); + const [uploadedTaskId, setUploadedTaskId] = useState(null); const [shouldCreateFilter, setShouldCreateFilter] = useState(false); const [isCreatingFilter, setIsCreatingFilter] = useState(false); @@ -43,23 +45,26 @@ const OnboardingUpload = ({ onComplete }: OnboardingUploadProps) => { // Monitor tasks and call onComplete when file processing is done useEffect(() => { - if (currentStep === null || !tasks) { + if (currentStep === null || !tasks || !uploadedTaskId) { return; } - // Check if there are any active tasks (pending, running, or processing) - const activeTasks = tasks.find( - (task) => - task.status === "pending" || - task.status === "running" || - task.status === "processing", - ); + // Find the task by task ID from the upload response + const matchingTask = tasks.find((task) => task.task_id === uploadedTaskId); - // If no active tasks and we have more than 1 task (initial + new upload), complete it - if ( - (!activeTasks || (activeTasks.processed_files ?? 0) > 0) && - tasks.length > 1 - ) { + // If no matching task found, wait for it to appear + if (!matchingTask) { + return; + } + + // Check if the matching task is still active (pending, running, or processing) + const isTaskActive = + matchingTask.status === "pending" || + matchingTask.status === "running" || + matchingTask.status === "processing"; + + // If task is completed or has processed files, complete the onboarding step + if (!isTaskActive || (matchingTask.processed_files ?? 
0) > 0) { // Set to final step to show "Done" setCurrentStep(STEP_LIST.length); @@ -91,6 +96,7 @@ const OnboardingUpload = ({ onComplete }: OnboardingUploadProps) => { icon: "file", }); + // Wait for filter creation to complete before proceeding createFilterMutation .mutateAsync({ name: displayName, @@ -114,18 +120,36 @@ const OnboardingUpload = ({ onComplete }: OnboardingUploadProps) => { }) .finally(() => { setIsCreatingFilter(false); + // Refetch nudges to get new ones + refetchNudges(); + + // Wait a bit before completing (after filter is created) + setTimeout(() => { + onComplete(); + }, 1000); }); + } else { + // No filter to create, just complete + // Refetch nudges to get new ones + refetchNudges(); + + // Wait a bit before completing + setTimeout(() => { + onComplete(); + }, 1000); } - - // Refetch nudges to get new ones - refetchNudges(); - - // Wait a bit before completing - setTimeout(() => { - onComplete(); - }, 1000); } - }, [tasks, currentStep, onComplete, refetchNudges, shouldCreateFilter, uploadedFilename]); + }, [ + tasks, + currentStep, + onComplete, + refetchNudges, + shouldCreateFilter, + uploadedFilename, + uploadedTaskId, + createFilterMutation, + isCreatingFilter, + ]); const resetFileInput = () => { if (fileInputRef.current) { @@ -144,6 +168,11 @@ const OnboardingUpload = ({ onComplete }: OnboardingUploadProps) => { const result = await uploadFile(file, true, true); // Pass createFilter=true console.log("Document upload task started successfully"); + // Store task ID to track the specific upload task + if (result.taskId) { + setUploadedTaskId(result.taskId); + } + // Store filename and createFilter flag in state to create filter after ingestion succeeds if (result.createFilter && result.filename) { setUploadedFilename(result.filename); @@ -176,6 +205,7 @@ const OnboardingUpload = ({ onComplete }: OnboardingUploadProps) => { // Reset on error setCurrentStep(null); + setUploadedTaskId(null); } finally { setIsUploading(false); } diff --git a/frontend/components/chat-renderer.tsx b/frontend/components/chat-renderer.tsx index 6804b065..a0e2a9d4 100644 --- a/frontend/components/chat-renderer.tsx +++ b/frontend/components/chat-renderer.tsx @@ -47,8 +47,7 @@ export function ChatRenderer({ refreshConversations, startNewConversation, setConversationFilter, - setCurrentConversationId, - setPreviousResponseIds, + setOnboardingComplete, } = useChat(); // Initialize onboarding state based on local storage and settings @@ -170,12 +169,17 @@ export function ChatRenderer({ localStorage.removeItem(ONBOARDING_UPLOAD_STEPS_KEY); } - // Clear ALL conversation state so next message starts fresh - await startNewConversation(); + // Mark onboarding as complete in context + setOnboardingComplete(true); - // Store the user document filter as default for new conversations and load it + // Store the user document filter as default for new conversations FIRST + // This must happen before startNewConversation() so the filter is available await storeDefaultFilterForNewConversations(true); + // Clear ALL conversation state so next message starts fresh + // This will pick up the default filter we just set + await startNewConversation(); + // Clean up onboarding filter IDs now that we've set the default if (typeof window !== "undefined") { localStorage.removeItem(ONBOARDING_OPENRAG_DOCS_FILTER_ID_KEY); @@ -202,6 +206,8 @@ export function ChatRenderer({ localStorage.removeItem(ONBOARDING_CARD_STEPS_KEY); localStorage.removeItem(ONBOARDING_UPLOAD_STEPS_KEY); } + // Mark onboarding as complete in 
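The upload flow above now keys completion off the specific upload's `task_id` instead of counting tasks. That state machine can be expressed as one pure function — a sketch assuming the same task shape polled in the effect:

```typescript
// Hedged sketch: "active" statuses mirror the ones polled for above.
type UploadTaskState = "waiting" | "active" | "done";

function uploadTaskState(
  tasks: { task_id: string; status: string; processed_files?: number }[],
  uploadedTaskId: string | null,
): UploadTaskState {
  if (!uploadedTaskId) return "waiting";
  const task = tasks.find((t) => t.task_id === uploadedTaskId);
  if (!task) return "waiting"; // task not visible yet — keep polling
  const active =
    task.status === "pending" ||
    task.status === "running" ||
    task.status === "processing";
  // Done once the task leaves an active state or reports processed files.
  return active && (task.processed_files ?? 0) === 0 ? "active" : "done";
}
```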
context + setOnboardingComplete(true); // Store the OpenRAG docs filter as default for new conversations storeDefaultFilterForNewConversations(false); setShowLayout(true); diff --git a/frontend/components/provider-health-banner.tsx b/frontend/components/provider-health-banner.tsx index 1a91a601..3bf5cfb1 100644 --- a/frontend/components/provider-health-banner.tsx +++ b/frontend/components/provider-health-banner.tsx @@ -5,125 +5,131 @@ import { useRouter } from "next/navigation"; import { useProviderHealthQuery } from "@/app/api/queries/useProviderHealthQuery"; import type { ModelProvider } from "@/app/settings/_helpers/model-helpers"; import { Banner, BannerIcon, BannerTitle } from "@/components/ui/banner"; -import { cn } from "@/lib/utils"; import { useChat } from "@/contexts/chat-context"; +import { cn } from "@/lib/utils"; import { Button } from "./ui/button"; interface ProviderHealthBannerProps { - className?: string; + className?: string; } // Custom hook to check provider health status export function useProviderHealth() { - const { hasChatError } = useChat(); - const { - data: health, - isLoading, - isFetching, - error, - isError, - } = useProviderHealthQuery({ - test_completion: hasChatError, // Use test_completion=true when chat errors occur - }); + const { hasChatError } = useChat(); + const { + data: health, + isLoading, + isFetching, + error, + isError, + } = useProviderHealthQuery({ + test_completion: hasChatError, // Use test_completion=true when chat errors occur + }); - const isHealthy = health?.status === "healthy" && !isError; - // Only consider unhealthy if backend is up but provider validation failed - // Don't show banner if backend is unavailable - const isUnhealthy = - health?.status === "unhealthy" || health?.status === "error"; - const isBackendUnavailable = - health?.status === "backend-unavailable" || isError; + const isHealthy = health?.status === "healthy" && !isError; + // Only consider unhealthy if backend is up but provider validation failed + // Don't show banner if backend is unavailable + const isUnhealthy = + health?.status === "unhealthy" || health?.status === "error"; + const isBackendUnavailable = + health?.status === "backend-unavailable" || isError; - return { - health, - isLoading, - isFetching, - error, - isError, - isHealthy, - isUnhealthy, - isBackendUnavailable, - }; + return { + health, + isLoading, + isFetching, + error, + isError, + isHealthy, + isUnhealthy, + isBackendUnavailable, + }; } const providerTitleMap: Record = { - openai: "OpenAI", - anthropic: "Anthropic", - ollama: "Ollama", - watsonx: "IBM watsonx.ai", + openai: "OpenAI", + anthropic: "Anthropic", + ollama: "Ollama", + watsonx: "IBM watsonx.ai", }; export function ProviderHealthBanner({ className }: ProviderHealthBannerProps) { - const { isLoading, isHealthy, isUnhealthy, health } = useProviderHealth(); - const router = useRouter(); + const { isLoading, isHealthy, isUnhealthy, health } = useProviderHealth(); + const router = useRouter(); - // Only show banner when provider is unhealthy (not when backend is unavailable) - if (isLoading || isHealthy) { - return null; - } + // Only show banner when provider is unhealthy (not when backend is unavailable) + if (isLoading || isHealthy) { + return null; + } - if (isUnhealthy) { - const llmProvider = health?.llm_provider || health?.provider; - const embeddingProvider = health?.embedding_provider; - const llmError = health?.llm_error; - const embeddingError = health?.embedding_error; + if (isUnhealthy) { + const llmProvider = 
health?.llm_provider || health?.provider; + const embeddingProvider = health?.embedding_provider; + const llmError = health?.llm_error; + const embeddingError = health?.embedding_error; - // Determine which provider has the error - let errorProvider: string | undefined; - let errorMessage: string; + // Determine which provider has the error + let errorProvider: string | undefined; + let errorMessage: string; - if (llmError && embeddingError) { - // Both have errors - show combined message - errorMessage = health?.message || "Provider validation failed"; - errorProvider = undefined; // Don't link to a specific provider - } else if (llmError) { - // Only LLM has error - errorProvider = llmProvider; - errorMessage = llmError; - } else if (embeddingError) { - // Only embedding has error - errorProvider = embeddingProvider; - errorMessage = embeddingError; - } else { - // Fallback to original message - errorMessage = health?.message || "Provider validation failed"; - errorProvider = llmProvider; - } + if (llmError && embeddingError) { + // Both have errors - check if they're the same + if (llmError === embeddingError) { + // Same error for both - show once + errorMessage = llmError; + } else { + // Different errors - show both + errorMessage = `${llmError}; ${embeddingError}`; + } + errorProvider = undefined; // Don't link to a specific provider + } else if (llmError) { + // Only LLM has error + errorProvider = llmProvider; + errorMessage = llmError; + } else if (embeddingError) { + // Only embedding has error + errorProvider = embeddingProvider; + errorMessage = embeddingError; + } else { + // Fallback to original message + errorMessage = health?.message || "Provider validation failed"; + errorProvider = llmProvider; + } - const providerTitle = errorProvider - ? providerTitleMap[errorProvider as ModelProvider] || errorProvider - : "Provider"; + const providerTitle = errorProvider + ? providerTitleMap[errorProvider as ModelProvider] || errorProvider + : "Provider"; - const settingsUrl = errorProvider - ? `/settings?setup=${errorProvider}` - : "/settings"; + const settingsUrl = errorProvider + ? `/settings?setup=${errorProvider}` + : "/settings"; - return ( - - - - {llmError && embeddingError ? ( - <>Provider errors - {errorMessage} - ) : ( - <> - {providerTitle} error - {errorMessage} - - )} - - - - ); - } + return ( + + + + {llmError && embeddingError ? 
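The branching above (dedupe identical LLM/embedding errors, join distinct ones, only link a settings page when exactly one provider is at fault) reduces to a small pure function. A sketch, assuming the health payload fields referenced in this component:

```typescript
// Hedged sketch of the error-resolution rules used above.
interface ProviderHealth {
  provider?: string;
  llm_provider?: string;
  embedding_provider?: string;
  llm_error?: string;
  embedding_error?: string;
  message?: string;
}

function resolveProviderError(health: ProviderHealth): {
  provider?: string;
  message: string;
} {
  const { llm_error, embedding_error } = health;
  if (llm_error && embedding_error) {
    // Both failed: show one message if identical, otherwise join them,
    // and omit the provider so the banner doesn't link to a single page.
    return {
      message:
        llm_error === embedding_error
          ? llm_error
          : `${llm_error}; ${embedding_error}`,
    };
  }
  if (llm_error) {
    return { provider: health.llm_provider ?? health.provider, message: llm_error };
  }
  if (embedding_error) {
    return { provider: health.embedding_provider, message: embedding_error };
  }
  return {
    provider: health.llm_provider ?? health.provider,
    message: health.message ?? "Provider validation failed",
  };
}
```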
( + <>Provider errors - {errorMessage} + ) : ( + <> + {providerTitle} error - {errorMessage} + + )} + + + + ); + } - return null; + return null; } diff --git a/frontend/contexts/chat-context.tsx b/frontend/contexts/chat-context.tsx index 59b5edeb..f6f1a1e4 100644 --- a/frontend/contexts/chat-context.tsx +++ b/frontend/contexts/chat-context.tsx @@ -10,6 +10,8 @@ import { useRef, useState, } from "react"; +import { ONBOARDING_STEP_KEY } from "@/lib/constants"; +import { useGetSettingsQuery } from "@/app/api/queries/useGetSettingsQuery"; export type EndpointType = "chat" | "langflow"; @@ -81,6 +83,8 @@ interface ChatContextType { setConversationFilter: (filter: KnowledgeFilter | null, responseId?: string | null) => void; hasChatError: boolean; setChatError: (hasError: boolean) => void; + isOnboardingComplete: boolean; + setOnboardingComplete: (complete: boolean) => void; } const ChatContext = createContext(undefined); @@ -111,6 +115,46 @@ export function ChatProvider({ children }: ChatProviderProps) { const [conversationFilter, setConversationFilterState] = useState(null); const [hasChatError, setChatError] = useState(false); + + // Get settings to check if onboarding was completed (settings.edited) + const { data: settings } = useGetSettingsQuery(); + + // Check if onboarding is complete + // Onboarding is complete if: + // 1. settings.edited is true (backend confirms onboarding was completed) + // 2. AND onboarding step key is null (local onboarding flow is done) + const [isOnboardingComplete, setIsOnboardingComplete] = useState(() => { + if (typeof window === "undefined") return false; + // Default to false if settings not loaded yet + return false; + }); + + // Sync onboarding completion state with settings.edited and localStorage + useEffect(() => { + const checkOnboarding = () => { + if (typeof window !== "undefined") { + // Onboarding is complete if settings.edited is true AND step key is null + const stepKeyExists = localStorage.getItem(ONBOARDING_STEP_KEY) !== null; + const isEdited = settings?.edited === true; + // Complete if edited is true and step key doesn't exist (onboarding flow finished) + setIsOnboardingComplete(isEdited && !stepKeyExists); + } + }; + + // Check on mount and when settings change + checkOnboarding(); + + // Listen for storage events (for cross-tab sync) + window.addEventListener("storage", checkOnboarding); + + return () => { + window.removeEventListener("storage", checkOnboarding); + }; + }, [settings?.edited]); + + const setOnboardingComplete = useCallback((complete: boolean) => { + setIsOnboardingComplete(complete); + }, []); // Listen for ingestion failures and set chat error flag useEffect(() => { @@ -228,6 +272,10 @@ export function ChatProvider({ children }: ChatProviderProps) { const startNewConversation = useCallback(async () => { console.log("[CONVERSATION] Starting new conversation"); + // Check if there's existing conversation data - if so, this is a manual "new conversation" action + // Check state values before clearing them + const hasExistingConversation = conversationData !== null || placeholderConversation !== null; + // Clear current conversation data and reset state setCurrentConversationId(null); setPreviousResponseIds({ chat: null, langflow: null }); @@ -261,15 +309,22 @@ export function ChatProvider({ children }: ChatProviderProps) { setConversationFilterState(null); } } else { - console.log("[CONVERSATION] No default filter set"); - setConversationFilterState(null); + // No default filter in localStorage + if (hasExistingConversation) 
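The completion check earlier in this hunk combines a backend flag (`settings.edited`) with local onboarding state and keeps tabs in sync via storage events. Sketched as a standalone hook — the constant's name and value here are stand-ins, since the real one comes from `@/lib/constants`:

```typescript
import { useEffect, useState } from "react";

// Hedged sketch: onboarding counts as complete only when the backend says the
// config was edited AND the local step marker has been cleared.
const ONBOARDING_STEP_KEY = "onboarding-step"; // assumed value, see lib/constants

export function useOnboardingComplete(edited: boolean | undefined): boolean {
  const [complete, setComplete] = useState(false);

  useEffect(() => {
    const check = () => {
      const stepKeyExists =
        localStorage.getItem(ONBOARDING_STEP_KEY) !== null;
      setComplete(edited === true && !stepKeyExists);
    };
    check(); // on mount and whenever the backend flag changes
    window.addEventListener("storage", check); // cross-tab sync
    return () => window.removeEventListener("storage", check);
  }, [edited]);

  return complete;
}
```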
{ + // User is manually starting a new conversation - clear the filter + console.log("[CONVERSATION] Manual new conversation - clearing filter"); + setConversationFilterState(null); + } else { + // First time after onboarding - preserve existing filter if set + // This prevents clearing the filter when startNewConversation is called multiple times during onboarding + console.log("[CONVERSATION] No default filter set, preserving existing filter if any"); + // Don't clear the filter - it may have been set by storeDefaultFilterForNewConversations + } } - } else { - setConversationFilterState(null); } // Create a temporary placeholder conversation to show in sidebar - const placeholderConversation: ConversationData = { + const newPlaceholderConversation: ConversationData = { response_id: "new-conversation-" + Date.now(), title: "New conversation", endpoint: endpoint, @@ -284,10 +339,10 @@ export function ChatProvider({ children }: ChatProviderProps) { last_activity: new Date().toISOString(), }; - setPlaceholderConversation(placeholderConversation); + setPlaceholderConversation(newPlaceholderConversation); // Force immediate refresh to ensure sidebar shows correct state refreshConversations(true); - }, [endpoint, refreshConversations]); + }, [endpoint, refreshConversations, conversationData, placeholderConversation]); const addConversationDoc = useCallback((filename: string) => { setConversationDocs((prev) => [ @@ -375,6 +430,8 @@ export function ChatProvider({ children }: ChatProviderProps) { setConversationFilter, hasChatError, setChatError, + isOnboardingComplete, + setOnboardingComplete, }), [ endpoint, @@ -396,6 +453,8 @@ export function ChatProvider({ children }: ChatProviderProps) { conversationFilter, setConversationFilter, hasChatError, + isOnboardingComplete, + setOnboardingComplete, ], ); diff --git a/frontend/lib/upload-utils.ts b/frontend/lib/upload-utils.ts index 9892bde7..ad09bb3b 100644 --- a/frontend/lib/upload-utils.ts +++ b/frontend/lib/upload-utils.ts @@ -12,6 +12,7 @@ export interface UploadFileResult { raw: unknown; createFilter?: boolean; filename?: string; + taskId?: string; } export async function duplicateCheck( @@ -158,6 +159,7 @@ export async function uploadFile( (uploadIngestJson as { upload?: { id?: string } }).upload?.id || (uploadIngestJson as { id?: string }).id || (uploadIngestJson as { task_id?: string }).task_id; + const taskId = (uploadIngestJson as { task_id?: string }).task_id; const filePath = (uploadIngestJson as { upload?: { path?: string } }).upload?.path || (uploadIngestJson as { path?: string }).path || @@ -197,6 +199,7 @@ export async function uploadFile( raw: uploadIngestJson, createFilter: shouldCreateFilter, filename, + taskId, }; return result; diff --git a/frontend/package-lock.json b/frontend/package-lock.json index 84c50ecd..af0628ee 100644 --- a/frontend/package-lock.json +++ b/frontend/package-lock.json @@ -38,7 +38,7 @@ "dotenv": "^17.2.3", "lucide-react": "^0.525.0", "motion": "^12.23.12", - "next": "15.3.5", + "next": "15.5.7", "next-themes": "^0.4.6", "react": "^19.0.0", "react-dom": "^19.0.0", @@ -1169,9 +1169,9 @@ } }, "node_modules/@next/env": { - "version": "15.3.5", - "resolved": "https://registry.npmjs.org/@next/env/-/env-15.3.5.tgz", - "integrity": "sha512-7g06v8BUVtN2njAX/r8gheoVffhiKFVt4nx74Tt6G4Hqw9HCLYQVx/GkH2qHvPtAHZaUNZ0VXAa0pQP6v1wk7g==", + "version": "15.5.7", + "resolved": "https://registry.npmjs.org/@next/env/-/env-15.5.7.tgz", + "integrity": 
"sha512-4h6Y2NyEkIEN7Z8YxkA27pq6zTkS09bUSYC0xjd0NpwFxjnIKeZEeH591o5WECSmjpUhLn3H2QLJcDye3Uzcvg==", "license": "MIT" }, "node_modules/@next/eslint-plugin-next": { @@ -1185,9 +1185,9 @@ } }, "node_modules/@next/swc-darwin-arm64": { - "version": "15.3.5", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-15.3.5.tgz", - "integrity": "sha512-lM/8tilIsqBq+2nq9kbTW19vfwFve0NR7MxfkuSUbRSgXlMQoJYg+31+++XwKVSXk4uT23G2eF/7BRIKdn8t8w==", + "version": "15.5.7", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-15.5.7.tgz", + "integrity": "sha512-IZwtxCEpI91HVU/rAUOOobWSZv4P2DeTtNaCdHqLcTJU4wdNXgAySvKa/qJCgR5m6KI8UsKDXtO2B31jcaw1Yw==", "cpu": [ "arm64" ], @@ -1201,9 +1201,9 @@ } }, "node_modules/@next/swc-darwin-x64": { - "version": "15.3.5", - "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-15.3.5.tgz", - "integrity": "sha512-WhwegPQJ5IfoUNZUVsI9TRAlKpjGVK0tpJTL6KeiC4cux9774NYE9Wu/iCfIkL/5J8rPAkqZpG7n+EfiAfidXA==", + "version": "15.5.7", + "resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-15.5.7.tgz", + "integrity": "sha512-UP6CaDBcqaCBuiq/gfCEJw7sPEoX1aIjZHnBWN9v9qYHQdMKvCKcAVs4OX1vIjeE+tC5EIuwDTVIoXpUes29lg==", "cpu": [ "x64" ], @@ -1217,9 +1217,9 @@ } }, "node_modules/@next/swc-linux-arm64-gnu": { - "version": "15.3.5", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-15.3.5.tgz", - "integrity": "sha512-LVD6uMOZ7XePg3KWYdGuzuvVboxujGjbcuP2jsPAN3MnLdLoZUXKRc6ixxfs03RH7qBdEHCZjyLP/jBdCJVRJQ==", + "version": "15.5.7", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-15.5.7.tgz", + "integrity": "sha512-NCslw3GrNIw7OgmRBxHtdWFQYhexoUCq+0oS2ccjyYLtcn1SzGzeM54jpTFonIMUjNbHmpKpziXnpxhSWLcmBA==", "cpu": [ "arm64" ], @@ -1233,9 +1233,9 @@ } }, "node_modules/@next/swc-linux-arm64-musl": { - "version": "15.3.5", - "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-15.3.5.tgz", - "integrity": "sha512-k8aVScYZ++BnS2P69ClK7v4nOu702jcF9AIHKu6llhHEtBSmM2zkPGl9yoqbSU/657IIIb0QHpdxEr0iW9z53A==", + "version": "15.5.7", + "resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-15.5.7.tgz", + "integrity": "sha512-nfymt+SE5cvtTrG9u1wdoxBr9bVB7mtKTcj0ltRn6gkP/2Nu1zM5ei8rwP9qKQP0Y//umK+TtkKgNtfboBxRrw==", "cpu": [ "arm64" ], @@ -1249,9 +1249,9 @@ } }, "node_modules/@next/swc-linux-x64-gnu": { - "version": "15.3.5", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-15.3.5.tgz", - "integrity": "sha512-2xYU0DI9DGN/bAHzVwADid22ba5d/xrbrQlr2U+/Q5WkFUzeL0TDR963BdrtLS/4bMmKZGptLeg6282H/S2i8A==", + "version": "15.5.7", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-15.5.7.tgz", + "integrity": "sha512-hvXcZvCaaEbCZcVzcY7E1uXN9xWZfFvkNHwbe/n4OkRhFWrs1J1QV+4U1BN06tXLdaS4DazEGXwgqnu/VMcmqw==", "cpu": [ "x64" ], @@ -1265,9 +1265,9 @@ } }, "node_modules/@next/swc-linux-x64-musl": { - "version": "15.3.5", - "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-15.3.5.tgz", - "integrity": "sha512-TRYIqAGf1KCbuAB0gjhdn5Ytd8fV+wJSM2Nh2is/xEqR8PZHxfQuaiNhoF50XfY90sNpaRMaGhF6E+qjV1b9Tg==", + "version": "15.5.7", + "resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-15.5.7.tgz", + "integrity": "sha512-4IUO539b8FmF0odY6/SqANJdgwn1xs1GkPO5doZugwZ3ETF6JUdckk7RGmsfSf7ws8Qb2YB5It33mvNL/0acqA==", "cpu": [ "x64" ], @@ -1281,9 +1281,9 @@ 
} }, "node_modules/@next/swc-win32-arm64-msvc": { - "version": "15.3.5", - "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-15.3.5.tgz", - "integrity": "sha512-h04/7iMEUSMY6fDGCvdanKqlO1qYvzNxntZlCzfE8i5P0uqzVQWQquU1TIhlz0VqGQGXLrFDuTJVONpqGqjGKQ==", + "version": "15.5.7", + "resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-15.5.7.tgz", + "integrity": "sha512-CpJVTkYI3ZajQkC5vajM7/ApKJUOlm6uP4BknM3XKvJ7VXAvCqSjSLmM0LKdYzn6nBJVSjdclx8nYJSa3xlTgQ==", "cpu": [ "arm64" ], @@ -1297,9 +1297,9 @@ } }, "node_modules/@next/swc-win32-x64-msvc": { - "version": "15.3.5", - "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-15.3.5.tgz", - "integrity": "sha512-5fhH6fccXxnX2KhllnGhkYMndhOiLOLEiVGYjP2nizqeGWkN10sA9taATlXwake2E2XMvYZjjz0Uj7T0y+z1yw==", + "version": "15.5.7", + "resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-15.5.7.tgz", + "integrity": "sha512-gMzgBX164I6DN+9/PGA+9dQiwmTkE4TloBNx8Kv9UiGARsr9Nba7IpcBRA1iTV9vwlYnrE3Uy6I7Aj6qLjQuqw==", "cpu": [ "x64" ], @@ -2568,12 +2568,6 @@ "dev": true, "license": "MIT" }, - "node_modules/@swc/counter": { - "version": "0.1.3", - "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.3.tgz", - "integrity": "sha512-e2BR4lsJkkRlKZ/qCHPw9ZaSxc0MVUd7gtbtaB7aMvHeJVYe8sOB8DBZkP2DtISHGSku9sCK6T6cnY0CtXrOCQ==", - "license": "Apache-2.0" - }, "node_modules/@swc/helpers": { "version": "0.5.15", "resolved": "https://registry.npmjs.org/@swc/helpers/-/helpers-0.5.15.tgz", @@ -3821,17 +3815,6 @@ "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" } }, - "node_modules/busboy": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/busboy/-/busboy-1.6.0.tgz", - "integrity": "sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==", - "dependencies": { - "streamsearch": "^1.1.0" - }, - "engines": { - "node": ">=10.16.0" - } - }, "node_modules/call-bind": { "version": "1.0.8", "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.8.tgz", @@ -5448,9 +5431,9 @@ } }, "node_modules/glob": { - "version": "10.4.5", - "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz", - "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", + "version": "10.5.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.5.0.tgz", + "integrity": "sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==", "license": "ISC", "dependencies": { "foreground-child": "^3.1.0", @@ -6584,9 +6567,9 @@ "license": "MIT" }, "node_modules/js-yaml": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", - "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", "dev": true, "license": "MIT", "dependencies": { @@ -7194,9 +7177,10 @@ } }, "node_modules/mdast-util-to-hast": { - "version": "13.2.0", - "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.0.tgz", - "integrity": "sha512-QGYKEuUsYT9ykKBCMOEDLsU5JRObWQusAolFMeko/tYPufNkRffBAQjIE+99jbA87xv6FgmjLtwjh9wBWajwAA==", + "version": "13.2.1", + "resolved": 
"https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.1.tgz", + "integrity": "sha512-cctsq2wp5vTsLIcaymblUriiTcZd0CwWtCbLvrOzYCDZoWyMNV8sZ7krj09FSnsiJi3WVsHLM4k6Dq/yaPyCXA==", + "license": "MIT", "dependencies": { "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", @@ -7973,15 +7957,13 @@ "license": "MIT" }, "node_modules/next": { - "version": "15.3.5", - "resolved": "https://registry.npmjs.org/next/-/next-15.3.5.tgz", - "integrity": "sha512-RkazLBMMDJSJ4XZQ81kolSpwiCt907l0xcgcpF4xC2Vml6QVcPNXW0NQRwQ80FFtSn7UM52XN0anaw8TEJXaiw==", + "version": "15.5.7", + "resolved": "https://registry.npmjs.org/next/-/next-15.5.7.tgz", + "integrity": "sha512-+t2/0jIJ48kUpGKkdlhgkv+zPTEOoXyr60qXe68eB/pl3CMJaLeIGjzp5D6Oqt25hCBiBTt8wEeeAzfJvUKnPQ==", "license": "MIT", "dependencies": { - "@next/env": "15.3.5", - "@swc/counter": "0.1.3", + "@next/env": "15.5.7", "@swc/helpers": "0.5.15", - "busboy": "1.6.0", "caniuse-lite": "^1.0.30001579", "postcss": "8.4.31", "styled-jsx": "5.1.6" @@ -7993,19 +7975,19 @@ "node": "^18.18.0 || ^19.8.0 || >= 20.0.0" }, "optionalDependencies": { - "@next/swc-darwin-arm64": "15.3.5", - "@next/swc-darwin-x64": "15.3.5", - "@next/swc-linux-arm64-gnu": "15.3.5", - "@next/swc-linux-arm64-musl": "15.3.5", - "@next/swc-linux-x64-gnu": "15.3.5", - "@next/swc-linux-x64-musl": "15.3.5", - "@next/swc-win32-arm64-msvc": "15.3.5", - "@next/swc-win32-x64-msvc": "15.3.5", - "sharp": "^0.34.1" + "@next/swc-darwin-arm64": "15.5.7", + "@next/swc-darwin-x64": "15.5.7", + "@next/swc-linux-arm64-gnu": "15.5.7", + "@next/swc-linux-arm64-musl": "15.5.7", + "@next/swc-linux-x64-gnu": "15.5.7", + "@next/swc-linux-x64-musl": "15.5.7", + "@next/swc-win32-arm64-msvc": "15.5.7", + "@next/swc-win32-x64-msvc": "15.5.7", + "sharp": "^0.34.3" }, "peerDependencies": { "@opentelemetry/api": "^1.1.0", - "@playwright/test": "^1.41.2", + "@playwright/test": "^1.51.1", "babel-plugin-react-compiler": "*", "react": "^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0", "react-dom": "^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0", @@ -9492,14 +9474,6 @@ "node": ">= 0.4" } }, - "node_modules/streamsearch": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/streamsearch/-/streamsearch-1.1.0.tgz", - "integrity": "sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==", - "engines": { - "node": ">=10.0.0" - } - }, "node_modules/string-width": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", diff --git a/frontend/package.json b/frontend/package.json index 0913cc4c..2e2dabc7 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -41,7 +41,7 @@ "dotenv": "^17.2.3", "lucide-react": "^0.525.0", "motion": "^12.23.12", - "next": "15.3.5", + "next": "15.5.7", "next-themes": "^0.4.6", "react": "^19.0.0", "react-dom": "^19.0.0", diff --git a/src/agent.py b/src/agent.py index bd4d257f..dd092643 100644 --- a/src/agent.py +++ b/src/agent.py @@ -47,8 +47,8 @@ def get_conversation_thread(user_id: str, previous_response_id: str = None): return new_conversation -def store_conversation_thread(user_id: str, response_id: str, conversation_state: dict): - """Store conversation both in memory (with function calls) and persist metadata to disk""" +async def store_conversation_thread(user_id: str, response_id: str, conversation_state: dict): + """Store conversation both in memory (with function calls) and persist metadata to disk (async, non-blocking)""" # 1. 
Store full conversation in memory for function call preservation if user_id not in active_conversations: active_conversations[user_id] = {} @@ -76,7 +76,7 @@ def store_conversation_thread(user_id: str, response_id: str, conversation_state # Don't store actual messages - Langflow has them } - conversation_persistence.store_conversation_thread( + await conversation_persistence.store_conversation_thread( user_id, response_id, metadata_only ) @@ -382,7 +382,7 @@ async def async_chat( # Store the conversation thread with its response_id if response_id: conversation_state["last_activity"] = datetime.now() - store_conversation_thread(user_id, response_id, conversation_state) + await store_conversation_thread(user_id, response_id, conversation_state) logger.debug( "Stored conversation thread", user_id=user_id, response_id=response_id ) @@ -461,7 +461,7 @@ async def async_chat_stream( # Store the conversation thread with its response_id if response_id: conversation_state["last_activity"] = datetime.now() - store_conversation_thread(user_id, response_id, conversation_state) + await store_conversation_thread(user_id, response_id, conversation_state) logger.debug( f"Stored conversation thread for user {user_id} with response_id: {response_id}" ) @@ -549,7 +549,7 @@ async def async_langflow_chat( # Store the conversation thread with its response_id if response_id: conversation_state["last_activity"] = datetime.now() - store_conversation_thread(user_id, response_id, conversation_state) + await store_conversation_thread(user_id, response_id, conversation_state) # Claim session ownership for this user try: @@ -656,7 +656,7 @@ async def async_langflow_chat_stream( # Store the conversation thread with its response_id if response_id: conversation_state["last_activity"] = datetime.now() - store_conversation_thread(user_id, response_id, conversation_state) + await store_conversation_thread(user_id, response_id, conversation_state) # Claim session ownership for this user try: @@ -672,8 +672,8 @@ async def async_langflow_chat_stream( ) -def delete_user_conversation(user_id: str, response_id: str) -> bool: - """Delete a conversation for a user from both memory and persistent storage""" +async def delete_user_conversation(user_id: str, response_id: str) -> bool: + """Delete a conversation for a user from both memory and persistent storage (async, non-blocking)""" deleted = False try: @@ -684,7 +684,7 @@ def delete_user_conversation(user_id: str, response_id: str) -> bool: deleted = True # Delete from persistent storage - conversation_deleted = conversation_persistence.delete_conversation_thread(user_id, response_id) + conversation_deleted = await conversation_persistence.delete_conversation_thread(user_id, response_id) if conversation_deleted: logger.debug(f"Deleted conversation {response_id} from persistent storage for user {user_id}") deleted = True diff --git a/src/api/provider_validation.py b/src/api/provider_validation.py index c4307003..813826a1 100644 --- a/src/api/provider_validation.py +++ b/src/api/provider_validation.py @@ -1,5 +1,6 @@ """Provider validation utilities for testing API keys and models during onboarding.""" +import json import httpx from utils.container_utils import transform_localhost_url from utils.logging_config import get_logger @@ -7,6 +8,106 @@ from utils.logging_config import get_logger logger = get_logger(__name__) +def _parse_json_error_message(error_text: str) -> str: + """Parse JSON error message and extract just the message field.""" + try: + # Try to parse as JSON + error_data 
= json.loads(error_text) + + if isinstance(error_data, dict): + # WatsonX format: {"errors": [{"code": "...", "message": "..."}], ...} + if "errors" in error_data and isinstance(error_data["errors"], list): + errors = error_data["errors"] + if len(errors) > 0 and isinstance(errors[0], dict): + message = errors[0].get("message", "") + if message: + return message + code = errors[0].get("code", "") + if code: + return f"Error: {code}" + + # OpenAI format: {"error": {"message": "...", "type": "...", "code": "..."}} + if "error" in error_data: + error_obj = error_data["error"] + if isinstance(error_obj, dict): + message = error_obj.get("message", "") + if message: + return message + + # Direct message field + if "message" in error_data: + return error_data["message"] + + # Generic format: {"detail": "..."} + if "detail" in error_data: + return error_data["detail"] + except (json.JSONDecodeError, ValueError, TypeError): + pass + + # Return original text if not JSON or can't parse + return error_text + + +def _extract_error_details(response: httpx.Response) -> str: + """Extract detailed error message from API response.""" + try: + # Try to parse JSON error response + error_data = response.json() + + # Common error response formats + if isinstance(error_data, dict): + # WatsonX format: {"errors": [{"code": "...", "message": "..."}], ...} + if "errors" in error_data and isinstance(error_data["errors"], list): + errors = error_data["errors"] + if len(errors) > 0 and isinstance(errors[0], dict): + # Extract just the message from the first error + message = errors[0].get("message", "") + if message: + return message + # Fallback to code if no message + code = errors[0].get("code", "") + if code: + return f"Error: {code}" + + # OpenAI format: {"error": {"message": "...", "type": "...", "code": "..."}} + if "error" in error_data: + error_obj = error_data["error"] + if isinstance(error_obj, dict): + message = error_obj.get("message", "") + error_type = error_obj.get("type", "") + code = error_obj.get("code", "") + if message: + details = message + if error_type: + details += f" (type: {error_type})" + if code: + details += f" (code: {code})" + return details + + # Anthropic format: {"error": {"message": "...", "type": "..."}} + if "message" in error_data: + return error_data["message"] + + # Generic format: {"message": "..."} + if "detail" in error_data: + return error_data["detail"] + + # If JSON parsing worked but no structured error found, try parsing text + response_text = response.text[:500] + parsed = _parse_json_error_message(response_text) + if parsed != response_text: + return parsed + return response_text + + except (json.JSONDecodeError, ValueError): + # If JSON parsing fails, try parsing the text as JSON string + response_text = response.text[:500] if response.text else f"HTTP {response.status_code}" + parsed = _parse_json_error_message(response_text) + if parsed != response_text: + return parsed + return response_text + + async def validate_provider_setup( provider: str, api_key: str = None, @@ -30,7 +131,7 @@ async def validate_provider_setup( If False, performs lightweight validation (no credits consumed). Default: False. Raises: - Exception: If validation fails with message "Setup failed, please try again or select a different provider." + Exception: If validation fails, raises the original exception with the actual error message. 
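A quick usage sketch of the two helpers above against the error shapes they target — the payloads below are made-up examples of each format, not captured API responses:

```python
# Made-up payloads in the formats _parse_json_error_message handles.
watsonx_err = '{"errors": [{"code": "model_not_found", "message": "Model not found"}]}'
openai_err = '{"error": {"message": "Incorrect API key provided", "type": "invalid_request_error"}}'
plain_err = "upstream timeout"

print(_parse_json_error_message(watsonx_err))  # -> "Model not found"
print(_parse_json_error_message(openai_err))   # -> "Incorrect API key provided"
print(_parse_json_error_message(plain_err))    # -> "upstream timeout" (passed through unchanged)
```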
""" provider_lower = provider.lower() @@ -70,7 +171,8 @@ async def validate_provider_setup( except Exception as e: logger.error(f"Validation failed for provider {provider_lower}: {str(e)}") - raise Exception("Setup failed, please try again or select a different provider.") + # Preserve the original error message instead of replacing it with a generic one + raise async def test_lightweight_health( @@ -155,8 +257,9 @@ async def _test_openai_lightweight_health(api_key: str) -> None: ) if response.status_code != 200: - logger.error(f"OpenAI lightweight health check failed: {response.status_code}") - raise Exception(f"OpenAI API key validation failed: {response.status_code}") + error_details = _extract_error_details(response) + logger.error(f"OpenAI lightweight health check failed: {response.status_code} - {error_details}") + raise Exception(f"OpenAI API key validation failed: {error_details}") logger.info("OpenAI lightweight health check passed") @@ -225,8 +328,9 @@ async def _test_openai_completion_with_tools(api_key: str, llm_model: str) -> No ) if response.status_code != 200: - logger.error(f"OpenAI completion test failed: {response.status_code} - {response.text}") - raise Exception(f"OpenAI API error: {response.status_code}") + error_details = _extract_error_details(response) + logger.error(f"OpenAI completion test failed: {response.status_code} - {error_details}") + raise Exception(f"OpenAI API error: {error_details}") logger.info("OpenAI completion with tool calling test passed") @@ -260,8 +364,9 @@ async def _test_openai_embedding(api_key: str, embedding_model: str) -> None: ) if response.status_code != 200: - logger.error(f"OpenAI embedding test failed: {response.status_code} - {response.text}") - raise Exception(f"OpenAI API error: {response.status_code}") + error_details = _extract_error_details(response) + logger.error(f"OpenAI embedding test failed: {response.status_code} - {error_details}") + raise Exception(f"OpenAI API error: {error_details}") data = response.json() if not data.get("data") or len(data["data"]) == 0: @@ -300,8 +405,9 @@ async def _test_watsonx_lightweight_health( ) if token_response.status_code != 200: - logger.error(f"IBM IAM token request failed: {token_response.status_code}") - raise Exception("Failed to authenticate with IBM Watson - invalid API key") + error_details = _extract_error_details(token_response) + logger.error(f"IBM IAM token request failed: {token_response.status_code} - {error_details}") + raise Exception(f"Failed to authenticate with IBM Watson: {error_details}") bearer_token = token_response.json().get("access_token") if not bearer_token: @@ -335,8 +441,9 @@ async def _test_watsonx_completion_with_tools( ) if token_response.status_code != 200: - logger.error(f"IBM IAM token request failed: {token_response.status_code}") - raise Exception("Failed to authenticate with IBM Watson") + error_details = _extract_error_details(token_response) + logger.error(f"IBM IAM token request failed: {token_response.status_code} - {error_details}") + raise Exception(f"Failed to authenticate with IBM Watson: {error_details}") bearer_token = token_response.json().get("access_token") if not bearer_token: @@ -388,8 +495,11 @@ async def _test_watsonx_completion_with_tools( ) if response.status_code != 200: - logger.error(f"IBM Watson completion test failed: {response.status_code} - {response.text}") - raise Exception(f"IBM Watson API error: {response.status_code}") + error_details = _extract_error_details(response) + logger.error(f"IBM Watson completion test failed: 
{response.status_code} - {error_details}") + # If error_details is still JSON, parse it to extract just the message + parsed_details = _parse_json_error_message(error_details) + raise Exception(f"IBM Watson API error: {parsed_details}") logger.info("IBM Watson completion with tool calling test passed") @@ -398,6 +508,13 @@ async def _test_watsonx_completion_with_tools( raise Exception("Request timed out") except Exception as e: logger.error(f"IBM Watson completion test failed: {str(e)}") + # If the error message contains JSON, parse it to extract just the message + error_str = str(e) + if "IBM Watson API error: " in error_str: + json_part = error_str.split("IBM Watson API error: ", 1)[1] + parsed_message = _parse_json_error_message(json_part) + if parsed_message != json_part: + raise Exception(f"IBM Watson API error: {parsed_message}") raise @@ -419,8 +536,9 @@ async def _test_watsonx_embedding( ) if token_response.status_code != 200: - logger.error(f"IBM IAM token request failed: {token_response.status_code}") - raise Exception("Failed to authenticate with IBM Watson") + error_details = _extract_error_details(token_response) + logger.error(f"IBM IAM token request failed: {token_response.status_code} - {error_details}") + raise Exception(f"Failed to authenticate with IBM Watson: {error_details}") bearer_token = token_response.json().get("access_token") if not bearer_token: @@ -450,8 +568,11 @@ async def _test_watsonx_embedding( ) if response.status_code != 200: - logger.error(f"IBM Watson embedding test failed: {response.status_code} - {response.text}") - raise Exception(f"IBM Watson API error: {response.status_code}") + error_details = _extract_error_details(response) + logger.error(f"IBM Watson embedding test failed: {response.status_code} - {error_details}") + # If error_details is still JSON, parse it to extract just the message + parsed_details = _parse_json_error_message(error_details) + raise Exception(f"IBM Watson API error: {parsed_details}") data = response.json() if not data.get("results") or len(data["results"]) == 0: @@ -464,6 +585,13 @@ async def _test_watsonx_embedding( raise Exception("Request timed out") except Exception as e: logger.error(f"IBM Watson embedding test failed: {str(e)}") + # If the error message contains JSON, parse it to extract just the message + error_str = str(e) + if "IBM Watson API error: " in error_str: + json_part = error_str.split("IBM Watson API error: ", 1)[1] + parsed_message = _parse_json_error_message(json_part) + if parsed_message != json_part: + raise Exception(f"IBM Watson API error: {parsed_message}") raise @@ -483,8 +611,9 @@ async def _test_ollama_lightweight_health(endpoint: str) -> None: ) if response.status_code != 200: - logger.error(f"Ollama lightweight health check failed: {response.status_code}") - raise Exception(f"Ollama endpoint not responding: {response.status_code}") + error_details = _extract_error_details(response) + logger.error(f"Ollama lightweight health check failed: {response.status_code} - {error_details}") + raise Exception(f"Ollama endpoint not responding: {error_details}") logger.info("Ollama lightweight health check passed") @@ -537,8 +666,9 @@ async def _test_ollama_completion_with_tools(llm_model: str, endpoint: str) -> N ) if response.status_code != 200: - logger.error(f"Ollama completion test failed: {response.status_code} - {response.text}") - raise Exception(f"Ollama API error: {response.status_code}") + error_details = _extract_error_details(response) + logger.error(f"Ollama completion test failed: 
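The Watson except-handlers above re-parse JSON that leaked into exception strings. Isolated, the unwrap step is just a prefix split plus a second pass through the parser — a sketch, assuming `_parse_json_error_message` from this module:

```python
# Hedged sketch of the unwrap step used in the Watson handlers above: if a
# raised message still carries a JSON payload after the known prefix, parse
# it down to the human-readable message before re-raising.
PREFIX = "IBM Watson API error: "


def unwrap(exc: Exception) -> Exception:
    text = str(exc)
    if PREFIX in text:
        json_part = text.split(PREFIX, 1)[1]
        parsed = _parse_json_error_message(json_part)
        if parsed != json_part:
            return Exception(f"{PREFIX}{parsed}")
    return exc  # nothing recognizable to unwrap; keep the original
```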
{response.status_code} - {error_details}") + raise Exception(f"Ollama API error: {error_details}") logger.info("Ollama completion with tool calling test passed") @@ -569,8 +699,9 @@ async def _test_ollama_embedding(embedding_model: str, endpoint: str) -> None: ) if response.status_code != 200: - logger.error(f"Ollama embedding test failed: {response.status_code} - {response.text}") - raise Exception(f"Ollama API error: {response.status_code}") + error_details = _extract_error_details(response) + logger.error(f"Ollama embedding test failed: {response.status_code} - {error_details}") + raise Exception(f"Ollama API error: {error_details}") data = response.json() if not data.get("embedding"): @@ -616,8 +747,9 @@ async def _test_anthropic_lightweight_health(api_key: str) -> None: ) if response.status_code != 200: - logger.error(f"Anthropic lightweight health check failed: {response.status_code}") - raise Exception(f"Anthropic API key validation failed: {response.status_code}") + error_details = _extract_error_details(response) + logger.error(f"Anthropic lightweight health check failed: {response.status_code} - {error_details}") + raise Exception(f"Anthropic API key validation failed: {error_details}") logger.info("Anthropic lightweight health check passed") @@ -672,8 +804,9 @@ async def _test_anthropic_completion_with_tools(api_key: str, llm_model: str) -> ) if response.status_code != 200: - logger.error(f"Anthropic completion test failed: {response.status_code} - {response.text}") - raise Exception(f"Anthropic API error: {response.status_code}") + error_details = _extract_error_details(response) + logger.error(f"Anthropic completion test failed: {response.status_code} - {error_details}") + raise Exception(f"Anthropic API error: {error_details}") logger.info("Anthropic completion with tool calling test passed") diff --git a/src/api/settings.py b/src/api/settings.py index af43b03a..982d9272 100644 --- a/src/api/settings.py +++ b/src/api/settings.py @@ -897,7 +897,7 @@ async def onboarding(request, flows_service, session_manager=None): ) # Validate provider setup before initializing OpenSearch index - # Use lightweight validation (test_completion=False) to avoid consuming credits during onboarding + # Use full validation with completion tests (test_completion=True) to ensure provider health during onboarding try: from api.provider_validation import validate_provider_setup @@ -906,14 +906,14 @@ async def onboarding(request, flows_service, session_manager=None): llm_provider = current_config.agent.llm_provider.lower() llm_provider_config = current_config.get_llm_provider_config() - logger.info(f"Validating LLM provider setup for {llm_provider} (lightweight)") + logger.info(f"Validating LLM provider setup for {llm_provider} (full validation with completion test)") await validate_provider_setup( provider=llm_provider, api_key=getattr(llm_provider_config, "api_key", None), llm_model=current_config.agent.llm_model, endpoint=getattr(llm_provider_config, "endpoint", None), project_id=getattr(llm_provider_config, "project_id", None), - test_completion=False, # Lightweight validation - no credits consumed + test_completion=True, # Full validation with completion test - ensures provider health ) logger.info(f"LLM provider setup validation completed successfully for {llm_provider}") @@ -922,14 +922,14 @@ async def onboarding(request, flows_service, session_manager=None): embedding_provider = current_config.knowledge.embedding_provider.lower() embedding_provider_config = 
current_config.get_embedding_provider_config() - logger.info(f"Validating embedding provider setup for {embedding_provider} (lightweight)") + logger.info(f"Validating embedding provider setup for {embedding_provider} (full validation with completion test)") await validate_provider_setup( provider=embedding_provider, api_key=getattr(embedding_provider_config, "api_key", None), embedding_model=current_config.knowledge.embedding_model, endpoint=getattr(embedding_provider_config, "endpoint", None), project_id=getattr(embedding_provider_config, "project_id", None), - test_completion=False, # Lightweight validation - no credits consumed + test_completion=True, # Full validation with completion test - ensures provider health ) logger.info(f"Embedding provider setup validation completed successfully for {embedding_provider}") except Exception as e: @@ -1403,6 +1403,139 @@ async def reapply_all_settings(session_manager = None): raise +async def rollback_onboarding(request, session_manager, task_service): + """Rollback onboarding configuration when sample data files fail. + + This will: + 1. Cancel all active tasks + 2. Delete successfully ingested knowledge documents + 3. Reset configuration to allow re-onboarding + """ + try: + # Get current configuration + current_config = get_openrag_config() + + # Only allow rollback if config was marked as edited (onboarding completed) + if not current_config.edited: + return JSONResponse( + {"error": "No onboarding configuration to rollback"}, status_code=400 + ) + + user = request.state.user + jwt_token = session_manager.get_effective_jwt_token(user.user_id, request.state.jwt_token) + + logger.info("Rolling back onboarding configuration due to file failures") + + # Get all tasks for the user + all_tasks = task_service.get_all_tasks(user.user_id) + + cancelled_tasks = [] + deleted_files = [] + + # Cancel all active tasks and collect successfully ingested files + for task_data in all_tasks: + task_id = task_data.get("task_id") + task_status = task_data.get("status") + + # Cancel active tasks (pending, running, processing) + if task_status in ["pending", "running", "processing"]: + try: + success = await task_service.cancel_task(user.user_id, task_id) + if success: + cancelled_tasks.append(task_id) + logger.info(f"Cancelled task {task_id}") + except Exception as e: + logger.error(f"Failed to cancel task {task_id}: {str(e)}") + + # For completed tasks, find successfully ingested files and delete them + elif task_status == "completed": + files = task_data.get("files", {}) + if isinstance(files, dict): + for file_path, file_info in files.items(): + # Check if file was successfully ingested + if isinstance(file_info, dict): + file_status = file_info.get("status") + filename = file_info.get("filename") or file_path.split("/")[-1] + + if file_status == "completed" and filename: + try: + # Get user's OpenSearch client + opensearch_client = session_manager.get_user_opensearch_client( + user.user_id, jwt_token + ) + + # Delete documents by filename + from utils.opensearch_queries import build_filename_delete_body + from config.settings import INDEX_NAME + + delete_query = build_filename_delete_body(filename) + + result = await opensearch_client.delete_by_query( + index=INDEX_NAME, + body=delete_query, + conflicts="proceed" + ) + + deleted_count = result.get("deleted", 0) + if deleted_count > 0: + deleted_files.append(filename) + logger.info(f"Deleted {deleted_count} chunks for filename {filename}") + except Exception as e: + logger.error(f"Failed to delete documents for 
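The per-file cleanup in the rollback above removes every ingested chunk for a filename via delete-by-query. The real query body comes from `utils.opensearch_queries.build_filename_delete_body`; the term query in this sketch is an assumption about its output, not its actual implementation:

```python
# Assumed shape of the filename delete-by-query body; the real helper may
# differ (e.g., a different field name or a match query).
def build_filename_delete_body(filename: str) -> dict:
    return {"query": {"term": {"filename.keyword": filename}}}


async def delete_chunks(client, index: str, filename: str) -> int:
    result = await client.delete_by_query(
        index=index,
        body=build_filename_delete_body(filename),
        conflicts="proceed",  # skip version conflicts instead of aborting
    )
    return result.get("deleted", 0)
```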
{filename}: {str(e)}") + + # Clear embedding provider and model settings + current_config.knowledge.embedding_provider = "openai" # Reset to default + current_config.knowledge.embedding_model = "" + + # Mark config as not edited so user can go through onboarding again + current_config.edited = False + + # Save the rolled back configuration manually to avoid save_config_file setting edited=True + try: + import yaml + config_file = config_manager.config_file + + # Ensure directory exists + config_file.parent.mkdir(parents=True, exist_ok=True) + + # Save config with edited=False + with open(config_file, "w") as f: + yaml.dump(current_config.to_dict(), f, default_flow_style=False, indent=2) + + # Update cached config + config_manager._config = current_config + + logger.info("Successfully saved rolled back configuration with edited=False") + except Exception as e: + logger.error(f"Failed to save rolled back configuration: {e}") + return JSONResponse( + {"error": "Failed to save rolled back configuration"}, status_code=500 + ) + + logger.info( + f"Successfully rolled back onboarding configuration. " + f"Cancelled {len(cancelled_tasks)} tasks, deleted {len(deleted_files)} files" + ) + await TelemetryClient.send_event( + Category.ONBOARDING, + MessageId.ORB_ONBOARD_ROLLBACK + ) + + return JSONResponse( + { + "message": "Onboarding configuration rolled back successfully", + "cancelled_tasks": len(cancelled_tasks), + "deleted_files": len(deleted_files), + } + ) + + except Exception as e: + logger.error("Failed to rollback onboarding configuration", error=str(e)) + return JSONResponse( + {"error": f"Failed to rollback onboarding: {str(e)}"}, status_code=500 + ) + + async def update_docling_preset(request, session_manager): """Update docling settings in the ingest flow - deprecated endpoint, use /settings instead""" try: diff --git a/src/config/settings.py b/src/config/settings.py index 75b09f09..b590ab8b 100644 --- a/src/config/settings.py +++ b/src/config/settings.py @@ -1,3 +1,4 @@ +import asyncio import os import time @@ -140,61 +141,29 @@ INDEX_BODY = { LANGFLOW_BASE_URL = f"{LANGFLOW_URL}/api/v1" -async def generate_langflow_api_key(modify: bool = False): - """Generate Langflow API key using superuser credentials at startup""" +async def get_langflow_api_key(force_regenerate: bool = False): + """Get the Langflow API key, generating one if needed. + + Args: + force_regenerate: If True, generates a new key even if one is cached. + Used when a request fails with 401/403 to get a fresh key. 
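One detail of the rollback above worth isolating: it writes the YAML by hand because, per the hunk's comment, the normal `save_config_file` path would re-set `edited=True` and immediately mark onboarding as done again. A sketch of that direct save (names illustrative, assuming the config exposes `to_dict()`):

```python
# Hedged sketch: persist the rolled-back config without the side effect of
# the normal save path flipping edited back to True.
import yaml
from pathlib import Path


def save_config_preserving_edited(config, config_file: Path) -> None:
    config_file.parent.mkdir(parents=True, exist_ok=True)
    with open(config_file, "w") as f:
        yaml.dump(config.to_dict(), f, default_flow_style=False, indent=2)
```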
+ """ global LANGFLOW_KEY logger.debug( - "generate_langflow_api_key called", current_key_present=bool(LANGFLOW_KEY) + "get_langflow_api_key called", + current_key_present=bool(LANGFLOW_KEY), + force_regenerate=force_regenerate, ) - # If key already provided via env, do not attempt generation - if LANGFLOW_KEY: - if os.getenv("LANGFLOW_KEY"): - logger.info("Using LANGFLOW_KEY from environment; skipping generation") - return LANGFLOW_KEY - else: - # We have a cached key, but let's validate it first - logger.debug("Validating cached LANGFLOW_KEY", key_prefix=LANGFLOW_KEY[:8]) - try: - validation_response = requests.get( - f"{LANGFLOW_URL}/api/v1/users/whoami", - headers={"x-api-key": LANGFLOW_KEY}, - timeout=5, - ) - if validation_response.status_code == 200: - logger.debug("Cached API key is valid", key_prefix=LANGFLOW_KEY[:8]) - return LANGFLOW_KEY - elif validation_response.status_code in (401, 403): - logger.warning( - "Cached API key is unauthorized, generating fresh key", - status_code=validation_response.status_code, - ) - LANGFLOW_KEY = None # Clear invalid key - else: - logger.warning( - "Cached API key validation returned non-access error; keeping existing key", - status_code=validation_response.status_code, - ) - return LANGFLOW_KEY - except requests.exceptions.Timeout as e: - logger.warning( - "Cached API key validation timed out; keeping existing key", - error=str(e), - ) - return LANGFLOW_KEY - except requests.exceptions.RequestException as e: - logger.warning( - "Cached API key validation failed due to request error; keeping existing key", - error=str(e), - ) - return LANGFLOW_KEY - except Exception as e: - logger.warning( - "Unexpected error during cached API key validation; keeping existing key", - error=str(e), - ) - return LANGFLOW_KEY + # If we have a cached key and not forcing regeneration, return it + if LANGFLOW_KEY and not force_regenerate: + return LANGFLOW_KEY + + # If forcing regeneration, clear the cached key + if force_regenerate and LANGFLOW_KEY: + logger.info("Forcing Langflow API key regeneration due to auth failure") + LANGFLOW_KEY = None # Use default langflow/langflow credentials if auto-login is enabled and credentials not set username = LANGFLOW_SUPERUSER @@ -216,72 +185,70 @@ async def generate_langflow_api_key(modify: bool = False): max_attempts = int(os.getenv("LANGFLOW_KEY_RETRIES", "15")) delay_seconds = float(os.getenv("LANGFLOW_KEY_RETRY_DELAY", "2.0")) - for attempt in range(1, max_attempts + 1): - try: - # Login to get access token - login_response = requests.post( - f"{LANGFLOW_URL}/api/v1/login", - headers={"Content-Type": "application/x-www-form-urlencoded"}, - data={ - "username": username, - "password": password, - }, - timeout=10, - ) - login_response.raise_for_status() - access_token = login_response.json().get("access_token") - if not access_token: - raise KeyError("access_token") - - # Create API key - api_key_response = requests.post( - f"{LANGFLOW_URL}/api/v1/api_key/", - headers={ - "Content-Type": "application/json", - "Authorization": f"Bearer {access_token}", - }, - json={"name": "openrag-auto-generated"}, - timeout=10, - ) - api_key_response.raise_for_status() - api_key = api_key_response.json().get("api_key") - if not api_key: - raise KeyError("api_key") - - # Validate the API key works - validation_response = requests.get( - f"{LANGFLOW_URL}/api/v1/users/whoami", - headers={"x-api-key": api_key}, - timeout=10, - ) - if validation_response.status_code == 200: - LANGFLOW_KEY = api_key - logger.info( - "Successfully generated and 
validated Langflow API key", - key_prefix=api_key[:8], + async with httpx.AsyncClient(timeout=10.0) as client: + for attempt in range(1, max_attempts + 1): + try: + # Login to get access token + login_response = await client.post( + f"{LANGFLOW_URL}/api/v1/login", + headers={"Content-Type": "application/x-www-form-urlencoded"}, + data={ + "username": username, + "password": password, + }, ) - return api_key - else: - logger.error( - "Generated API key validation failed", - status_code=validation_response.status_code, - ) - raise ValueError( - f"API key validation failed: {validation_response.status_code}" - ) - except (requests.exceptions.RequestException, KeyError) as e: - logger.warning( - "Attempt to generate Langflow API key failed", - attempt=attempt, - max_attempts=max_attempts, - error=str(e), - ) - if attempt < max_attempts: - time.sleep(delay_seconds) - else: - raise + login_response.raise_for_status() + access_token = login_response.json().get("access_token") + if not access_token: + raise KeyError("access_token") - except requests.exceptions.RequestException as e: + # Create API key + api_key_response = await client.post( + f"{LANGFLOW_URL}/api/v1/api_key/", + headers={ + "Content-Type": "application/json", + "Authorization": f"Bearer {access_token}", + }, + json={"name": "openrag-auto-generated"}, + ) + api_key_response.raise_for_status() + api_key = api_key_response.json().get("api_key") + if not api_key: + raise KeyError("api_key") + + # Validate the API key works + validation_response = await client.get( + f"{LANGFLOW_URL}/api/v1/users/whoami", + headers={"x-api-key": api_key}, + ) + if validation_response.status_code == 200: + LANGFLOW_KEY = api_key + logger.info( + "Successfully generated and validated Langflow API key", + key_prefix=api_key[:8], + ) + return api_key + else: + logger.error( + "Generated API key validation failed", + status_code=validation_response.status_code, + ) + raise ValueError( + f"API key validation failed: {validation_response.status_code}" + ) + except (httpx.HTTPStatusError, httpx.RequestError, KeyError) as e: + logger.warning( + "Attempt to generate Langflow API key failed", + attempt=attempt, + max_attempts=max_attempts, + error=str(e), + ) + if attempt < max_attempts: + await asyncio.sleep(delay_seconds) + else: + raise + + except (httpx.HTTPStatusError, httpx.RequestError) as e: logger.error("Failed to generate Langflow API key", error=str(e)) return None except KeyError as e: @@ -303,7 +270,7 @@ class AppClients: async def initialize(self): # Generate Langflow API key first - await generate_langflow_api_key() + await get_langflow_api_key() # Initialize OpenSearch client self.opensearch = AsyncOpenSearch( @@ -362,7 +329,7 @@ class AppClients: if self.langflow_client is not None: return self.langflow_client # Try generating key again (with retries) - await generate_langflow_api_key() + await get_langflow_api_key() if LANGFLOW_KEY and self.langflow_client is None: try: self.langflow_client = AsyncOpenAI( @@ -559,8 +526,11 @@ class AppClients: self.langflow_client = None async def langflow_request(self, method: str, endpoint: str, **kwargs): - """Central method for all Langflow API requests""" - api_key = await generate_langflow_api_key() + """Central method for all Langflow API requests. + + Retries once with a fresh API key on auth failures (401/403). 
+ """ + api_key = await get_langflow_api_key() if not api_key: raise ValueError("No Langflow API key available") @@ -575,57 +545,65 @@ class AppClients: url = f"{LANGFLOW_URL}{endpoint}" - return await self.langflow_http_client.request( + response = await self.langflow_http_client.request( method=method, url=url, headers=headers, **kwargs ) + # Retry once with a fresh API key on auth failure + if response.status_code in (401, 403): + logger.warning( + "Langflow request auth failed, regenerating API key and retrying", + status_code=response.status_code, + endpoint=endpoint, + ) + api_key = await get_langflow_api_key(force_regenerate=True) + if api_key: + headers["x-api-key"] = api_key + response = await self.langflow_http_client.request( + method=method, url=url, headers=headers, **kwargs + ) + + return response + async def _create_langflow_global_variable( self, name: str, value: str, modify: bool = False ): """Create a global variable in Langflow via API""" - api_key = await generate_langflow_api_key() - if not api_key: - logger.warning( - "Cannot create Langflow global variable: No API key", variable_name=name - ) - return - - url = f"{LANGFLOW_URL}/api/v1/variables/" payload = { "name": name, "value": value, "default_fields": [], "type": "Credential", } - headers = {"x-api-key": api_key, "Content-Type": "application/json"} try: - async with httpx.AsyncClient() as client: - response = await client.post(url, headers=headers, json=payload) + response = await self.langflow_request( + "POST", "/api/v1/variables/", json=payload + ) - if response.status_code in [200, 201]: + if response.status_code in [200, 201]: + logger.info( + "Successfully created Langflow global variable", + variable_name=name, + ) + elif response.status_code == 400 and "already exists" in response.text: + if modify: logger.info( - "Successfully created Langflow global variable", + "Langflow global variable already exists, attempting to update", variable_name=name, ) - elif response.status_code == 400 and "already exists" in response.text: - if modify: - logger.info( - "Langflow global variable already exists, attempting to update", - variable_name=name, - ) - await self._update_langflow_global_variable(name, value) - else: - logger.info( - "Langflow global variable already exists", - variable_name=name, - ) + await self._update_langflow_global_variable(name, value) else: - logger.warning( - "Failed to create Langflow global variable", + logger.info( + "Langflow global variable already exists", variable_name=name, - status_code=response.status_code, ) + else: + logger.warning( + "Failed to create Langflow global variable", + variable_name=name, + status_code=response.status_code, + ) except Exception as e: logger.error( "Exception creating Langflow global variable", @@ -635,76 +613,62 @@ class AppClients: async def _update_langflow_global_variable(self, name: str, value: str): """Update an existing global variable in Langflow via API""" - api_key = await generate_langflow_api_key() - if not api_key: - logger.warning( - "Cannot update Langflow global variable: No API key", variable_name=name - ) - return - - headers = {"x-api-key": api_key, "Content-Type": "application/json"} - try: - async with httpx.AsyncClient() as client: - # First, get all variables to find the one with the matching name - get_response = await client.get( - f"{LANGFLOW_URL}/api/v1/variables/", headers=headers + # First, get all variables to find the one with the matching name + get_response = await self.langflow_request("GET", "/api/v1/variables/") + 
+ if get_response.status_code != 200: + logger.error( + "Failed to retrieve variables for update", + variable_name=name, + status_code=get_response.status_code, ) + return - if get_response.status_code != 200: - logger.error( - "Failed to retrieve variables for update", - variable_name=name, - status_code=get_response.status_code, - ) - return + variables = get_response.json() + target_variable = None - variables = get_response.json() - target_variable = None + # Find the variable with matching name + for variable in variables: + if variable.get("name") == name: + target_variable = variable + break - # Find the variable with matching name - for variable in variables: - if variable.get("name") == name: - target_variable = variable - break + if not target_variable: + logger.error("Variable not found for update", variable_name=name) + return - if not target_variable: - logger.error("Variable not found for update", variable_name=name) - return + variable_id = target_variable.get("id") + if not variable_id: + logger.error("Variable ID not found for update", variable_name=name) + return - variable_id = target_variable.get("id") - if not variable_id: - logger.error("Variable ID not found for update", variable_name=name) - return + # Update the variable using PATCH + update_payload = { + "id": variable_id, + "name": name, + "value": value, + "default_fields": target_variable.get("default_fields", []), + } - # Update the variable using PATCH - update_payload = { - "id": variable_id, - "name": name, - "value": value, - "default_fields": target_variable.get("default_fields", []), - } + patch_response = await self.langflow_request( + "PATCH", f"/api/v1/variables/{variable_id}", json=update_payload + ) - patch_response = await client.patch( - f"{LANGFLOW_URL}/api/v1/variables/{variable_id}", - headers=headers, - json=update_payload, + if patch_response.status_code == 200: + logger.info( + "Successfully updated Langflow global variable", + variable_name=name, + variable_id=variable_id, + ) + else: + logger.warning( + "Failed to update Langflow global variable", + variable_name=name, + variable_id=variable_id, + status_code=patch_response.status_code, + response_text=patch_response.text, ) - - if patch_response.status_code == 200: - logger.info( - "Successfully updated Langflow global variable", - variable_name=name, - variable_id=variable_id, - ) - else: - logger.warning( - "Failed to update Langflow global variable", - variable_name=name, - variable_id=variable_id, - status_code=patch_response.status_code, - response_text=patch_response.text, - ) except Exception as e: logger.error( diff --git a/src/main.py b/src/main.py index e32bc081..1c3d065e 100644 --- a/src/main.py +++ b/src/main.py @@ -1179,6 +1179,18 @@ async def create_app(): ), methods=["POST"], ), + # Onboarding rollback endpoint + Route( + "/onboarding/rollback", + require_auth(services["session_manager"])( + partial( + settings.rollback_onboarding, + session_manager=services["session_manager"], + task_service=services["task_service"], + ) + ), + methods=["POST"], + ), # Docling preset update endpoint Route( "/settings/docling-preset", diff --git a/src/services/chat_service.py b/src/services/chat_service.py index e965623c..92c834a8 100644 --- a/src/services/chat_service.py +++ b/src/services/chat_service.py @@ -595,7 +595,7 @@ class ChatService: try: # Delete from local conversation storage from agent import delete_user_conversation - local_deleted = delete_user_conversation(user_id, session_id) + local_deleted = await 
delete_user_conversation(user_id, session_id)
 
             # Delete from Langflow using the monitor API
             langflow_deleted = await self._delete_langflow_session(session_id)
 
diff --git a/src/services/conversation_persistence_service.py b/src/services/conversation_persistence_service.py
index c6b62c24..0c7edc84 100644
--- a/src/services/conversation_persistence_service.py
+++ b/src/services/conversation_persistence_service.py
@@ -5,6 +5,7 @@ Simple service to persist chat conversations to disk so they survive server rest
 
 import json
 import os
+import asyncio
 from typing import Dict, Any
 from datetime import datetime
 import threading
@@ -33,8 +34,8 @@ class ConversationPersistenceService:
                 return {}
         return {}
 
-    def _save_conversations(self):
-        """Save conversations to disk"""
+    def _save_conversations_sync(self):
+        """Synchronously save conversations to disk (runs in an executor)"""
         try:
             with self.lock:
                 with open(self.storage_file, 'w', encoding='utf-8') as f:
@@ -43,6 +44,12 @@ class ConversationPersistenceService:
         except Exception as e:
             logger.error(f"Error saving conversations to {self.storage_file}: {e}")
 
+    async def _save_conversations(self):
+        """Async save conversations to disk (non-blocking)"""
+        # Run the synchronous file I/O in a thread pool to avoid blocking the event loop
+        loop = asyncio.get_running_loop()
+        await loop.run_in_executor(None, self._save_conversations_sync)
+
     def _count_total_conversations(self, data: Dict[str, Any]) -> int:
         """Count total conversations across all users"""
         total = 0
@@ -68,8 +75,8 @@ class ConversationPersistenceService:
         else:
             return obj
 
-    def store_conversation_thread(self, user_id: str, response_id: str, conversation_state: Dict[str, Any]):
-        """Store a conversation thread and persist to disk"""
+    async def store_conversation_thread(self, user_id: str, response_id: str, conversation_state: Dict[str, Any]):
+        """Store a conversation thread and persist to disk (async, non-blocking)"""
         if user_id not in self._conversations:
             self._conversations[user_id] = {}
 
@@ -78,28 +85,28 @@ class ConversationPersistenceService:
 
         self._conversations[user_id][response_id] = serialized_conversation
 
-        # Save to disk (we could optimize this with batching if needed)
-        self._save_conversations()
+        # Save to disk asynchronously (non-blocking)
+        await self._save_conversations()
 
     def get_conversation_thread(self, user_id: str, response_id: str) -> Dict[str, Any]:
         """Get a specific conversation thread"""
         user_conversations = self.get_user_conversations(user_id)
         return user_conversations.get(response_id, {})
 
-    def delete_conversation_thread(self, user_id: str, response_id: str) -> bool:
-        """Delete a specific conversation thread"""
+    async def delete_conversation_thread(self, user_id: str, response_id: str) -> bool:
+        """Delete a specific conversation thread (async, non-blocking)"""
        if user_id in self._conversations and response_id in self._conversations[user_id]:
             del self._conversations[user_id][response_id]
-            self._save_conversations()
+            await self._save_conversations()
             logger.debug(f"Deleted conversation {response_id} for user {user_id}")
             return True
         return False
 
-    def clear_user_conversations(self, user_id: str):
-        """Clear all conversations for a user"""
+    async def clear_user_conversations(self, user_id: str):
+        """Clear all conversations for a user (async, non-blocking)"""
         if user_id in self._conversations:
             del self._conversations[user_id]
-            self._save_conversations()
+            await self._save_conversations()
             logger.debug(f"Cleared all conversations for user {user_id}")
 
     def get_storage_stats(self) -> 
Dict[str, Any]: diff --git a/src/utils/telemetry/message_id.py b/src/utils/telemetry/message_id.py index af242257..c00e5eb3 100644 --- a/src/utils/telemetry/message_id.py +++ b/src/utils/telemetry/message_id.py @@ -199,3 +199,5 @@ class MessageId: ORB_ONBOARD_SAMPLE_DATA = "ORB_ONBOARD_SAMPLE_DATA" # Message: Configuration marked as edited ORB_ONBOARD_CONFIG_EDITED = "ORB_ONBOARD_CONFIG_EDITED" + # Message: Onboarding rolled back due to all files failing + ORB_ONBOARD_ROLLBACK = "ORB_ONBOARD_ROLLBACK"
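
For reference, a minimal, self-contained sketch of the retry-once-on-auth-failure control flow that `langflow_request` now implements. `FakeClient`, `FakeResponse`, and `get_key` are hypothetical stand-ins for the real httpx client and `get_langflow_api_key`; only the control flow mirrors the diff above.

```python
import asyncio


class FakeResponse:
    def __init__(self, status_code: int) -> None:
        self.status_code = status_code


class FakeClient:
    """Accepts only the regenerated key, rejecting the stale cached one."""

    async def request(self, method: str, url: str, headers: dict, **kwargs) -> FakeResponse:
        ok = headers.get("x-api-key") == "fresh-key"
        return FakeResponse(200 if ok else 401)


async def get_key(force_regenerate: bool = False) -> str:
    # Stand-in for get_langflow_api_key(): regeneration always yields a valid key.
    return "fresh-key" if force_regenerate else "stale-key"


async def langflow_request(client: FakeClient, method: str, endpoint: str, **kwargs) -> FakeResponse:
    headers = {"x-api-key": await get_key()}
    response = await client.request(method, endpoint, headers, **kwargs)
    if response.status_code in (401, 403):
        # Regenerate the key and retry exactly once, as in the diff.
        headers["x-api-key"] = await get_key(force_regenerate=True)
        response = await client.request(method, endpoint, headers, **kwargs)
    return response


async def main() -> None:
    response = await langflow_request(FakeClient(), "GET", "/api/v1/flows/")
    assert response.status_code == 200  # stale key rejected, fresh key accepted


asyncio.run(main())
```

Only one retry is attempted, so a regenerated key that is still rejected surfaces the 401/403 to the caller rather than looping.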