Update Langflow tweaks and add provider credentials to headers

Replaces all references to 'OpenSearchHybrid-Ve6bS' with 'OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4' in main.py, processors, and file service. Adds a utility for injecting provider credentials into Langflow request headers and integrates it into chat and file services for improved credential handling.
This commit is contained in:
Edwin Jose 2025-11-25 22:50:38 -05:00
parent 07b84e373a
commit 0f2012bbb9
5 changed files with 64 additions and 10 deletions

View file

@@ -370,7 +370,7 @@ async def _ingest_default_documents_langflow(services, file_paths):
# Prepare tweaks for default documents with anonymous user metadata
default_tweaks = {
"OpenSearchHybrid-Ve6bS": {
"OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4": {
"docs_metadata": [
{"key": "owner", "value": None},
{"key": "owner_name", "value": anonymous_user.name},

View file

@@ -743,9 +743,9 @@ class LangflowFileProcessor(TaskProcessor):
if metadata_tweaks:
# Initialize the OpenSearch component tweaks if not already present
if "OpenSearchHybrid-Ve6bS" not in final_tweaks:
final_tweaks["OpenSearchHybrid-Ve6bS"] = {}
final_tweaks["OpenSearchHybrid-Ve6bS"]["docs_metadata"] = metadata_tweaks
if "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4" not in final_tweaks:
final_tweaks["OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4"] = {}
final_tweaks["OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4"]["docs_metadata"] = metadata_tweaks
# Process file using langflow service
result = await self.langflow_file_service.upload_and_ingest_file(

View file

@@ -67,10 +67,15 @@ class ChatService:
# Pass the selected embedding model as a global variable
from config.settings import get_openrag_config
from utils.langflow_headers import add_provider_credentials_to_headers
config = get_openrag_config()
embedding_model = config.knowledge.embedding_model
extra_headers["X-LANGFLOW-GLOBAL-VAR-SELECTED_EMBEDDING_MODEL"] = embedding_model
# Add provider credentials to headers
add_provider_credentials_to_headers(extra_headers, config)
logger.debug(f"[LF] Extra headers {extra_headers}")
# Get context variables for filters, limit, and threshold
from auth_context import (
get_score_threshold,
@@ -182,10 +187,15 @@ class ChatService:
# Pass the selected embedding model as a global variable
from config.settings import get_openrag_config
from utils.langflow_headers import add_provider_credentials_to_headers
config = get_openrag_config()
embedding_model = config.knowledge.embedding_model
extra_headers["X-LANGFLOW-GLOBAL-VAR-SELECTED_EMBEDDING_MODEL"] = embedding_model
# Add provider credentials to headers
add_provider_credentials_to_headers(extra_headers, config)
# Build the complete filter expression like the chat service does
filter_expression = {}
has_user_filters = False
@@ -306,9 +316,15 @@ class ChatService:
# Pass the selected embedding model as a global variable
from config.settings import get_openrag_config
from utils.langflow_headers import add_provider_credentials_to_headers
config = get_openrag_config()
embedding_model = config.knowledge.embedding_model
extra_headers["X-LANGFLOW-GLOBAL-VAR-SELECTED_EMBEDDING_MODEL"] = embedding_model
# Add provider credentials to headers
add_provider_credentials_to_headers(extra_headers, config)
# Ensure the Langflow client exists; try lazy init if needed
langflow_client = await clients.ensure_langflow_client()
if not langflow_client:

View file

@@ -94,7 +94,7 @@ class LangflowFileService:
# Pass JWT token via tweaks using the x-langflow-global-var- pattern
if jwt_token:
# Using the global variable pattern that Langflow expects for OpenSearch components
tweaks["OpenSearchHybrid-Ve6bS"] = {"jwt_token": jwt_token}
tweaks["OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4"] = {"jwt_token": jwt_token}
logger.debug("[LF] Added JWT token to tweaks for OpenSearch components")
else:
logger.warning("[LF] No JWT token provided")
@@ -112,9 +112,9 @@ class LangflowFileService:
logger.info(f"[LF] Metadata tweaks {metadata_tweaks}")
# if metadata_tweaks:
# # Initialize the OpenSearch component tweaks if not already present
# if "OpenSearchHybrid-Ve6bS" not in tweaks:
# tweaks["OpenSearchHybrid-Ve6bS"] = {}
# tweaks["OpenSearchHybrid-Ve6bS"]["docs_metadata"] = metadata_tweaks
# if "OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4" not in tweaks:
# tweaks["OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4"] = {}
# tweaks["OpenSearchVectorStoreComponentMultimodalMultiEmbedding-By9U4"]["docs_metadata"] = metadata_tweaks
# logger.debug(
# "[LF] Added metadata to tweaks", metadata_count=len(metadata_tweaks)
# )
@@ -140,8 +140,10 @@ class LangflowFileService:
filename = str(file_tuples[0][0]) if file_tuples and len(file_tuples) > 0 else ""
mimetype = str(file_tuples[0][2]) if file_tuples and len(file_tuples) > 0 and len(file_tuples[0]) > 2 else ""
# Get the current embedding model from config
# Get the current embedding model and provider credentials from config
from config.settings import get_openrag_config
from utils.langflow_headers import add_provider_credentials_to_headers
config = get_openrag_config()
embedding_model = config.knowledge.embedding_model
@@ -156,6 +158,9 @@ class LangflowFileService:
"X-Langflow-Global-Var-FILESIZE": str(file_size_bytes),
"X-Langflow-Global-Var-SELECTED_EMBEDDING_MODEL": str(embedding_model),
}
# Add provider credentials as global variables for ingestion
add_provider_credentials_to_headers(headers, config)
logger.info(f"[LF] Headers {headers}")
logger.info(f"[LF] Payload {payload}")
resp = await clients.langflow_request(

View file

@@ -0,0 +1,33 @@
"""Utility functions for building Langflow request headers."""
from typing import Dict
from utils.container_utils import transform_localhost_url
def add_provider_credentials_to_headers(headers: Dict[str, str], config) -> None:
    """Add provider credentials to headers as Langflow global variables.

    Mutates *headers* in place; only truthy credential values are injected.

    Args:
        headers: Dictionary of headers to add credentials to
        config: OpenRAGConfig object containing provider configurations
    """
    providers = config.providers

    # Key-style secrets, mapped header-name -> configured value. Each entry
    # is added only when the config actually supplies a value.
    credential_map = {
        "X-LANGFLOW-GLOBAL-VAR-OPENAI_API_KEY": providers.openai.api_key,
        "X-LANGFLOW-GLOBAL-VAR-ANTHROPIC_API_KEY": providers.anthropic.api_key,
        "X-LANGFLOW-GLOBAL-VAR-WATSONX_API_KEY": providers.watsonx.api_key,
        "X-LANGFLOW-GLOBAL-VAR-WATSONX_PROJECT_ID": providers.watsonx.project_id,
    }
    for header_name, credential in credential_map.items():
        if credential:
            headers[header_name] = str(credential)

    # Ollama is endpoint-based rather than key-based; rewrite localhost URLs
    # so they resolve from inside a container before exposing the value.
    if providers.ollama.endpoint:
        base_url = transform_localhost_url(providers.ollama.endpoint)
        headers["X-LANGFLOW-GLOBAL-VAR-OLLAMA_BASE_URL"] = str(base_url)