Merge branch 'main' into default-data

This commit is contained in:
Sebastián Estévez 2025-09-04 13:55:51 -04:00 committed by GitHub
commit e4e3535fe2
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
38 changed files with 4015 additions and 366 deletions

View file

@ -1,5 +1,3 @@
# make one like so https://docs.langflow.org/api-keys-and-authentication#langflow-secret-key
LANGFLOW_SECRET_KEY=
# flow id from the openrag flow json
FLOW_ID=1098eea1-6649-4e1d-aed1-b77249fb8dd0
# Set a strong admin password for OpenSearch; a bcrypt hash is generated at
@ -21,3 +19,10 @@ AWS_SECRET_ACCESS_KEY=
# OPTIONAL url for openrag link to langflow in the UI
LANGFLOW_PUBLIC_URL=
# Langflow auth
LANGFLOW_AUTO_LOGIN=False
LANGFLOW_SUPERUSER=
LANGFLOW_SUPERUSER_PASSWORD=
LANGFLOW_NEW_USER_IS_ACTIVE=False
LANGFLOW_ENABLE_SUPERUSER_CLI=False

View file

@ -6,115 +6,126 @@ on:
jobs:
build:
strategy:
fail-fast: false
matrix:
include:
- platform: linux/amd64
# backend
- image: backend
file: ./Dockerfile.backend
tag: phact/openrag-backend
platform: linux/amd64
arch: amd64
runs-on: ubuntu-latest
arch-suffix: amd64
- platform: linux/arm64
runs-on: self-hosted
arch-suffix: arm64
- image: backend
file: ./Dockerfile.backend
tag: phact/openrag-backend
platform: linux/arm64
arch: arm64
runs-on: self-hosted
# frontend
- image: frontend
file: ./Dockerfile.frontend
tag: phact/openrag-frontend
platform: linux/amd64
arch: amd64
runs-on: ubuntu-latest
- image: frontend
file: ./Dockerfile.frontend
tag: phact/openrag-frontend
platform: linux/arm64
arch: arm64
runs-on: self-hosted
# opensearch
- image: opensearch
file: ./Dockerfile
tag: phact/openrag-opensearch
platform: linux/amd64
arch: amd64
runs-on: ubuntu-latest
- image: opensearch
file: ./Dockerfile
tag: phact/openrag-opensearch
platform: linux/arm64
arch: arm64
runs-on: self-hosted
runs-on: ${{ matrix.runs-on }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Checkout
uses: actions/checkout@v4
- name: Extract version from pyproject.toml
id: version
run: |
VERSION=$(grep '^version = ' pyproject.toml | cut -d '"' -f 2)
echo "version=$VERSION" >> $GITHUB_OUTPUT
echo "Version: $VERSION"
- name: Extract version from pyproject.toml
id: version
run: |
VERSION=$(grep '^version = ' pyproject.toml | cut -d '"' -f 2)
echo "version=$VERSION" >> $GITHUB_OUTPUT
echo "Version: $VERSION"
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to Docker Hub
if: github.event_name != 'pull_request'
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Login to Docker Hub
if: github.event_name != 'pull_request'
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Build and push backend
uses: docker/build-push-action@v5
with:
context: .
file: ./Dockerfile.backend
platforms: ${{ matrix.platform }}
push: ${{ github.event_name != 'pull_request' }}
tags: phact/openrag-backend:${{ steps.version.outputs.version }}-${{ matrix.arch-suffix }}
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Build and push frontend
uses: docker/build-push-action@v5
with:
context: .
file: ./Dockerfile.frontend
platforms: ${{ matrix.platform }}
push: ${{ github.event_name != 'pull_request' }}
tags: phact/openrag-frontend:${{ steps.version.outputs.version }}-${{ matrix.arch-suffix }}
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Build and push OpenSearch
uses: docker/build-push-action@v5
with:
context: .
file: ./Dockerfile
platforms: ${{ matrix.platform }}
push: ${{ github.event_name != 'pull_request' }}
tags: phact/openrag-opensearch:${{ steps.version.outputs.version }}-${{ matrix.arch-suffix }}
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Build and push ${{ matrix.image }} (${{ matrix.arch }})
uses: docker/build-push-action@v5
with:
context: .
file: ${{ matrix.file }}
platforms: ${{ matrix.platform }}
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ matrix.tag }}:${{ steps.version.outputs.version }}-${{ matrix.arch }}
cache-from: type=gha,scope=${{ matrix.image }}-${{ matrix.arch }}
cache-to: type=gha,mode=max,scope=${{ matrix.image }}-${{ matrix.arch }}
manifest:
needs: build
runs-on: ubuntu-latest
if: github.event_name != 'pull_request'
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Checkout
uses: actions/checkout@v4
- name: Extract version from pyproject.toml
id: version
run: |
VERSION=$(grep '^version = ' pyproject.toml | cut -d '"' -f 2)
echo "version=$VERSION" >> $GITHUB_OUTPUT
- name: Extract version from pyproject.toml
id: version
run: |
VERSION=$(grep '^version = ' pyproject.toml | cut -d '"' -f 2)
echo "version=$VERSION" >> $GITHUB_OUTPUT
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Create and push multi-arch manifests
run: |
VERSION=${{ steps.version.outputs.version }}
# Backend manifest
docker buildx imagetools create -t phact/openrag-backend:$VERSION \
phact/openrag-backend:$VERSION-amd64 \
phact/openrag-backend:$VERSION-arm64
docker buildx imagetools create -t phact/openrag-backend:latest \
phact/openrag-backend:$VERSION-amd64 \
phact/openrag-backend:$VERSION-arm64
# Frontend manifest
docker buildx imagetools create -t phact/openrag-frontend:$VERSION \
phact/openrag-frontend:$VERSION-amd64 \
phact/openrag-frontend:$VERSION-arm64
docker buildx imagetools create -t phact/openrag-frontend:latest \
phact/openrag-frontend:$VERSION-amd64 \
phact/openrag-frontend:$VERSION-arm64
# OpenSearch manifest
docker buildx imagetools create -t phact/openrag-opensearch:$VERSION \
phact/openrag-opensearch:$VERSION-amd64 \
phact/openrag-opensearch:$VERSION-arm64
docker buildx imagetools create -t phact/openrag-opensearch:latest \
phact/openrag-opensearch:$VERSION-amd64 \
phact/openrag-opensearch:$VERSION-arm64
- name: Create and push multi-arch manifests
run: |
VERSION=${{ steps.version.outputs.version }}
docker buildx imagetools create -t phact/openrag-backend:$VERSION \
phact/openrag-backend:$VERSION-amd64 \
phact/openrag-backend:$VERSION-arm64
docker buildx imagetools create -t phact/openrag-backend:latest \
phact/openrag-backend:$VERSION-amd64 \
phact/openrag-backend:$VERSION-arm64
docker buildx imagetools create -t phact/openrag-frontend:$VERSION \
phact/openrag-frontend:$VERSION-amd64 \
phact/openrag-frontend:$VERSION-arm64
docker buildx imagetools create -t phact/openrag-frontend:latest \
phact/openrag-frontend:$VERSION-amd64 \
phact/openrag-frontend:$VERSION-arm64
docker buildx imagetools create -t phact/openrag-opensearch:$VERSION \
phact/openrag-opensearch:$VERSION-amd64 \
phact/openrag-opensearch:$VERSION-arm64
docker buildx imagetools create -t phact/openrag-opensearch:latest \
phact/openrag-opensearch:$VERSION-amd64 \
phact/openrag-opensearch:$VERSION-arm64

View file

@ -33,6 +33,10 @@ RUN echo "asprof \$@" >> /usr/share/opensearch/profile.sh
RUN chmod 777 /usr/share/opensearch/profile.sh
# Copy OIDC and DLS security configuration (as root)
COPY securityconfig/ /usr/share/opensearch/securityconfig/
RUN chown -R opensearch:opensearch /usr/share/opensearch/securityconfig/
USER opensearch
RUN opensearch-plugin remove opensearch-neural-search
@ -46,16 +50,13 @@ RUN echo y | opensearch-plugin install repository-gcs
RUN echo y | opensearch-plugin install repository-azure
RUN echo y | opensearch-plugin install repository-s3
# Copy OIDC and DLS security configuration
COPY securityconfig/ /usr/share/opensearch/securityconfig/
# Create a script to apply security configuration after OpenSearch starts
RUN echo '#!/bin/bash' > /usr/share/opensearch/setup-security.sh && \
echo 'echo "Waiting for OpenSearch to start..."' >> /usr/share/opensearch/setup-security.sh && \
echo 'until curl -s -k -u admin:${OPENSEARCH_INITIAL_ADMIN_PASSWORD} https://localhost:9200; do sleep 1; done' >> /usr/share/opensearch/setup-security.sh && \
echo 'echo "Generating admin hash from OPENSEARCH_INITIAL_ADMIN_PASSWORD..."' >> /usr/share/opensearch/setup-security.sh && \
echo 'if [ -z "${OPENSEARCH_INITIAL_ADMIN_PASSWORD}" ]; then echo "[ERROR] OPENSEARCH_INITIAL_ADMIN_PASSWORD not set"; exit 1; fi' >> /usr/share/opensearch/setup-security.sh && \
echo 'HASH=$(/usr/share/opensearch/plugins/opensearch-security/tools/hash.sh -p "${OPENSEARCH_INITIAL_ADMIN_PASSWORD}" | sed -n '\''s/^hash: //p'\'')' >> /usr/share/opensearch/setup-security.sh && \
echo 'HASH=$(/usr/share/opensearch/plugins/opensearch-security/tools/hash.sh -p "${OPENSEARCH_INITIAL_ADMIN_PASSWORD}")' >> /usr/share/opensearch/setup-security.sh && \
echo 'if [ -z "$HASH" ]; then echo "[ERROR] Failed to generate admin hash"; exit 1; fi' >> /usr/share/opensearch/setup-security.sh && \
echo 'sed -i "s|^ hash: \".*\"| hash: \"$HASH\"|" /usr/share/opensearch/securityconfig/internal_users.yml' >> /usr/share/opensearch/setup-security.sh && \
echo 'echo "Updated internal_users.yml with runtime-generated admin hash"' >> /usr/share/opensearch/setup-security.sh && \

BIN
frontend/src/app/icon.ico Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 15 KiB

View file

@ -22,6 +22,11 @@ dependencies = [
"uvicorn>=0.35.0",
"boto3>=1.35.0",
"psutil>=7.0.0",
"rich>=13.0.0",
"textual>=0.45.0",
"python-dotenv>=1.0.0",
"textual-fspicker>=0.6.0",
"structlog>=25.4.0",
]
[tool.uv.sources]

View file

@ -1,3 +1,7 @@
from utils.logging_config import get_logger
logger = get_logger(__name__)
# User-scoped conversation state - keyed by user_id -> response_id -> conversation
user_conversations = {} # user_id -> {response_id: {"messages": [...], "previous_response_id": parent_id, "created_at": timestamp, "last_activity": timestamp}}
@ -65,7 +69,7 @@ async def async_response_stream(
previous_response_id: str = None,
log_prefix: str = "response",
):
print(f"user ==> {prompt}")
logger.info("User prompt received", prompt=prompt)
try:
# Build request parameters
@ -91,7 +95,7 @@ async def async_response_stream(
chunk_count = 0
async for chunk in response:
chunk_count += 1
print(f"[DEBUG] Chunk {chunk_count}: {chunk}")
logger.debug("Stream chunk received", chunk_count=chunk_count, chunk=str(chunk))
# Yield the raw event as JSON for the UI to process
import json
@ -125,7 +129,7 @@ async def async_response_stream(
yield (json.dumps(chunk_data, default=str) + "\n").encode("utf-8")
except Exception as e:
# Fallback to string representation
print(f"[DEBUG] JSON serialization failed: {e}")
logger.warning("JSON serialization failed", error=str(e))
yield (
json.dumps(
{"error": f"Serialization failed: {e}", "raw": str(chunk)}
@ -133,11 +137,11 @@ async def async_response_stream(
+ "\n"
).encode("utf-8")
print(f"[DEBUG] Stream complete. Total chunks: {chunk_count}")
print(f"{log_prefix} ==> {full_response}")
logger.debug("Stream complete", total_chunks=chunk_count)
logger.info("Response generated", log_prefix=log_prefix, response=full_response)
except Exception as e:
print(f"[ERROR] Exception in streaming: {e}")
logger.error("Exception in streaming", error=str(e))
import traceback
traceback.print_exc()
@ -153,7 +157,7 @@ async def async_response(
previous_response_id: str = None,
log_prefix: str = "response",
):
print(f"user ==> {prompt}")
logger.info("User prompt received", prompt=prompt)
# Build request parameters
request_params = {
@ -170,7 +174,7 @@ async def async_response(
response = await client.responses.create(**request_params)
response_text = response.output_text
print(f"{log_prefix} ==> {response_text}")
logger.info("Response generated", log_prefix=log_prefix, response=response_text)
# Extract and store response_id if available
response_id = getattr(response, "id", None) or getattr(
@ -227,7 +231,7 @@ async def async_langflow_stream(
extra_headers: dict = None,
previous_response_id: str = None,
):
print(f"[DEBUG] Starting langflow stream for prompt: {prompt}")
logger.debug("Starting langflow stream", prompt=prompt)
try:
async for chunk in async_stream(
langflow_client,
@ -237,11 +241,11 @@ async def async_langflow_stream(
previous_response_id=previous_response_id,
log_prefix="langflow",
):
print(f"[DEBUG] Yielding chunk from langflow_stream: {chunk[:100]}...")
logger.debug("Yielding chunk from langflow stream", chunk_preview=chunk[:100].decode('utf-8', errors='replace'))
yield chunk
print(f"[DEBUG] Langflow stream completed")
logger.debug("Langflow stream completed")
except Exception as e:
print(f"[ERROR] Exception in langflow_stream: {e}")
logger.error("Exception in langflow stream", error=str(e))
import traceback
traceback.print_exc()
@ -256,24 +260,18 @@ async def async_chat(
model: str = "gpt-4.1-mini",
previous_response_id: str = None,
):
print(
f"[DEBUG] async_chat called with user_id: {user_id}, previous_response_id: {previous_response_id}"
)
logger.debug("async_chat called", user_id=user_id, previous_response_id=previous_response_id)
# Get the specific conversation thread (or create new one)
conversation_state = get_conversation_thread(user_id, previous_response_id)
print(
f"[DEBUG] Got conversation_state with {len(conversation_state['messages'])} messages"
)
logger.debug("Got conversation state", message_count=len(conversation_state['messages']))
# Add user message to conversation with timestamp
from datetime import datetime
user_message = {"role": "user", "content": prompt, "timestamp": datetime.now()}
conversation_state["messages"].append(user_message)
print(
f"[DEBUG] Added user message, now {len(conversation_state['messages'])} messages"
)
logger.debug("Added user message", message_count=len(conversation_state['messages']))
response_text, response_id = await async_response(
async_client,
@ -282,9 +280,7 @@ async def async_chat(
previous_response_id=previous_response_id,
log_prefix="agent",
)
print(
f"[DEBUG] Got response_text: {response_text[:50]}..., response_id: {response_id}"
)
logger.debug("Got response", response_preview=response_text[:50], response_id=response_id)
# Add assistant response to conversation with response_id and timestamp
assistant_message = {
@ -294,25 +290,19 @@ async def async_chat(
"timestamp": datetime.now(),
}
conversation_state["messages"].append(assistant_message)
print(
f"[DEBUG] Added assistant message, now {len(conversation_state['messages'])} messages"
)
logger.debug("Added assistant message", message_count=len(conversation_state['messages']))
# Store the conversation thread with its response_id
if response_id:
conversation_state["last_activity"] = datetime.now()
store_conversation_thread(user_id, response_id, conversation_state)
print(
f"[DEBUG] Stored conversation thread for user {user_id} with response_id: {response_id}"
)
logger.debug("Stored conversation thread", user_id=user_id, response_id=response_id)
# Debug: Check what's in user_conversations now
conversations = get_user_conversations(user_id)
print(
f"[DEBUG] user_conversations now has {len(conversations)} conversations: {list(conversations.keys())}"
)
logger.debug("User conversations updated", user_id=user_id, conversation_count=len(conversations), conversation_ids=list(conversations.keys()))
else:
print(f"[DEBUG] WARNING: No response_id received, conversation not stored!")
logger.warning("No response_id received, conversation not stored")
return response_text, response_id
@ -373,9 +363,7 @@ async def async_chat_stream(
if response_id:
conversation_state["last_activity"] = datetime.now()
store_conversation_thread(user_id, response_id, conversation_state)
print(
f"Stored conversation thread for user {user_id} with response_id: {response_id}"
)
logger.debug("Stored conversation thread", user_id=user_id, response_id=response_id)
# Async langflow function with conversation storage (non-streaming)
@ -387,24 +375,18 @@ async def async_langflow_chat(
extra_headers: dict = None,
previous_response_id: str = None,
):
print(
f"[DEBUG] async_langflow_chat called with user_id: {user_id}, previous_response_id: {previous_response_id}"
)
logger.debug("async_langflow_chat called", user_id=user_id, previous_response_id=previous_response_id)
# Get the specific conversation thread (or create new one)
conversation_state = get_conversation_thread(user_id, previous_response_id)
print(
f"[DEBUG] Got langflow conversation_state with {len(conversation_state['messages'])} messages"
)
logger.debug("Got langflow conversation state", message_count=len(conversation_state['messages']))
# Add user message to conversation with timestamp
from datetime import datetime
user_message = {"role": "user", "content": prompt, "timestamp": datetime.now()}
conversation_state["messages"].append(user_message)
print(
f"[DEBUG] Added user message to langflow, now {len(conversation_state['messages'])} messages"
)
logger.debug("Added user message to langflow", message_count=len(conversation_state['messages']))
response_text, response_id = await async_response(
langflow_client,
@ -414,9 +396,7 @@ async def async_langflow_chat(
previous_response_id=previous_response_id,
log_prefix="langflow",
)
print(
f"[DEBUG] Got langflow response_text: {response_text[:50]}..., response_id: {response_id}"
)
logger.debug("Got langflow response", response_preview=response_text[:50], response_id=response_id)
# Add assistant response to conversation with response_id and timestamp
assistant_message = {
@ -426,27 +406,19 @@ async def async_langflow_chat(
"timestamp": datetime.now(),
}
conversation_state["messages"].append(assistant_message)
print(
f"[DEBUG] Added assistant message to langflow, now {len(conversation_state['messages'])} messages"
)
logger.debug("Added assistant message to langflow", message_count=len(conversation_state['messages']))
# Store the conversation thread with its response_id
if response_id:
conversation_state["last_activity"] = datetime.now()
store_conversation_thread(user_id, response_id, conversation_state)
print(
f"[DEBUG] Stored langflow conversation thread for user {user_id} with response_id: {response_id}"
)
logger.debug("Stored langflow conversation thread", user_id=user_id, response_id=response_id)
# Debug: Check what's in user_conversations now
conversations = get_user_conversations(user_id)
print(
f"[DEBUG] user_conversations now has {len(conversations)} conversations: {list(conversations.keys())}"
)
logger.debug("User conversations updated", user_id=user_id, conversation_count=len(conversations), conversation_ids=list(conversations.keys()))
else:
print(
f"[DEBUG] WARNING: No response_id received from langflow, conversation not stored!"
)
logger.warning("No response_id received from langflow, conversation not stored")
return response_text, response_id
@ -460,9 +432,7 @@ async def async_langflow_chat_stream(
extra_headers: dict = None,
previous_response_id: str = None,
):
print(
f"[DEBUG] async_langflow_chat_stream called with user_id: {user_id}, previous_response_id: {previous_response_id}"
)
logger.debug("async_langflow_chat_stream called", user_id=user_id, previous_response_id=previous_response_id)
# Get the specific conversation thread (or create new one)
conversation_state = get_conversation_thread(user_id, previous_response_id)
@ -513,6 +483,4 @@ async def async_langflow_chat_stream(
if response_id:
conversation_state["last_activity"] = datetime.now()
store_conversation_thread(user_id, response_id, conversation_state)
print(
f"[DEBUG] Stored langflow conversation thread for user {user_id} with response_id: {response_id}"
)
logger.debug("Stored langflow conversation thread", user_id=user_id, response_id=response_id)

View file

@ -1,5 +1,8 @@
from starlette.requests import Request
from starlette.responses import JSONResponse, StreamingResponse
from utils.logging_config import get_logger
logger = get_logger(__name__)
async def chat_endpoint(request: Request, chat_service, session_manager):
@ -122,7 +125,7 @@ async def langflow_endpoint(request: Request, chat_service, session_manager):
import traceback
traceback.print_exc()
print(f"[ERROR] Langflow request failed: {str(e)}")
logger.error("Langflow request failed", error=str(e))
return JSONResponse(
{"error": f"Langflow request failed: {str(e)}"}, status_code=500
)

View file

@ -1,5 +1,8 @@
from starlette.requests import Request
from starlette.responses import JSONResponse, PlainTextResponse
from utils.logging_config import get_logger
logger = get_logger(__name__)
async def list_connectors(request: Request, connector_service, session_manager):
@ -10,7 +13,7 @@ async def list_connectors(request: Request, connector_service, session_manager):
)
return JSONResponse({"connectors": connector_types})
except Exception as e:
print(f"Error listing connectors: {e}")
logger.error("Error listing connectors", error=str(e))
return JSONResponse({"error": str(e)}, status_code=500)
@ -21,13 +24,11 @@ async def connector_sync(request: Request, connector_service, session_manager):
max_files = data.get("max_files")
try:
print(
f"[DEBUG] Starting connector sync for connector_type={connector_type}, max_files={max_files}"
)
logger.debug("Starting connector sync", connector_type=connector_type, max_files=max_files)
user = request.state.user
jwt_token = request.state.jwt_token
print(f"[DEBUG] User: {user.user_id}")
logger.debug("User authenticated", user_id=user.user_id)
# Get all active connections for this connector type and user
connections = await connector_service.connection_manager.list_connections(
@ -44,14 +45,12 @@ async def connector_sync(request: Request, connector_service, session_manager):
# Start sync tasks for all active connections
task_ids = []
for connection in active_connections:
print(
f"[DEBUG] About to call sync_connector_files for connection {connection.connection_id}"
)
logger.debug("About to call sync_connector_files for connection", connection_id=connection.connection_id)
task_id = await connector_service.sync_connector_files(
connection.connection_id, user.user_id, max_files, jwt_token=jwt_token
)
task_ids.append(task_id)
print(f"[DEBUG] Got task_id: {task_id}")
logger.debug("Got task ID", task_id=task_id)
return JSONResponse(
{
@ -68,7 +67,7 @@ async def connector_sync(request: Request, connector_service, session_manager):
import traceback
error_msg = f"[ERROR] Connector sync failed: {str(e)}"
print(error_msg, file=sys.stderr, flush=True)
logger.error(error_msg)
traceback.print_exc(file=sys.stderr)
sys.stderr.flush()
@ -156,7 +155,7 @@ async def connector_webhook(request: Request, connector_service, session_manager
payload["_headers"] = headers
payload["_method"] = request.method
print(f"[WEBHOOK] {connector_type} notification received")
logger.info("Webhook notification received", connector_type=connector_type)
# Extract channel/subscription ID using connector-specific method
try:
@ -168,7 +167,7 @@ async def connector_webhook(request: Request, connector_service, session_manager
channel_id = None
if not channel_id:
print(f"[WEBHOOK] No channel ID found in {connector_type} webhook")
logger.warning("No channel ID found in webhook", connector_type=connector_type)
return JSONResponse({"status": "ignored", "reason": "no_channel_id"})
# Find the specific connection for this webhook
@ -178,9 +177,7 @@ async def connector_webhook(request: Request, connector_service, session_manager
)
)
if not connection or not connection.is_active:
print(
f"[WEBHOOK] Unknown channel {channel_id} - no cleanup attempted (will auto-expire)"
)
logger.info("Unknown webhook channel, will auto-expire", channel_id=channel_id)
return JSONResponse(
{"status": "ignored_unknown_channel", "channel_id": channel_id}
)
@ -191,9 +188,7 @@ async def connector_webhook(request: Request, connector_service, session_manager
# Get the connector instance
connector = await connector_service._get_connector(connection.connection_id)
if not connector:
print(
f"[WEBHOOK] Could not get connector for connection {connection.connection_id}"
)
logger.error("Could not get connector for connection", connection_id=connection.connection_id)
return JSONResponse(
{"status": "error", "reason": "connector_not_found"}
)
@ -202,9 +197,7 @@ async def connector_webhook(request: Request, connector_service, session_manager
affected_files = await connector.handle_webhook(payload)
if affected_files:
print(
f"[WEBHOOK] Connection {connection.connection_id}: {len(affected_files)} files affected"
)
logger.info("Webhook connection files affected", connection_id=connection.connection_id, affected_count=len(affected_files))
# Generate JWT token for the user (needed for OpenSearch authentication)
user = session_manager.get_user(connection.user_id)
@ -228,9 +221,7 @@ async def connector_webhook(request: Request, connector_service, session_manager
}
else:
# No specific files identified - just log the webhook
print(
f"[WEBHOOK] Connection {connection.connection_id}: general change detected, no specific files to sync"
)
logger.info("Webhook general change detected, no specific files", connection_id=connection.connection_id)
result = {
"connection_id": connection.connection_id,
@ -248,9 +239,7 @@ async def connector_webhook(request: Request, connector_service, session_manager
)
except Exception as e:
print(
f"[ERROR] Failed to process webhook for connection {connection.connection_id}: {e}"
)
logger.error("Failed to process webhook for connection", connection_id=connection.connection_id, error=str(e))
import traceback
traceback.print_exc()
@ -267,7 +256,7 @@ async def connector_webhook(request: Request, connector_service, session_manager
except Exception as e:
import traceback
print(f"[ERROR] Webhook processing failed: {str(e)}")
logger.error("Webhook processing failed", error=str(e))
traceback.print_exc()
return JSONResponse(
{"error": f"Webhook processing failed: {str(e)}"}, status_code=500

View file

@ -3,6 +3,9 @@ from starlette.responses import JSONResponse
import uuid
import json
from datetime import datetime
from utils.logging_config import get_logger
logger = get_logger(__name__)
async def create_knowledge_filter(
@ -392,17 +395,15 @@ async def knowledge_filter_webhook(
# Get the webhook payload
payload = await request.json()
print(
f"[WEBHOOK] Knowledge filter webhook received for filter {filter_id}, subscription {subscription_id}"
)
print(f"[WEBHOOK] Payload: {json.dumps(payload, indent=2)}")
logger.info("Knowledge filter webhook received",
filter_id=filter_id,
subscription_id=subscription_id,
payload_size=len(str(payload)))
# Extract findings from the payload
findings = payload.get("findings", [])
if not findings:
print(
f"[WEBHOOK] No findings in webhook payload for subscription {subscription_id}"
)
logger.info("No findings in webhook payload", subscription_id=subscription_id)
return JSONResponse({"status": "no_findings"})
# Process the findings - these are the documents that matched the knowledge filter
@ -419,13 +420,14 @@ async def knowledge_filter_webhook(
)
# Log the matched documents
print(
f"[WEBHOOK] Knowledge filter {filter_id} matched {len(matched_documents)} documents"
)
logger.info("Knowledge filter matched documents",
filter_id=filter_id,
matched_count=len(matched_documents))
for doc in matched_documents:
print(
f"[WEBHOOK] Matched document: {doc['document_id']} from index {doc['index']}"
)
logger.debug("Matched document",
document_id=doc['document_id'],
index=doc['index'],
score=doc.get('score'))
# Here you could add additional processing:
# - Send notifications to external webhooks
@ -444,7 +446,10 @@ async def knowledge_filter_webhook(
)
except Exception as e:
print(f"[ERROR] Failed to process knowledge filter webhook: {str(e)}")
logger.error("Failed to process knowledge filter webhook",
filter_id=filter_id,
subscription_id=subscription_id,
error=str(e))
import traceback
traceback.print_exc()

View file

@ -1,5 +1,8 @@
from starlette.requests import Request
from starlette.responses import JSONResponse
from utils.logging_config import get_logger
logger = get_logger(__name__)
async def search(request: Request, search_service, session_manager):
@ -20,9 +23,14 @@ async def search(request: Request, search_service, session_manager):
# Extract JWT token from auth middleware
jwt_token = request.state.jwt_token
print(
f"[DEBUG] search API: user={user}, user_id={user.user_id if user else None}, jwt_token={'None' if jwt_token is None else 'present'}"
)
logger.debug("Search API request",
user=str(user),
user_id=user.user_id if user else None,
has_jwt_token=jwt_token is not None,
query=query,
filters=filters,
limit=limit,
score_threshold=score_threshold)
result = await search_service.search(
query,

View file

@ -3,6 +3,9 @@ import requests
import asyncio
import time
from dotenv import load_dotenv
from utils.logging_config import get_logger
logger = get_logger(__name__)
from opensearchpy import AsyncOpenSearch
from opensearchpy._async.http_aiohttp import AIOHttpConnection
from docling.document_converter import DocumentConverter
@ -34,9 +37,7 @@ GOOGLE_OAUTH_CLIENT_SECRET = os.getenv("GOOGLE_OAUTH_CLIENT_SECRET")
def is_no_auth_mode():
"""Check if we're running in no-auth mode (OAuth credentials missing)"""
result = not (GOOGLE_OAUTH_CLIENT_ID and GOOGLE_OAUTH_CLIENT_SECRET)
print(
f"[DEBUG] is_no_auth_mode() = {result}, CLIENT_ID={GOOGLE_OAUTH_CLIENT_ID is not None}, CLIENT_SECRET={GOOGLE_OAUTH_CLIENT_SECRET is not None}"
)
logger.debug("Checking auth mode", no_auth_mode=result, has_client_id=GOOGLE_OAUTH_CLIENT_ID is not None, has_client_secret=GOOGLE_OAUTH_CLIENT_SECRET is not None)
return result
@ -95,17 +96,15 @@ async def generate_langflow_api_key():
# If key already provided via env, do not attempt generation
if LANGFLOW_KEY:
print("[INFO] Using LANGFLOW_KEY from environment; skipping generation")
logger.info("Using LANGFLOW_KEY from environment, skipping generation")
return LANGFLOW_KEY
if not LANGFLOW_SUPERUSER or not LANGFLOW_SUPERUSER_PASSWORD:
print(
"[WARNING] LANGFLOW_SUPERUSER and LANGFLOW_SUPERUSER_PASSWORD not set, skipping API key generation"
)
logger.warning("LANGFLOW_SUPERUSER and LANGFLOW_SUPERUSER_PASSWORD not set, skipping API key generation")
return None
try:
print("[INFO] Generating Langflow API key using superuser credentials...")
logger.info("Generating Langflow API key using superuser credentials")
max_attempts = int(os.getenv("LANGFLOW_KEY_RETRIES", "15"))
delay_seconds = float(os.getenv("LANGFLOW_KEY_RETRY_DELAY", "2.0"))
@ -143,28 +142,24 @@ async def generate_langflow_api_key():
raise KeyError("api_key")
LANGFLOW_KEY = api_key
print(
f"[INFO] Successfully generated Langflow API key: {api_key[:8]}..."
)
logger.info("Successfully generated Langflow API key", api_key_preview=api_key[:8])
return api_key
except (requests.exceptions.RequestException, KeyError) as e:
last_error = e
print(
f"[WARN] Attempt {attempt}/{max_attempts} to generate Langflow API key failed: {e}"
)
logger.warning("Attempt to generate Langflow API key failed", attempt=attempt, max_attempts=max_attempts, error=str(e))
if attempt < max_attempts:
time.sleep(delay_seconds)
else:
raise
except requests.exceptions.RequestException as e:
print(f"[ERROR] Failed to generate Langflow API key: {e}")
logger.error("Failed to generate Langflow API key", error=str(e))
return None
except KeyError as e:
print(f"[ERROR] Unexpected response format from Langflow: missing {e}")
logger.error("Unexpected response format from Langflow", missing_field=str(e))
return None
except Exception as e:
print(f"[ERROR] Unexpected error generating Langflow API key: {e}")
logger.error("Unexpected error generating Langflow API key", error=str(e))
return None
@ -198,12 +193,10 @@ class AppClients:
base_url=f"{LANGFLOW_URL}/api/v1", api_key=LANGFLOW_KEY
)
except Exception as e:
print(f"[WARNING] Failed to initialize Langflow client: {e}")
logger.warning("Failed to initialize Langflow client", error=str(e))
self.langflow_client = None
if self.langflow_client is None:
print(
"[WARNING] No Langflow client initialized yet; will attempt later on first use"
)
logger.warning("No Langflow client initialized yet, will attempt later on first use")
# Initialize patched OpenAI client
self.patched_async_client = patch_openai_with_mcp(AsyncOpenAI())
@ -224,9 +217,9 @@ class AppClients:
self.langflow_client = AsyncOpenAI(
base_url=f"{LANGFLOW_URL}/api/v1", api_key=LANGFLOW_KEY
)
print("[INFO] Langflow client initialized on-demand")
logger.info("Langflow client initialized on-demand")
except Exception as e:
print(f"[ERROR] Failed to initialize Langflow client on-demand: {e}")
logger.error("Failed to initialize Langflow client on-demand", error=str(e))
self.langflow_client = None
return self.langflow_client

View file

@ -6,6 +6,9 @@ from typing import Dict, List, Any, Optional
from datetime import datetime
from dataclasses import dataclass, asdict
from pathlib import Path
from utils.logging_config import get_logger
logger = get_logger(__name__)
from .base import BaseConnector
from .google_drive import GoogleDriveConnector
@ -318,21 +321,17 @@ class ConnectionManager:
if connection_config.config.get(
"webhook_channel_id"
) or connection_config.config.get("subscription_id"):
print(
f"[WEBHOOK] Subscription already exists for connection {connection_id}"
)
logger.info("Webhook subscription already exists", connection_id=connection_id)
return
# Check if webhook URL is configured
webhook_url = connection_config.config.get("webhook_url")
if not webhook_url:
print(
f"[WEBHOOK] No webhook URL configured for connection {connection_id}, skipping subscription setup"
)
logger.info("No webhook URL configured, skipping subscription setup", connection_id=connection_id)
return
try:
print(f"[WEBHOOK] Setting up subscription for connection {connection_id}")
logger.info("Setting up webhook subscription", connection_id=connection_id)
subscription_id = await connector.setup_subscription()
# Store the subscription and resource IDs in connection config
@ -346,14 +345,10 @@ class ConnectionManager:
# Save updated connection config
await self.save_connections()
print(
f"[WEBHOOK] Successfully set up subscription {subscription_id} for connection {connection_id}"
)
logger.info("Successfully set up webhook subscription", connection_id=connection_id, subscription_id=subscription_id)
except Exception as e:
print(
f"[ERROR] Failed to setup webhook subscription for connection {connection_id}: {e}"
)
logger.error("Failed to setup webhook subscription", connection_id=connection_id, error=str(e))
# Don't fail the entire connection setup if webhook fails
async def _setup_webhook_for_new_connection(
@ -361,16 +356,12 @@ class ConnectionManager:
):
"""Setup webhook subscription for a newly authenticated connection"""
try:
print(
f"[WEBHOOK] Setting up subscription for newly authenticated connection {connection_id}"
)
logger.info("Setting up subscription for newly authenticated connection", connection_id=connection_id)
# Create and authenticate connector
connector = self._create_connector(connection_config)
if not await connector.authenticate():
print(
f"[ERROR] Failed to authenticate connector for webhook setup: {connection_id}"
)
logger.error("Failed to authenticate connector for webhook setup", connection_id=connection_id)
return
# Setup subscription
@ -385,12 +376,8 @@ class ConnectionManager:
# Save updated connection config
await self.save_connections()
print(
f"[WEBHOOK] Successfully set up subscription {subscription_id} for connection {connection_id}"
)
logger.info("Successfully set up webhook subscription", connection_id=connection_id, subscription_id=subscription_id)
except Exception as e:
print(
f"[ERROR] Failed to setup webhook subscription for new connection {connection_id}: {e}"
)
logger.error("Failed to setup webhook subscription for new connection", connection_id=connection_id, error=str(e))
# Don't fail the connection setup if webhook fails

View file

@ -7,6 +7,9 @@ from typing import Dict, List, Any, Optional
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from googleapiclient.http import MediaIoBaseDownload
from utils.logging_config import get_logger
logger = get_logger(__name__)
from ..base import BaseConnector, ConnectorDocument, DocumentACL
from .oauth import GoogleDriveOAuth
@ -20,9 +23,7 @@ def get_worker_drive_service(client_id: str, client_secret: str, token_file: str
"""Get or create a Google Drive service instance for this worker process"""
global _worker_drive_service
if _worker_drive_service is None:
print(
f"🔧 Initializing Google Drive service in worker process (PID: {os.getpid()})"
)
logger.info("Initializing Google Drive service in worker process", pid=os.getpid())
# Create OAuth instance and load credentials in worker
from .oauth import GoogleDriveOAuth
@ -39,9 +40,7 @@ def get_worker_drive_service(client_id: str, client_secret: str, token_file: str
try:
loop.run_until_complete(oauth.load_credentials())
_worker_drive_service = oauth.get_service()
print(
f"✅ Google Drive service ready in worker process (PID: {os.getpid()})"
)
logger.info("Google Drive service ready in worker process", pid=os.getpid())
finally:
loop.close()
@ -215,7 +214,7 @@ class GoogleDriveConnector(BaseConnector):
return True
return False
except Exception as e:
print(f"Authentication failed: {e}")
logger.error("Authentication failed", error=str(e))
return False
async def setup_subscription(self) -> str:
@ -258,7 +257,7 @@ class GoogleDriveConnector(BaseConnector):
return channel_id
except HttpError as e:
print(f"Failed to set up subscription: {e}")
logger.error("Failed to set up subscription", error=str(e))
raise
def _get_start_page_token(self) -> str:
@ -340,7 +339,7 @@ class GoogleDriveConnector(BaseConnector):
return {"files": files, "nextPageToken": results.get("nextPageToken")}
except HttpError as e:
print(f"Failed to list files: {e}")
logger.error("Failed to list files", error=str(e))
raise
async def get_file_content(self, file_id: str) -> ConnectorDocument:
@ -397,7 +396,7 @@ class GoogleDriveConnector(BaseConnector):
)
except HttpError as e:
print(f"Failed to get file content: {e}")
logger.error("Failed to get file content", error=str(e))
raise
async def _download_file_content(
@ -477,19 +476,17 @@ class GoogleDriveConnector(BaseConnector):
resource_state = headers.get("x-goog-resource-state")
if not channel_id:
print("[WEBHOOK] No channel ID found in Google Drive webhook")
logger.warning("No channel ID found in Google Drive webhook")
return []
# Check if this webhook belongs to this connection
if self.webhook_channel_id != channel_id:
print(
f"[WEBHOOK] Channel ID mismatch: expected {self.webhook_channel_id}, got {channel_id}"
)
logger.warning("Channel ID mismatch", expected=self.webhook_channel_id, received=channel_id)
return []
# Only process certain states (ignore 'sync' which is just a ping)
if resource_state not in ["exists", "not_exists", "change"]:
print(f"[WEBHOOK] Ignoring resource state: {resource_state}")
logger.debug("Ignoring resource state", state=resource_state)
return []
try:
@ -508,10 +505,10 @@ class GoogleDriveConnector(BaseConnector):
page_token = query_params.get("pageToken", [None])[0]
if not page_token:
print("[WEBHOOK] No page token found, cannot identify specific changes")
logger.warning("No page token found, cannot identify specific changes")
return []
print(f"[WEBHOOK] Getting changes since page token: {page_token}")
logger.info("Getting changes since page token", page_token=page_token)
# Get list of changes since the page token
changes = (
@ -536,23 +533,19 @@ class GoogleDriveConnector(BaseConnector):
is_trashed = file_info.get("trashed", False)
if not is_trashed and mime_type in self.SUPPORTED_MIMETYPES:
print(
f"[WEBHOOK] File changed: {file_info.get('name', 'Unknown')} ({file_id})"
)
logger.info("File changed", filename=file_info.get('name', 'Unknown'), file_id=file_id)
affected_files.append(file_id)
elif is_trashed:
print(
f"[WEBHOOK] File deleted/trashed: {file_info.get('name', 'Unknown')} ({file_id})"
)
logger.info("File deleted/trashed", filename=file_info.get('name', 'Unknown'), file_id=file_id)
# TODO: Handle file deletion (remove from index)
else:
print(f"[WEBHOOK] Ignoring unsupported file type: {mime_type}")
logger.debug("Ignoring unsupported file type", mime_type=mime_type)
print(f"[WEBHOOK] Found {len(affected_files)} affected supported files")
logger.info("Found affected supported files", count=len(affected_files))
return affected_files
except HttpError as e:
print(f"Failed to handle webhook: {e}")
logger.error("Failed to handle webhook", error=str(e))
return []
async def cleanup_subscription(self, subscription_id: str) -> bool:
@ -574,5 +567,5 @@ class GoogleDriveConnector(BaseConnector):
self.service.channels().stop(body=body).execute()
return True
except HttpError as e:
print(f"Failed to cleanup subscription: {e}")
logger.error("Failed to cleanup subscription", error=str(e))
return False

View file

@ -1,3 +1,16 @@
import sys
# Check for TUI flag FIRST, before any heavy imports
if __name__ == "__main__" and len(sys.argv) > 1 and sys.argv[1] == "--tui":
from tui.main import run_tui
run_tui()
sys.exit(0)
# Configure structured logging early
from utils.logging_config import configure_from_env, get_logger
configure_from_env()
logger = get_logger(__name__)
import asyncio
import atexit
import multiprocessing
@ -47,8 +60,7 @@ from api import (
settings,
)
print("CUDA available:", torch.cuda.is_available())
print("CUDA version PyTorch was built with:", torch.version.cuda)
logger.info("CUDA device information", cuda_available=torch.cuda.is_available(), cuda_version=torch.version.cuda)
async def wait_for_opensearch():
@ -59,12 +71,10 @@ async def wait_for_opensearch():
for attempt in range(max_retries):
try:
await clients.opensearch.info()
print("OpenSearch is ready!")
logger.info("OpenSearch is ready")
return
except Exception as e:
print(
f"Attempt {attempt + 1}/{max_retries}: OpenSearch not ready yet ({e})"
)
logger.warning("OpenSearch not ready yet", attempt=attempt + 1, max_retries=max_retries, error=str(e))
if attempt < max_retries - 1:
await asyncio.sleep(retry_delay)
else:
@ -86,10 +96,9 @@ async def configure_alerting_security():
# Use admin client (clients.opensearch uses admin credentials)
response = await clients.opensearch.cluster.put_settings(body=alerting_settings)
print("Alerting security settings configured successfully")
print(f"Response: {response}")
logger.info("Alerting security settings configured successfully", response=response)
except Exception as e:
print(f"Warning: Failed to configure alerting security settings: {e}")
logger.warning("Failed to configure alerting security settings", error=str(e))
# Don't fail startup if alerting config fails
@ -100,9 +109,9 @@ async def init_index():
# Create documents index
if not await clients.opensearch.indices.exists(index=INDEX_NAME):
await clients.opensearch.indices.create(index=INDEX_NAME, body=INDEX_BODY)
print(f"Created index '{INDEX_NAME}'")
logger.info("Created OpenSearch index", index_name=INDEX_NAME)
else:
print(f"Index '{INDEX_NAME}' already exists, skipping creation.")
logger.info("Index already exists, skipping creation", index_name=INDEX_NAME)
# Create knowledge filters index
knowledge_filter_index_name = "knowledge_filters"
@ -127,11 +136,9 @@ async def init_index():
await clients.opensearch.indices.create(
index=knowledge_filter_index_name, body=knowledge_filter_index_body
)
print(f"Created index '{knowledge_filter_index_name}'")
logger.info("Created knowledge filters index", index_name=knowledge_filter_index_name)
else:
print(
f"Index '{knowledge_filter_index_name}' already exists, skipping creation."
)
logger.info("Knowledge filters index already exists, skipping creation", index_name=knowledge_filter_index_name)
# Configure alerting plugin security settings
await configure_alerting_security()
@ -171,24 +178,22 @@ def generate_jwt_keys():
capture_output=True,
)
print("Generated RSA keys for JWT signing")
logger.info("Generated RSA keys for JWT signing")
except subprocess.CalledProcessError as e:
print(f"Failed to generate RSA keys: {e}")
logger.error("Failed to generate RSA keys", error=str(e))
raise
else:
print("RSA keys already exist, skipping generation")
logger.info("RSA keys already exist, skipping generation")
async def init_index_when_ready():
"""Initialize OpenSearch index when it becomes available"""
try:
await init_index()
print("OpenSearch index initialization completed successfully")
logger.info("OpenSearch index initialization completed successfully")
except Exception as e:
print(f"OpenSearch index initialization failed: {e}")
print(
"OIDC endpoints will still work, but document operations may fail until OpenSearch is ready"
)
logger.error("OpenSearch index initialization failed", error=str(e))
logger.warning("OIDC endpoints will still work, but document operations may fail until OpenSearch is ready")
async def ingest_default_documents_when_ready(services):
@ -277,13 +282,11 @@ async def initialize_services():
try:
await connector_service.initialize()
loaded_count = len(connector_service.connection_manager.connections)
print(
f"[CONNECTORS] Loaded {loaded_count} persisted connection(s) on startup"
)
logger.info("Loaded persisted connector connections on startup", loaded_count=loaded_count)
except Exception as e:
print(f"[WARNING] Failed to load persisted connections on startup: {e}")
logger.warning("Failed to load persisted connections on startup", error=str(e))
else:
print(f"[CONNECTORS] Skipping connection loading in no-auth mode")
logger.info("Skipping connector loading in no-auth mode")
return {
"document_service": document_service,
@ -703,13 +706,13 @@ async def startup():
def cleanup():
"""Cleanup on application shutdown"""
# Cleanup process pools only (webhooks handled by Starlette shutdown)
print("[CLEANUP] Shutting down...")
logger.info("Application shutting down")
pass
async def cleanup_subscriptions_proper(services):
"""Cancel all active webhook subscriptions"""
print("[CLEANUP] Cancelling active webhook subscriptions...")
logger.info("Cancelling active webhook subscriptions")
try:
connector_service = services["connector_service"]
@ -725,30 +728,27 @@ async def cleanup_subscriptions_proper(services):
for connection in active_connections:
try:
print(
f"[CLEANUP] Cancelling subscription for connection {connection.connection_id}"
)
logger.info("Cancelling subscription for connection", connection_id=connection.connection_id)
connector = await connector_service.get_connector(
connection.connection_id
)
if connector:
subscription_id = connection.config.get("webhook_channel_id")
await connector.cleanup_subscription(subscription_id)
print(f"[CLEANUP] Cancelled subscription {subscription_id}")
logger.info("Cancelled subscription", subscription_id=subscription_id)
except Exception as e:
print(
f"[ERROR] Failed to cancel subscription for {connection.connection_id}: {e}"
)
logger.error("Failed to cancel subscription", connection_id=connection.connection_id, error=str(e))
print(f"[CLEANUP] Finished cancelling {len(active_connections)} subscriptions")
logger.info("Finished cancelling subscriptions", subscription_count=len(active_connections))
except Exception as e:
print(f"[ERROR] Failed to cleanup subscriptions: {e}")
logger.error("Failed to cleanup subscriptions", error=str(e))
if __name__ == "__main__":
import uvicorn
# TUI check already handled at top of file
# Register cleanup function
atexit.register(cleanup)

View file

@ -2,6 +2,9 @@ from config.settings import clients, LANGFLOW_URL, FLOW_ID
from agent import async_chat, async_langflow, async_chat_stream, async_langflow_stream
from auth_context import set_auth_context
import json
from utils.logging_config import get_logger
logger = get_logger(__name__)
class ChatService:
@ -108,9 +111,7 @@ class ChatService:
# Pass the complete filter expression as a single header to Langflow (only if we have something to send)
if filter_expression:
print(
f"Sending OpenRAG query filter to Langflow: {json.dumps(filter_expression, indent=2)}"
)
logger.info("Sending OpenRAG query filter to Langflow", filter_expression=filter_expression)
extra_headers["X-LANGFLOW-GLOBAL-VAR-OPENRAG-QUERY-FILTER"] = json.dumps(
filter_expression
)
@ -200,9 +201,7 @@ class ChatService:
return {"error": "User ID is required", "conversations": []}
conversations_dict = get_user_conversations(user_id)
print(
f"[DEBUG] get_chat_history for user {user_id}: found {len(conversations_dict)} conversations"
)
logger.debug("Getting chat history for user", user_id=user_id, conversation_count=len(conversations_dict))
# Convert conversations dict to list format with metadata
conversations = []

View file

@ -8,6 +8,9 @@ from docling_core.types.io import DocumentStream
from typing import List
import openai
import tiktoken
from utils.logging_config import get_logger
logger = get_logger(__name__)
from config.settings import clients, INDEX_NAME, EMBED_MODEL
from utils.document_processing import extract_relevant, process_document_sync
@ -91,7 +94,7 @@ class DocumentService:
def _recreate_process_pool(self):
"""Recreate the process pool if it's broken"""
if self._process_pool_broken and self.process_pool:
print("[WARNING] Attempting to recreate broken process pool...")
logger.warning("Attempting to recreate broken process pool")
try:
# Shutdown the old pool
self.process_pool.shutdown(wait=False)
@ -102,10 +105,10 @@ class DocumentService:
self.process_pool = ProcessPoolExecutor(max_workers=MAX_WORKERS)
self._process_pool_broken = False
print(f"[INFO] Process pool recreated with {MAX_WORKERS} workers")
logger.info("Process pool recreated", worker_count=MAX_WORKERS)
return True
except Exception as e:
print(f"[ERROR] Failed to recreate process pool: {e}")
logger.error("Failed to recreate process pool", error=str(e))
return False
return False
@ -193,8 +196,8 @@ class DocumentService:
index=INDEX_NAME, id=chunk_id, body=chunk_doc
)
except Exception as e:
print(f"[ERROR] OpenSearch indexing failed for chunk {chunk_id}: {e}")
print(f"[ERROR] Chunk document: {chunk_doc}")
logger.error("OpenSearch indexing failed for chunk", chunk_id=chunk_id, error=str(e))
logger.error("Chunk document details", chunk_doc=chunk_doc)
raise
return {"status": "indexed", "id": file_hash}
@ -229,9 +232,7 @@ class DocumentService:
try:
exists = await opensearch_client.exists(index=INDEX_NAME, id=file_hash)
except Exception as e:
print(
f"[ERROR] OpenSearch exists check failed for document {file_hash}: {e}"
)
logger.error("OpenSearch exists check failed", file_hash=file_hash, error=str(e))
raise
if exists:
return {"status": "unchanged", "id": file_hash}
@ -371,10 +372,8 @@ class DocumentService:
index=INDEX_NAME, id=chunk_id, body=chunk_doc
)
except Exception as e:
print(
f"[ERROR] OpenSearch indexing failed for batch chunk {chunk_id}: {e}"
)
print(f"[ERROR] Chunk document: {chunk_doc}")
logger.error("OpenSearch indexing failed for batch chunk", chunk_id=chunk_id, error=str(e))
logger.error("Chunk document details", chunk_doc=chunk_doc)
raise
result = {"status": "indexed", "id": slim_doc["id"]}
@ -389,29 +388,25 @@ class DocumentService:
from concurrent.futures import BrokenExecutor
if isinstance(e, BrokenExecutor):
print(f"[CRITICAL] Process pool broken while processing {file_path}")
print(f"[INFO] This usually indicates a worker process crashed")
print(
f"[INFO] You should see detailed crash logs above from the worker process"
)
logger.error("Process pool broken while processing file", file_path=file_path)
logger.info("Worker process likely crashed")
logger.info("You should see detailed crash logs above from the worker process")
# Mark pool as broken for potential recreation
self._process_pool_broken = True
# Attempt to recreate the pool for future operations
if self._recreate_process_pool():
print(f"[INFO] Process pool successfully recreated")
logger.info("Process pool successfully recreated")
else:
print(
f"[WARNING] Failed to recreate process pool - future operations may fail"
)
logger.warning("Failed to recreate process pool - future operations may fail")
file_task.error = f"Worker process crashed: {str(e)}"
else:
print(f"[ERROR] Failed to process file {file_path}: {e}")
logger.error("Failed to process file", file_path=file_path, error=str(e))
file_task.error = str(e)
print(f"[ERROR] Full traceback:")
logger.error("Full traceback available")
traceback.print_exc()
file_task.status = TaskStatus.FAILED
upload_task.failed_files += 1

View file

@ -3,6 +3,9 @@ import json
from typing import Any, Dict, Optional, List
from datetime import datetime
from config.settings import clients
from utils.logging_config import get_logger
logger = get_logger(__name__)
class MonitorService:
@ -192,7 +195,7 @@ class MonitorService:
return monitors
except Exception as e:
print(f"Error listing monitors for user {user_id}: {e}")
logger.error("Error listing monitors for user", user_id=user_id, error=str(e))
return []
async def list_monitors_for_filter(
@ -233,7 +236,7 @@ class MonitorService:
return monitors
except Exception as e:
print(f"Error listing monitors for filter {filter_id}: {e}")
logger.error("Error listing monitors for filter", filter_id=filter_id, error=str(e))
return []
async def _get_or_create_webhook_destination(

View file

@ -2,6 +2,9 @@ from typing import Any, Dict, Optional
from agentd.tool_decorator import tool
from config.settings import clients, INDEX_NAME, EMBED_MODEL
from auth_context import get_auth_context
from utils.logging_config import get_logger
logger = get_logger(__name__)
class SearchService:
@ -135,13 +138,9 @@ class SearchService:
search_body["min_score"] = score_threshold
# Authentication required - DLS will handle document filtering automatically
print(
f"[DEBUG] search_service: user_id={user_id}, jwt_token={'None' if jwt_token is None else 'present'}"
)
logger.debug("search_service authentication info", user_id=user_id, has_jwt_token=jwt_token is not None)
if not user_id:
print(
f"[DEBUG] search_service: user_id is None/empty, returning auth error"
)
logger.debug("search_service: user_id is None/empty, returning auth error")
return {"results": [], "error": "Authentication required"}
# Get user's OpenSearch client with JWT for OIDC auth through session manager
@ -152,8 +151,7 @@ class SearchService:
try:
results = await opensearch_client.search(index=INDEX_NAME, body=search_body)
except Exception as e:
print(f"[ERROR] OpenSearch query failed: {e}")
print(f"[ERROR] Search body: {search_body}")
logger.error("OpenSearch query failed", error=str(e), search_body=search_body)
# Re-raise the exception so the API returns the error to frontend
raise

1
src/tui/__init__.py Normal file
View file

@ -0,0 +1 @@
"""OpenRAG Terminal User Interface package."""

232
src/tui/main.py Normal file
View file

@ -0,0 +1,232 @@
"""Main TUI application for OpenRAG."""
import sys
from pathlib import Path
from textual.app import App, ComposeResult
from .screens.welcome import WelcomeScreen
from .screens.config import ConfigScreen
from .screens.monitor import MonitorScreen
from .screens.logs import LogsScreen
from .screens.diagnostics import DiagnosticsScreen
from .managers.env_manager import EnvManager
from .managers.container_manager import ContainerManager
from .utils.platform import PlatformDetector
from .widgets.diagnostics_notification import notify_with_diagnostics
class OpenRAGTUI(App):
    """OpenRAG Terminal User Interface application.

    Textual App that manages OpenRAG container configuration and lifecycle.
    Owns three managers: platform detection, container runtime control, and
    .env configuration persistence.
    """

    TITLE = "OpenRAG TUI"
    SUB_TITLE = "Container Management & Configuration"

    CSS = """
    Screen {
        background: $background;
    }

    #main-container {
        height: 100%;
        padding: 1;
    }

    #welcome-container {
        align: center middle;
        width: 100%;
        height: 100%;
    }

    #welcome-text {
        text-align: center;
        margin-bottom: 2;
    }

    .button-row {
        align: center middle;
        height: auto;
        margin: 1 0;
    }

    .button-row Button {
        margin: 0 1;
        min-width: 20;
    }

    #config-header {
        text-align: center;
        margin-bottom: 2;
    }

    #config-scroll {
        height: 1fr;
        overflow-y: auto;
    }

    #config-form {
        width: 80%;
        max-width: 100;
        margin: 0;
        padding: 1;
        height: auto;
    }

    #config-form Input {
        margin-bottom: 1;
        width: 100%;
    }

    /* Actions under Documents Paths input */
    #docs-path-actions {
        width: 100%;
        padding-left: 0;
        margin-top: -1;
        height: auto;
    }

    #docs-path-actions Button {
        width: auto;
        min-width: 12;
    }

    #config-form Label {
        margin-bottom: 0;
        padding-left: 1;
    }

    .helper-text {
        margin: 0 0 1 1;
    }

    /* Docs path actions row */
    #services-content {
        height: 100%;
    }

    #runtime-status {
        background: $panel;
        border: solid $primary;
        padding: 1;
        margin-bottom: 1;
    }

    #services-table {
        height: 1fr;
        margin-bottom: 1;
    }

    #images-table {
        height: auto;
        max-height: 8;
        margin-bottom: 1;
    }

    #logs-scroll {
        height: 1fr;
        border: solid $primary;
        background: $surface;
    }

    .controls-row {
        align: left middle;
        height: auto;
        margin: 1 0;
    }

    .controls-row > * {
        margin-right: 1;
    }

    .label {
        width: auto;
        margin-right: 1;
        text-style: bold;
    }

    #system-info {
        background: $panel;
        border: solid $primary;
        padding: 2;
        height: 1fr;
    }

    TabbedContent {
        height: 1fr;
    }

    TabPane {
        padding: 1;
        height: 1fr;
    }

    .tab-header {
        text-style: bold;
        color: $accent;
        margin-bottom: 1;
    }

    TabPane ScrollableContainer {
        height: 100%;
        padding: 1;
    }
    """

    def __init__(self):
        super().__init__()
        self.platform_detector = PlatformDetector()
        self.container_manager = ContainerManager()
        self.env_manager = EnvManager()

    def on_mount(self) -> None:
        """Initialize the application: warn about missing runtime, load config,
        and show the welcome screen."""
        # Check for runtime availability; the app still starts so the user can
        # read the diagnostics, but container actions will fail until fixed.
        if not self.container_manager.is_available():
            notify_with_diagnostics(
                self,
                "No container runtime found. Please install Docker or Podman.",
                severity="warning",
                timeout=10
            )

        # Load existing config if available. Called for its side effect only;
        # the boolean result (whether a config file existed) is not needed here.
        self.env_manager.load_existing_env()

        # Start with welcome screen
        self.push_screen(WelcomeScreen())

    async def action_quit(self) -> None:
        """Quit the application."""
        self.exit()

    def check_runtime_requirements(self) -> tuple[bool, str]:
        """Check if runtime requirements are met.

        Returns:
            (ok, message): ok is False when no container runtime is installed
            (message carries installation instructions) or when a Podman VM on
            macOS has insufficient memory (message explains the shortfall).
        """
        if not self.container_manager.is_available():
            return False, self.platform_detector.get_installation_instructions()

        # Check Podman macOS memory if applicable
        runtime_info = self.container_manager.get_runtime_info()
        if runtime_info.runtime_type.value == "podman":
            is_sufficient, _, message = self.platform_detector.check_podman_macos_memory()
            if not is_sufficient:
                return False, f"Podman VM memory insufficient:\n{message}"

        return True, "Runtime requirements satisfied"
def run_tui():
    """Run the OpenRAG TUI application.

    Exits with status 0 on Ctrl-C and status 1 on any other failure.
    """
    try:
        OpenRAGTUI().run()
    except KeyboardInterrupt:
        # Graceful exit on Ctrl-C.
        print("\nOpenRAG TUI interrupted by user")
        sys.exit(0)
    except Exception as e:
        print(f"Error running OpenRAG TUI: {e}")
        sys.exit(1)
if __name__ == "__main__":
run_tui()

View file

@ -0,0 +1 @@
"""TUI managers package."""

View file

@ -0,0 +1,586 @@
"""Container lifecycle manager for OpenRAG TUI."""
import asyncio
import json
import subprocess
import time
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import Dict, List, Optional, AsyncIterator
from ..utils.platform import PlatformDetector, RuntimeInfo, RuntimeType
from utils.gpu_detection import detect_gpu_devices
class ServiceStatus(Enum):
    """Container service status.

    Coarse lifecycle states used by ContainerManager when mapping raw
    docker/podman container states onto the services table.
    """
    UNKNOWN = "unknown"    # state could not be determined from runtime output
    RUNNING = "running"
    STOPPED = "stopped"
    STARTING = "starting"
    STOPPING = "stopping"
    ERROR = "error"
    MISSING = "missing"    # expected service has no container at all
@dataclass
class ServiceInfo:
    """Container service information — one row of the TUI services table."""
    name: str
    status: ServiceStatus
    # Raw health/status string as reported by the runtime; None when absent.
    health: Optional[str] = None
    # "host:container" port mapping strings.
    ports: List[str] = field(default_factory=list)
    image: Optional[str] = None
    image_digest: Optional[str] = None
    created: Optional[str] = None

    def __post_init__(self):
        # Defensive: default_factory already guarantees a list, but a caller
        # may pass ports=None explicitly; normalize so iteration is safe.
        if self.ports is None:
            self.ports = []
class ContainerManager:
"""Manages Docker/Podman container lifecycle for OpenRAG."""
def __init__(self, compose_file: Optional[Path] = None):
    """Create a manager bound to a compose file (docker-compose.yml by default).

    Detects the available runtime (docker/podman) and whether the CPU-only
    compose variant should be used (no GPU detected, or detection failed).
    """
    self.platform_detector = PlatformDetector()
    self.runtime_info = self.platform_detector.detect_runtime()
    self.compose_file = compose_file or Path("docker-compose.yml")
    self.cpu_compose_file = Path("docker-compose-cpu.yml")
    # Status cache refreshed by get_service_status (time-based throttle).
    self.services_cache: Dict[str, ServiceInfo] = {}
    self.last_status_update = 0

    # Auto-select CPU compose if no GPU available; treat detection failure
    # as "no GPU" since the CPU compose file is the safe fallback.
    try:
        has_gpu, _ = detect_gpu_devices()
        self.use_cpu_compose = not has_gpu
    except Exception:
        self.use_cpu_compose = True

    # Expected services based on compose files
    self.expected_services = [
        "openrag-backend",
        "openrag-frontend",
        "opensearch",
        "dashboards",
        "langflow"
    ]

    # Map container names to service names (compose container_name values
    # differ from the logical service names shown in the TUI).
    self.container_name_map = {
        "openrag-backend": "openrag-backend",
        "openrag-frontend": "openrag-frontend",
        "os": "opensearch",
        "osdash": "dashboards",
        "langflow": "langflow"
    }
def is_available(self) -> bool:
    """Return True when a container runtime (Docker or Podman) was detected."""
    detected = self.runtime_info.runtime_type
    return detected is not RuntimeType.NONE
def get_runtime_info(self) -> RuntimeInfo:
    """Get container runtime information (as detected at construction time)."""
    return self.runtime_info
def get_installation_help(self) -> str:
    """Get platform-specific installation instructions for when no runtime
    is available."""
    return self.platform_detector.get_installation_instructions()
async def _run_compose_command(self, args: List[str], cpu_mode: Optional[bool] = None) -> tuple[bool, str, str]:
    """Run a compose command and return (success, stdout, stderr).

    cpu_mode selects the CPU-only compose file; None means "use the
    auto-detected default". Failures never raise — they are reported via
    the returned tuple.
    """
    if not self.is_available():
        return False, "", "No container runtime available"

    use_cpu = self.use_cpu_compose if cpu_mode is None else cpu_mode
    target_file = self.cpu_compose_file if use_cpu else self.compose_file
    full_cmd = [*self.runtime_info.compose_command, "-f", str(target_file), *args]

    try:
        proc = await asyncio.create_subprocess_exec(
            *full_cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            cwd=Path.cwd()
        )
        out_bytes, err_bytes = await proc.communicate()
        return (
            proc.returncode == 0,
            out_bytes.decode() if out_bytes else "",
            err_bytes.decode() if err_bytes else "",
        )
    except Exception as e:
        return False, "", f"Command execution failed: {e}"
async def _run_compose_command_streaming(self, args: List[str], cpu_mode: Optional[bool] = None) -> AsyncIterator[str]:
    """Run a compose command, yielding combined stdout/stderr lines as they arrive.

    Errors are yielded as lines rather than raised, so callers can render
    them in the same log view as normal output.
    """
    if not self.is_available():
        yield "No container runtime available"
        return

    use_cpu = self.use_cpu_compose if cpu_mode is None else cpu_mode
    target_file = self.cpu_compose_file if use_cpu else self.compose_file
    full_cmd = [*self.runtime_info.compose_command, "-f", str(target_file), *args]

    try:
        proc = await asyncio.create_subprocess_exec(
            *full_cmd,
            stdout=asyncio.subprocess.PIPE,
            # Fold stderr into stdout so callers see one ordered stream.
            stderr=asyncio.subprocess.STDOUT,
            cwd=Path.cwd()
        )

        # Read until EOF, yielding each non-blank line as it is produced.
        while line := await proc.stdout.readline():
            text = line.decode().rstrip()
            if text:
                yield text

        # Wait for process to complete
        await proc.wait()
    except Exception as e:
        yield f"Command execution failed: {e}"
async def _run_runtime_command(self, args: List[str]) -> tuple[bool, str, str]:
    """Run a bare runtime command (docker/podman, no compose file) and
    return (success, stdout, stderr). Failures are reported via the tuple,
    never raised."""
    if not self.is_available():
        return False, "", "No container runtime available"

    full_cmd = [*self.runtime_info.runtime_command, *args]
    try:
        proc = await asyncio.create_subprocess_exec(
            *full_cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE
        )
        out_bytes, err_bytes = await proc.communicate()
        return (
            proc.returncode == 0,
            out_bytes.decode() if out_bytes else "",
            err_bytes.decode() if err_bytes else "",
        )
    except Exception as e:
        return False, "", f"Command execution failed: {e}"
def _process_service_json(self, service: Dict, services: Dict[str, ServiceInfo]) -> None:
    """Parse one `compose ps --format json` entry into the `services` dict.

    Mutates `services` in place, keyed by the mapped service name. Entries
    whose container name is not in `container_name_map` are ignored.

    Note: the previous version printed the full service JSON to stdout on
    every call ("DEBUG: ..."); that was leftover debugging and corrupts the
    Textual TUI display, so it has been removed.
    """
    container_name = service.get("Name", "")

    # Map container name to service name; unknown containers are not ours.
    service_name = self.container_name_map.get(container_name)
    if not service_name:
        return

    state = service.get("State", "").lower()

    # Map compose states to our status enum (substring match: compose state
    # strings vary, e.g. "running (healthy)", "exited (0)").
    if "running" in state:
        status = ServiceStatus.RUNNING
    elif "exited" in state or "stopped" in state:
        status = ServiceStatus.STOPPED
    elif "starting" in state:
        status = ServiceStatus.STARTING
    else:
        status = ServiceStatus.UNKNOWN

    # Extract health - use Status if Health is empty
    health = service.get("Health", "") or service.get("Status", "N/A")

    # Extract ports (comma-separated string from compose ps)
    ports_str = service.get("Ports", "")
    ports = [p.strip() for p in ports_str.split(",") if p.strip()] if ports_str else []

    # Extract image
    image = service.get("Image", "N/A")

    services[service_name] = ServiceInfo(
        name=service_name,
        status=status,
        health=health,
        ports=ports,
        image=image,
    )
async def get_service_status(self, force_refresh: bool = False) -> Dict[str, ServiceInfo]:
    """Get current status of all services.

    Results are cached for 5 seconds unless ``force_refresh`` is True.
    Podman is queried directly via ``podman ps`` (its compose ``ps`` output
    is unreliable here); Docker goes through ``compose ps``. Expected
    services with no matching container are reported as MISSING.
    """
    current_time = time.time()
    # Use cache if recent and not forcing refresh
    if not force_refresh and current_time - self.last_status_update < 5:
        return self.services_cache
    services = {}
    # Different approach for Podman vs Docker
    if self.runtime_info.runtime_type == RuntimeType.PODMAN:
        # For Podman, use direct podman ps command instead of compose
        cmd = ["ps", "--all", "--format", "json"]
        success, stdout, stderr = await self._run_runtime_command(cmd)
        if success and stdout.strip():
            try:
                # Podman emits a single JSON array of container records.
                containers = json.loads(stdout.strip())
                for container in containers:
                    # Get container name and map to service name
                    names = container.get("Names", [])
                    if not names:
                        continue
                    container_name = names[0]
                    service_name = self.container_name_map.get(container_name)
                    if not service_name:
                        continue
                    # Get container state
                    state = container.get("State", "").lower()
                    if "running" in state:
                        status = ServiceStatus.RUNNING
                    elif "exited" in state or "stopped" in state:
                        status = ServiceStatus.STOPPED
                    elif "created" in state:
                        status = ServiceStatus.STARTING
                    else:
                        status = ServiceStatus.UNKNOWN
                    # Get other container info
                    image = container.get("Image", "N/A")
                    ports = []
                    # Handle case where Ports might be None instead of an empty list
                    container_ports = container.get("Ports") or []
                    if isinstance(container_ports, list):
                        for port in container_ports:
                            host_port = port.get("host_port")
                            container_port = port.get("container_port")
                            if host_port and container_port:
                                ports.append(f"{host_port}:{container_port}")
                    services[service_name] = ServiceInfo(
                        name=service_name,
                        status=status,
                        health=state,  # Podman path has no health probe; reuse state
                        ports=ports,
                        image=image,
                    )
            except json.JSONDecodeError:
                # Unparseable output — fall through with whatever we gathered.
                pass
    else:
        # For Docker, use compose ps command
        success, stdout, stderr = await self._run_compose_command(["ps", "--format", "json"])
        if success and stdout.strip():
            try:
                # Handle both single JSON object (Podman) and multiple JSON objects (Docker)
                if stdout.strip().startswith('[') and stdout.strip().endswith(']'):
                    # JSON array format
                    service_list = json.loads(stdout.strip())
                    for service in service_list:
                        self._process_service_json(service, services)
                else:
                    # Line-by-line JSON format (one object per line)
                    for line in stdout.strip().split('\n'):
                        if line.strip() and line.startswith('{'):
                            service = json.loads(line)
                            self._process_service_json(service, services)
            except json.JSONDecodeError:
                # Fallback to parsing text output (older compose table format)
                lines = stdout.strip().split('\n')
                if len(lines) > 1:  # Make sure we have at least a header and one line
                    for line in lines[1:]:  # Skip header
                        if line.strip():
                            parts = line.split()
                            if len(parts) >= 3:
                                name = parts[0]
                                # Only include our expected services
                                if name not in self.expected_services:
                                    continue
                                state = parts[2].lower()
                                if "up" in state:
                                    status = ServiceStatus.RUNNING
                                elif "exit" in state:
                                    status = ServiceStatus.STOPPED
                                else:
                                    status = ServiceStatus.UNKNOWN
                                services[name] = ServiceInfo(name=name, status=status)
    # Add expected services that weren't found
    for expected in self.expected_services:
        if expected not in services:
            services[expected] = ServiceInfo(name=expected, status=ServiceStatus.MISSING)
    # Refresh the cache before returning.
    self.services_cache = services
    self.last_status_update = current_time
    return services
async def get_images_digests(self, images: List[str]) -> Dict[str, str]:
    """Resolve each image name to its local image ID (``sha256:...``).

    Empty names and duplicates are skipped; images the runtime cannot
    inspect are simply absent from the returned map.
    """
    resolved: Dict[str, str] = {}
    for name in images:
        if not name or name in resolved:
            continue
        ok, output, _ = await self._run_runtime_command(
            ["image", "inspect", name, "--format", "{{.Id}}"]
        )
        if ok and output.strip():
            # First line of the template output is the ID.
            resolved[name] = output.strip().splitlines()[0]
    return resolved
def _parse_compose_images(self) -> list[str]:
"""Best-effort parse of image names from compose files without YAML dependency."""
images: set[str] = set()
for compose in [self.compose_file, self.cpu_compose_file]:
try:
if not compose.exists():
continue
for line in compose.read_text().splitlines():
line = line.strip()
if not line or line.startswith('#'):
continue
if line.startswith('image:'):
# image: repo/name:tag
val = line.split(':', 1)[1].strip()
# Remove quotes if present
if (val.startswith('"') and val.endswith('"')) or (val.startswith("'") and val.endswith("'")):
val = val[1:-1]
images.add(val)
except Exception:
continue
return list(images)
async def get_project_images_info(self) -> list[tuple[str, str]]:
    """List ``(image, digest_or_id)`` pairs for images in the compose files.

    Images that are not present locally get ``'-'`` as their digest. The
    result is sorted by image name for stable display.
    """
    info: list[tuple[str, str]] = []
    for image_name in self._parse_compose_images():
        ok, output, _ = await self._run_runtime_command(
            ['image', 'inspect', image_name, '--format', '{{.Id}}']
        )
        image_id = output.strip().splitlines()[0] if ok and output.strip() else '-'
        info.append((image_name, image_id))
    return sorted(info, key=lambda entry: entry[0])
async def start_services(self, cpu_mode: bool = False) -> AsyncIterator[tuple[bool, str]]:
    """Bring up all services via ``compose up -d``.

    Yields ``(done, message)`` progress tuples; the final tuple's flag is
    True only on success.
    """
    yield False, "Starting OpenRAG services..."
    ok, _out, err = await self._run_compose_command(["up", "-d"], cpu_mode)
    if ok:
        yield True, "Services started successfully"
    else:
        yield False, f"Failed to start services: {err}"
async def stop_services(self) -> AsyncIterator[tuple[bool, str]]:
    """Stop all services via ``compose down``.

    Yields ``(done, message)`` progress tuples; the final tuple's flag is
    True only on success.
    """
    yield False, "Stopping OpenRAG services..."
    ok, _out, err = await self._run_compose_command(["down"])
    if ok:
        yield True, "Services stopped successfully"
    else:
        yield False, f"Failed to stop services: {err}"
async def restart_services(self, cpu_mode: bool = False) -> AsyncIterator[tuple[bool, str]]:
    """Restart all services via ``compose restart``.

    Yields ``(done, message)`` progress tuples; the final tuple's flag is
    True only on success.
    """
    yield False, "Restarting OpenRAG services..."
    ok, _out, err = await self._run_compose_command(["restart"], cpu_mode)
    if ok:
        yield True, "Services restarted successfully"
    else:
        yield False, f"Failed to restart services: {err}"
async def upgrade_services(self, cpu_mode: bool = False) -> AsyncIterator[tuple[bool, str]]:
    """Pull newer images and recreate containers, streaming progress lines.

    Errors are detected heuristically by scanning streamed output for
    "error"/"failed"; a pull failure is reported but does not abort the
    restart phase.
    """
    yield False, "Pulling latest images..."
    pull_ok = True
    async for output_line in self._run_compose_command_streaming(["pull"], cpu_mode):
        yield False, output_line
        # Heuristic error detection on the streamed text.
        lowered = output_line.lower()
        if "error" in lowered or "failed" in lowered:
            pull_ok = False
    if not pull_ok:
        yield False, "Failed to pull some images, but continuing with restart..."

    yield False, "Images updated, restarting services..."
    recreate_ok = True
    async for output_line in self._run_compose_command_streaming(["up", "-d", "--force-recreate"], cpu_mode):
        yield False, output_line
        lowered = output_line.lower()
        if "error" in lowered or "failed" in lowered:
            recreate_ok = False
    if recreate_ok:
        yield True, "Services upgraded and restarted successfully"
    else:
        yield False, "Some errors occurred during service restart"
async def reset_services(self) -> AsyncIterator[tuple[bool, str]]:
    """Tear everything down: containers, volumes, orphans, and local images.

    Yields ``(done, message)`` progress tuples and stops early if the
    compose teardown itself fails.
    """
    yield False, "Stopping all services..."
    ok, _out, err = await self._run_compose_command(
        ["down", "--volumes", "--remove-orphans", "--rmi", "local"]
    )
    if not ok:
        yield False, f"Failed to stop services: {err}"
        return
    yield False, "Cleaning up container data..."
    # compose down can leave stray resources behind; prune sweeps the rest.
    await self._run_runtime_command(["system", "prune", "-f"])
    yield True, "System reset completed - all containers, volumes, and local images removed"
async def get_service_logs(self, service_name: str, lines: int = 100) -> tuple[bool, str]:
    """Fetch the last ``lines`` log lines for one service.

    Returns ``(True, logs)`` on success, ``(False, error_message)`` otherwise.
    """
    ok, out, err = await self._run_compose_command(["logs", "--tail", str(lines), service_name])
    return (True, out) if ok else (False, f"Failed to get logs: {err}")
async def follow_service_logs(self, service_name: str) -> AsyncIterator[str]:
    """Tail a service's logs (``compose logs -f``), yielding decoded lines.

    stderr is folded into stdout. Any failure is reported as a yielded
    message rather than an exception.
    """
    if not self.is_available():
        yield "No container runtime available"
        return
    compose_file = self.cpu_compose_file if self.use_cpu_compose else self.compose_file
    cmd = self.runtime_info.compose_command + ["-f", str(compose_file), "logs", "-f", service_name]
    try:
        proc = await asyncio.create_subprocess_exec(
            *cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.STDOUT,
            cwd=Path.cwd(),
        )
        if proc.stdout is None:
            yield "Error: Unable to read process output"
            return
        while True:
            raw = await proc.stdout.readline()
            if not raw:
                # EOF — the followed process ended.
                break
            yield raw.decode().rstrip()
    except Exception as e:
        yield f"Error following logs: {e}"
async def get_system_stats(self) -> Dict[str, Dict[str, str]]:
    """Collect per-container resource usage from ``stats --no-stream``.

    Returns a map of container name -> {cpu, memory, memory_percent,
    network, disk}; empty on any failure.
    """
    usage: Dict[str, Dict[str, str]] = {}
    ok, out, _err = await self._run_runtime_command(
        ["stats", "--no-stream", "--format", "json"]
    )
    if not (ok and out.strip()):
        return usage
    try:
        # Output is one JSON object per line.
        for raw in out.strip().split('\n'):
            if not raw.strip():
                continue
            record = json.loads(raw)
            container = record.get("Name", record.get("Container", ""))
            if not container:
                continue
            usage[container] = {
                "cpu": record.get("CPUPerc", "0%"),
                "memory": record.get("MemUsage", "0B / 0B"),
                "memory_percent": record.get("MemPerc", "0%"),
                "network": record.get("NetIO", "0B / 0B"),
                "disk": record.get("BlockIO", "0B / 0B"),
            }
    except json.JSONDecodeError:
        pass
    return usage
async def debug_podman_services(self) -> str:
    """Run ``podman ps --all --format json`` directly and return a report.

    Troubleshooting helper for when compose-based status detection
    misbehaves. Returns a human-readable string; never raises.

    Fix: the previous ``container.get("Names", [""])[0]`` raised IndexError
    when Podman reported ``Names`` as an empty list (dict.get's default only
    applies when the key is missing); both cases are now guarded.
    """
    if self.runtime_info.runtime_type != RuntimeType.PODMAN:
        return "Not using Podman"
    # Try direct podman command
    cmd = ["podman", "ps", "--all", "--format", "json"]
    try:
        process = await asyncio.create_subprocess_exec(
            *cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            cwd=Path.cwd()
        )
        stdout, stderr = await process.communicate()
        stdout_text = stdout.decode() if stdout else ""
        stderr_text = stderr.decode() if stderr else ""
        result = f"Command: {' '.join(cmd)}\n"
        result += f"Return code: {process.returncode}\n"
        result += f"Stdout: {stdout_text}\n"
        result += f"Stderr: {stderr_text}\n"
        # Try to parse the output
        if stdout_text.strip():
            try:
                containers = json.loads(stdout_text)
                result += f"\nFound {len(containers)} containers:\n"
                for container in containers:
                    # Guard against missing key AND empty list.
                    names = container.get("Names") or []
                    name = names[0] if names else ""
                    state = container.get("State", "")
                    result += f" - {name}: {state}\n"
            except json.JSONDecodeError as e:
                result += f"\nFailed to parse JSON: {e}\n"
        return result
    except Exception as e:
        return f"Error executing command: {e}"
def check_podman_macos_memory(self) -> tuple[bool, str]:
    """Report whether the Podman VM has sufficient memory on macOS.

    Returns ``(is_sufficient, message)``; trivially True on non-Podman
    runtimes. The detector's memory amount is discarded here — only the
    verdict and its message are surfaced.
    """
    if self.runtime_info.runtime_type != RuntimeType.PODMAN:
        return True, "Not using Podman"
    ok, _memory_mb, message = self.platform_detector.check_podman_macos_memory()
    return ok, message

View file

@ -0,0 +1,296 @@
"""Environment configuration manager for OpenRAG TUI."""
import os
import secrets
import string
from datetime import datetime
from pathlib import Path
from typing import Dict, Optional, List
from dataclasses import dataclass, field
from ..utils.validation import (
validate_openai_api_key,
validate_google_oauth_client_id,
validate_non_empty,
validate_url,
validate_documents_paths,
sanitize_env_value
)
@dataclass
class EnvConfig:
    """Environment configuration data.

    One attribute per .env setting managed by the TUI; EnvManager maps
    these to and from their UPPER_SNAKE environment-variable names.
    """
    # Core settings
    openai_api_key: str = ""
    opensearch_password: str = ""
    langflow_secret_key: str = ""
    langflow_superuser: str = "admin"
    langflow_superuser_password: str = ""
    flow_id: str = "1098eea1-6649-4e1d-aed1-b77249fb8dd0"
    # OAuth settings
    google_oauth_client_id: str = ""
    google_oauth_client_secret: str = ""
    microsoft_graph_oauth_client_id: str = ""
    microsoft_graph_oauth_client_secret: str = ""
    # Optional settings
    webhook_base_url: str = ""
    aws_access_key_id: str = ""
    aws_secret_access_key: str = ""
    langflow_public_url: str = ""
    # Langflow auth settings — stored as "True"/"False" strings, exactly as
    # they are written to the .env file.
    langflow_auto_login: str = "False"
    langflow_new_user_is_active: str = "False"
    langflow_enable_superuser_cli: str = "False"
    # Document paths (comma-separated)
    openrag_documents_paths: str = "./documents"
    # Validation errors keyed by field name; populated by
    # EnvManager.validate_config().
    validation_errors: Dict[str, str] = field(default_factory=dict)
class EnvManager:
    """Manages environment configuration for OpenRAG.

    Loads, validates, and saves the project's .env file, generating secure
    defaults for any passwords/keys the user left blank.
    """

    def __init__(self, env_file: Optional[Path] = None):
        # Target file defaults to ./.env in the current working directory.
        self.env_file = env_file or Path(".env")
        self.config = EnvConfig()

    def generate_secure_password(self) -> str:
        """Generate a secure password for OpenSearch."""
        # Generate a 16-character password with letters, digits, and symbols
        alphabet = string.ascii_letters + string.digits + "!@#$%^&*"
        return ''.join(secrets.choice(alphabet) for _ in range(16))

    def generate_langflow_secret_key(self) -> str:
        """Generate a secure secret key for Langflow."""
        return secrets.token_urlsafe(32)

    def load_existing_env(self) -> bool:
        """Load existing .env file if it exists.

        Returns True when the file was found and parsed, False when it is
        absent or unreadable. Unknown keys are silently ignored.
        """
        if not self.env_file.exists():
            return False
        try:
            with open(self.env_file, 'r') as f:
                for line in f:
                    line = line.strip()
                    # Skip blank lines and comments.
                    if not line or line.startswith('#'):
                        continue
                    if '=' in line:
                        key, value = line.split('=', 1)
                        key = key.strip()
                        value = sanitize_env_value(value)
                        # Map env vars to config attributes
                        attr_map = {
                            'OPENAI_API_KEY': 'openai_api_key',
                            'OPENSEARCH_PASSWORD': 'opensearch_password',
                            'LANGFLOW_SECRET_KEY': 'langflow_secret_key',
                            'LANGFLOW_SUPERUSER': 'langflow_superuser',
                            'LANGFLOW_SUPERUSER_PASSWORD': 'langflow_superuser_password',
                            'FLOW_ID': 'flow_id',
                            'GOOGLE_OAUTH_CLIENT_ID': 'google_oauth_client_id',
                            'GOOGLE_OAUTH_CLIENT_SECRET': 'google_oauth_client_secret',
                            'MICROSOFT_GRAPH_OAUTH_CLIENT_ID': 'microsoft_graph_oauth_client_id',
                            'MICROSOFT_GRAPH_OAUTH_CLIENT_SECRET': 'microsoft_graph_oauth_client_secret',
                            'WEBHOOK_BASE_URL': 'webhook_base_url',
                            'AWS_ACCESS_KEY_ID': 'aws_access_key_id',
                            'AWS_SECRET_ACCESS_KEY': 'aws_secret_access_key',
                            'LANGFLOW_PUBLIC_URL': 'langflow_public_url',
                            'OPENRAG_DOCUMENTS_PATHS': 'openrag_documents_paths',
                            'LANGFLOW_AUTO_LOGIN': 'langflow_auto_login',
                            'LANGFLOW_NEW_USER_IS_ACTIVE': 'langflow_new_user_is_active',
                            'LANGFLOW_ENABLE_SUPERUSER_CLI': 'langflow_enable_superuser_cli',
                        }
                        if key in attr_map:
                            setattr(self.config, attr_map[key], value)
            return True
        except Exception as e:
            print(f"Error loading .env file: {e}")
            return False

    def setup_secure_defaults(self) -> None:
        """Set up secure default values for passwords and keys.

        Only fills fields that the user (or a previously loaded .env) left
        empty — existing values are never overwritten.
        """
        if not self.config.opensearch_password:
            self.config.opensearch_password = self.generate_secure_password()
        if not self.config.langflow_secret_key:
            self.config.langflow_secret_key = self.generate_langflow_secret_key()
        if not self.config.langflow_superuser_password:
            self.config.langflow_superuser_password = self.generate_secure_password()

    def validate_config(self, mode: str = "full") -> bool:
        """
        Validate the current configuration.

        Args:
            mode: "no_auth" for minimal validation, "full" for complete validation

        Returns True when no errors were recorded; details land in
        ``self.config.validation_errors`` keyed by field name.
        """
        self.config.validation_errors.clear()
        # Always validate OpenAI API key
        if not validate_openai_api_key(self.config.openai_api_key):
            self.config.validation_errors['openai_api_key'] = "Invalid OpenAI API key format (should start with sk-)"
        # Validate documents paths only if provided (optional)
        if self.config.openrag_documents_paths:
            is_valid, error_msg, _ = validate_documents_paths(self.config.openrag_documents_paths)
            if not is_valid:
                self.config.validation_errors['openrag_documents_paths'] = error_msg
        # Validate required fields
        if not validate_non_empty(self.config.opensearch_password):
            self.config.validation_errors['opensearch_password'] = "OpenSearch password is required"
        # Langflow secret key is auto-generated; no user input required
        if not validate_non_empty(self.config.langflow_superuser_password):
            self.config.validation_errors['langflow_superuser_password'] = "Langflow superuser password is required"
        if mode == "full":
            # Validate OAuth settings if provided
            if self.config.google_oauth_client_id and not validate_google_oauth_client_id(self.config.google_oauth_client_id):
                self.config.validation_errors['google_oauth_client_id'] = "Invalid Google OAuth client ID format"
            if self.config.google_oauth_client_id and not validate_non_empty(self.config.google_oauth_client_secret):
                self.config.validation_errors['google_oauth_client_secret'] = "Google OAuth client secret required when client ID is provided"
            if self.config.microsoft_graph_oauth_client_id and not validate_non_empty(self.config.microsoft_graph_oauth_client_secret):
                self.config.validation_errors['microsoft_graph_oauth_client_secret'] = "Microsoft Graph client secret required when client ID is provided"
            # Validate optional URLs if provided
            if self.config.webhook_base_url and not validate_url(self.config.webhook_base_url):
                self.config.validation_errors['webhook_base_url'] = "Invalid webhook URL format"
            if self.config.langflow_public_url and not validate_url(self.config.langflow_public_url):
                self.config.validation_errors['langflow_public_url'] = "Invalid Langflow public URL format"
        return len(self.config.validation_errors) == 0

    def save_env_file(self) -> bool:
        """Save current configuration to .env file.

        Any existing file is first renamed to a timestamped backup. Returns
        True on success, False (with a printed message) on failure.
        """
        try:
            # Ensure secure defaults (including Langflow secret key) are set before saving
            self.setup_secure_defaults()
            # Create timestamped backup if file exists
            if self.env_file.exists():
                timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
                backup_file = self.env_file.with_suffix(f'.env.backup.{timestamp}')
                # NOTE(review): rename moves the original aside; if the write
                # below fails, the previous .env survives only as the backup.
                self.env_file.rename(backup_file)
            with open(self.env_file, 'w') as f:
                f.write("# OpenRAG Environment Configuration\n")
                f.write("# Generated by OpenRAG TUI\n\n")
                # Core settings
                f.write("# Core settings\n")
                f.write(f"LANGFLOW_SECRET_KEY={self.config.langflow_secret_key}\n")
                f.write(f"LANGFLOW_SUPERUSER={self.config.langflow_superuser}\n")
                f.write(f"LANGFLOW_SUPERUSER_PASSWORD={self.config.langflow_superuser_password}\n")
                f.write(f"FLOW_ID={self.config.flow_id}\n")
                f.write(f"OPENSEARCH_PASSWORD={self.config.opensearch_password}\n")
                f.write(f"OPENAI_API_KEY={self.config.openai_api_key}\n")
                f.write(f"OPENRAG_DOCUMENTS_PATHS={self.config.openrag_documents_paths}\n")
                f.write("\n")
                # Langflow auth settings
                f.write("# Langflow auth settings\n")
                f.write(f"LANGFLOW_AUTO_LOGIN={self.config.langflow_auto_login}\n")
                f.write(f"LANGFLOW_NEW_USER_IS_ACTIVE={self.config.langflow_new_user_is_active}\n")
                f.write(f"LANGFLOW_ENABLE_SUPERUSER_CLI={self.config.langflow_enable_superuser_cli}\n")
                f.write("\n")
                # OAuth settings (only written when at least one value is set)
                if self.config.google_oauth_client_id or self.config.google_oauth_client_secret:
                    f.write("# Google OAuth settings\n")
                    f.write(f"GOOGLE_OAUTH_CLIENT_ID={self.config.google_oauth_client_id}\n")
                    f.write(f"GOOGLE_OAUTH_CLIENT_SECRET={self.config.google_oauth_client_secret}\n")
                    f.write("\n")
                if self.config.microsoft_graph_oauth_client_id or self.config.microsoft_graph_oauth_client_secret:
                    f.write("# Microsoft Graph OAuth settings\n")
                    f.write(f"MICROSOFT_GRAPH_OAUTH_CLIENT_ID={self.config.microsoft_graph_oauth_client_id}\n")
                    f.write(f"MICROSOFT_GRAPH_OAUTH_CLIENT_SECRET={self.config.microsoft_graph_oauth_client_secret}\n")
                    f.write("\n")
                # Optional settings — header written lazily before the first
                # non-empty value.
                optional_vars = [
                    ("WEBHOOK_BASE_URL", self.config.webhook_base_url),
                    ("AWS_ACCESS_KEY_ID", self.config.aws_access_key_id),
                    ("AWS_SECRET_ACCESS_KEY", self.config.aws_secret_access_key),
                    ("LANGFLOW_PUBLIC_URL", self.config.langflow_public_url),
                ]
                optional_written = False
                for var_name, var_value in optional_vars:
                    if var_value:
                        if not optional_written:
                            f.write("# Optional settings\n")
                            optional_written = True
                        f.write(f"{var_name}={var_value}\n")
                if optional_written:
                    f.write("\n")
            return True
        except Exception as e:
            print(f"Error saving .env file: {e}")
            return False

    def get_no_auth_setup_fields(self) -> List[tuple[str, str, str, bool]]:
        """Get fields required for no-auth setup mode. Returns (field_name, display_name, placeholder, can_generate)."""
        return [
            ("openai_api_key", "OpenAI API Key", "sk-...", False),
            ("opensearch_password", "OpenSearch Password", "Will be auto-generated if empty", True),
            ("langflow_superuser_password", "Langflow Superuser Password", "Will be auto-generated if empty", True),
            ("openrag_documents_paths", "Documents Paths", "./documents,/path/to/more/docs", False),
        ]

    def get_full_setup_fields(self) -> List[tuple[str, str, str, bool]]:
        """Get all fields for full setup mode.

        Extends the no-auth base fields with OAuth and optional settings;
        tuple shape matches ``get_no_auth_setup_fields``.
        """
        base_fields = self.get_no_auth_setup_fields()
        oauth_fields = [
            ("google_oauth_client_id", "Google OAuth Client ID", "xxx.apps.googleusercontent.com", False),
            ("google_oauth_client_secret", "Google OAuth Client Secret", "", False),
            ("microsoft_graph_oauth_client_id", "Microsoft Graph Client ID", "", False),
            ("microsoft_graph_oauth_client_secret", "Microsoft Graph Client Secret", "", False),
        ]
        optional_fields = [
            ("webhook_base_url", "Webhook Base URL (optional)", "https://your-domain.com", False),
            ("aws_access_key_id", "AWS Access Key ID (optional)", "", False),
            ("aws_secret_access_key", "AWS Secret Access Key (optional)", "", False),
            ("langflow_public_url", "Langflow Public URL (optional)", "http://localhost:7860", False),
        ]
        return base_fields + oauth_fields + optional_fields

    def generate_compose_volume_mounts(self) -> List[str]:
        """Generate Docker Compose volume mount strings from documents paths.

        The first configured path maps to /app/documents; additional paths
        map to /app/documents2, /app/documents3, ... Falls back to the
        default mount when the configured paths fail validation.
        """
        is_valid, _, validated_paths = validate_documents_paths(self.config.openrag_documents_paths)
        if not is_valid:
            return ["./documents:/app/documents:Z"]  # fallback
        volume_mounts = []
        for i, path in enumerate(validated_paths):
            if i == 0:
                # First path maps to the default /app/documents
                volume_mounts.append(f"{path}:/app/documents:Z")
            else:
                # Additional paths map to numbered directories
                volume_mounts.append(f"{path}:/app/documents{i+1}:Z")
        return volume_mounts

View file

@ -0,0 +1 @@
"""TUI screens package."""

530
src/tui/screens/config.py Normal file
View file

@ -0,0 +1,530 @@
"""Configuration screen for OpenRAG TUI."""
from textual.app import ComposeResult
from textual.containers import Container, Vertical, Horizontal, ScrollableContainer
from textual.screen import Screen
from textual.widgets import Header, Footer, Static, Button, Input, Label, TabbedContent, TabPane
from textual.validation import ValidationResult, Validator
from rich.text import Text
from pathlib import Path
from ..managers.env_manager import EnvManager
from ..utils.validation import validate_openai_api_key, validate_documents_paths
from pathlib import Path
class OpenAIKeyValidator(Validator):
    """Checks that a value looks like an OpenAI API key (``sk-...``)."""

    def validate(self, value: str) -> ValidationResult:
        # Empty input passes — required-ness is enforced elsewhere.
        if not value or validate_openai_api_key(value):
            return self.success()
        return self.failure("Invalid OpenAI API key format (should start with sk-)")
class DocumentsPathValidator(Validator):
    """Checks a comma-separated list of document paths."""

    def validate(self, value: str) -> ValidationResult:
        # The field is optional — an empty value passes.
        if not value:
            return self.success()
        is_valid, error_msg, _ = validate_documents_paths(value)
        return self.success() if is_valid else self.failure(error_msg)
class ConfigScreen(Screen):
"""Configuration screen for environment setup."""
BINDINGS = [
("escape", "back", "Back"),
("ctrl+s", "save", "Save"),
("ctrl+g", "generate", "Generate Passwords"),
]
def __init__(self, mode: str = "full"):
    """Set up screen state for the given setup mode.

    Args:
        mode: "no_auth" (minimal fields) or "full" (OAuth + optional extras).
    """
    super().__init__()
    self.mode = mode  # "no_auth" or "full"
    self.env_manager = EnvManager()
    # field name -> Input widget; populated while composing the form.
    self.inputs = {}
    # Load existing config if available so fields come pre-filled.
    self.env_manager.load_existing_env()
def compose(self) -> ComposeResult:
    """Create the configuration screen layout.

    A single scrollable form holds every field, followed by the action
    button row; a Footer shows key bindings at the bottom.
    """
    # Removed top header bar and header text
    with Container(id="main-container"):
        with ScrollableContainer(id="config-scroll"):
            with Vertical(id="config-form"):
                yield from self._create_all_fields()
                yield Horizontal(
                    Button("Generate Passwords", variant="default", id="generate-btn"),
                    Button("Save Configuration", variant="success", id="save-btn"),
                    Button("Back", variant="default", id="back-btn"),
                    classes="button-row"
                )
    yield Footer()
def _create_header_text(self) -> Text:
    """Build the rich header blurb for the current setup mode."""
    text = Text()
    if self.mode == "no_auth":
        text.append("Quick Setup - No Authentication\n", style="bold green")
        text.append("Configure OpenRAG for local document processing only.\n\n", style="dim")
    else:
        text.append("Full Setup - OAuth Integration\n", style="bold cyan")
        text.append("Configure OpenRAG with cloud service integrations.\n\n", style="dim")
    # Shared footer hints, independent of mode.
    text.append("Required fields are marked with *\n", style="yellow")
    text.append("Use Ctrl+G to generate admin passwords\n", style="dim")
    return text
def _create_all_fields(self) -> ComposeResult:
    """Yield the complete configuration form in display order.

    Sections: Admin Credentials, API Keys (plus OAuth/AWS in "full" mode),
    Others (documents paths), Langflow Auth Settings, and optional URLs in
    "full" mode. Every Input is registered in ``self.inputs`` keyed by its
    EnvConfig attribute name so save/generate actions can read values back.

    Refactor: the previous version repeated the Label/Input/register/spacer
    stanza ~18 times; the repetition now lives in two local helpers.
    """

    def spacer() -> Static:
        # Blank line between fields/sections.
        return Static(" ")

    def make_input(field_name: str, *, placeholder: str = "", password: bool = False,
                   validators=None, default: str = "") -> Input:
        # Pre-fill from the loaded EnvConfig and register for later readback.
        widget = Input(
            placeholder=placeholder,
            value=getattr(self.env_manager.config, field_name, default),
            password=password,
            validators=validators,
            id=f"input-{field_name}",
        )
        self.inputs[field_name] = widget
        return widget

    # --- Admin Credentials ---
    yield Static("Admin Credentials", classes="tab-header")
    yield spacer()

    yield Label("OpenSearch Admin Password *")
    yield make_input("opensearch_password", placeholder="Auto-generated secure password", password=True)
    yield spacer()

    yield Label("Langflow Admin Username *")
    yield make_input("langflow_superuser", placeholder="admin")
    yield spacer()

    yield Label("Langflow Admin Password *")
    yield make_input("langflow_superuser_password", placeholder="Auto-generated secure password", password=True)
    yield spacer()
    yield spacer()

    # --- API Keys ---
    yield Static("API Keys", classes="tab-header")
    yield spacer()

    yield Label("OpenAI API Key *")
    # Where to create OpenAI keys (helper above the box)
    yield Static(Text("Get a key: https://platform.openai.com/api-keys", style="dim"), classes="helper-text")
    yield make_input("openai_api_key", placeholder="sk-...", password=True,
                     validators=[OpenAIKeyValidator()])
    yield spacer()

    # OAuth + AWS fields only appear in full mode.
    if self.mode == "full":
        yield Label("Google OAuth Client ID")
        yield Static(Text("Create credentials: https://console.cloud.google.com/apis/credentials", style="dim"), classes="helper-text")
        # Callback URL guidance for Google OAuth
        yield Static(
            Text(
                "Important: add an Authorized redirect URI to your Google OAuth app(s):\n"
                " - Local: http://localhost:3000/auth/callback\n"
                " - Prod: https://your-domain.com/auth/callback\n"
                "If you use separate apps for login and connectors, add this URL to BOTH.",
                style="dim"
            ),
            classes="helper-text"
        )
        yield make_input("google_oauth_client_id", placeholder="xxx.apps.googleusercontent.com")
        yield spacer()

        yield Label("Google OAuth Client Secret")
        yield make_input("google_oauth_client_secret", password=True)
        yield spacer()

        yield Label("Microsoft Graph Client ID")
        yield Static(Text("Create app: https://portal.azure.com/#view/Microsoft_AAD_RegisteredApps/ApplicationsListBlade", style="dim"), classes="helper-text")
        # Callback URL guidance for Microsoft OAuth
        yield Static(
            Text(
                "Important: configure a Web redirect URI for your Microsoft app(s):\n"
                " - Local: http://localhost:3000/auth/callback\n"
                " - Prod: https://your-domain.com/auth/callback\n"
                "If you use separate apps for login and connectors, add this URI to BOTH.",
                style="dim"
            ),
            classes="helper-text"
        )
        yield make_input("microsoft_graph_oauth_client_id")
        yield spacer()

        yield Label("Microsoft Graph Client Secret")
        yield make_input("microsoft_graph_oauth_client_secret", password=True)
        yield spacer()

        yield Label("AWS Access Key ID")
        yield Static(Text("Create keys: https://console.aws.amazon.com/iam/home#/security_credentials", style="dim"), classes="helper-text")
        yield make_input("aws_access_key_id")
        yield spacer()

        yield Label("AWS Secret Access Key")
        yield make_input("aws_secret_access_key", password=True)
        yield spacer()
        yield spacer()

    # --- Other Settings ---
    yield Static("Others", classes="tab-header")
    yield spacer()

    # Documents Paths (optional) + picker action button on next line
    yield Label("Documents Paths")
    yield make_input("openrag_documents_paths",
                     placeholder="./documents,/path/to/more/docs",
                     validators=[DocumentsPathValidator()])
    yield Horizontal(Button("Pick…", id="pick-docs-btn"), id="docs-path-actions", classes="controls-row")
    yield spacer()

    # --- Langflow Auth Settings ---
    yield Static("Langflow Auth Settings", classes="tab-header")
    yield spacer()

    yield Label("Langflow Auto Login")
    yield make_input("langflow_auto_login", placeholder="False", default="False")
    yield spacer()

    yield Label("Langflow New User Is Active")
    yield make_input("langflow_new_user_is_active", placeholder="False", default="False")
    yield spacer()

    yield Label("Langflow Enable Superuser CLI")
    yield make_input("langflow_enable_superuser_cli", placeholder="False", default="False")
    yield spacer()
    yield spacer()

    # Langflow Secret Key has no field here; it is generated on save.
    if self.mode == "full":
        yield Label("Webhook Base URL")
        yield make_input("webhook_base_url", placeholder="https://your-domain.com")
        yield spacer()

        yield Label("Langflow Public URL")
        yield make_input("langflow_public_url", placeholder="http://localhost:7860")
        yield spacer()
def _create_field(self, field_name: str, display_name: str, placeholder: str, can_generate: bool, required: bool = False) -> ComposeResult:
    """Yield the label, input widget and trailing spacer for one form field.

    The widget is also registered in ``self.inputs`` under ``field_name``
    so that save/generate actions can read it back later.
    """
    # Required fields are marked with a trailing asterisk on the label.
    yield Label(f"{display_name} *" if required else f"{display_name}")

    # Build the Input keyword arguments once, then specialise per field.
    kwargs = {
        "placeholder": placeholder,
        "value": getattr(self.env_manager.config, field_name, ""),
        "id": f"input-{field_name}",
    }
    if field_name == "openai_api_key":
        # API key: masked entry plus format validation.
        kwargs["password"] = True
        kwargs["validators"] = [OpenAIKeyValidator()]
    elif field_name == "openrag_documents_paths":
        # Documents paths get existence/format validation.
        kwargs["validators"] = [DocumentsPathValidator()]
    elif "password" in field_name or "secret" in field_name:
        # Any credential-like field is masked.
        kwargs["password"] = True

    input_widget = Input(**kwargs)
    yield input_widget
    self.inputs[field_name] = input_widget

    # Vertical spacing between consecutive fields.
    yield Static(" ")
def on_mount(self) -> None:
    """Focus the first input field once the screen is mounted."""
    # Focusing is purely cosmetic; never let it break mounting.
    try:
        first_input = next(iter(self.query(Input)), None)
        if first_input is not None:
            first_input.focus()
    except Exception:
        pass
def on_button_pressed(self, event: Button.Pressed) -> None:
    """Route a button press to the matching screen action."""
    # Dispatch table keeps the id -> action mapping in one place;
    # unknown ids fall through as a no-op, same as the original chain.
    handlers = {
        "generate-btn": self.action_generate,
        "save-btn": self.action_save,
        "back-btn": self.action_back,
        "pick-docs-btn": self.action_pick_documents_path,
    }
    handler = handlers.get(event.button.id)
    if handler is not None:
        handler()
def action_generate(self) -> None:
    """Generate secure admin passwords and reflect them in the form."""
    self.env_manager.setup_secure_defaults()
    # Only the two password fields are regenerated; copy each one back
    # into its input widget when that widget exists on this screen.
    for field_name in ("opensearch_password", "langflow_superuser_password"):
        widget = self.inputs.get(field_name)
        if widget is not None:
            widget.value = getattr(self.env_manager.config, field_name)
    self.notify("Generated secure passwords", severity="information")
def action_save(self) -> None:
    """Validate the form, persist it to the env file, then open the monitor.

    On validation failure the first three field errors are shown and the
    screen stays put; on save failure an error toast is shown.
    """
    # Push current widget values into the config object.
    for field_name, input_widget in self.inputs.items():
        setattr(self.env_manager.config, field_name, input_widget.value)

    # Validate the configuration for the current mode.
    if not self.env_manager.validate_config(self.mode):
        error_messages = [
            f"{field}: {error}"
            for field, error in self.env_manager.config.validation_errors.items()
        ]
        # Note: fixed a stray f-prefix on a placeholder-less literal here.
        self.notify("Validation failed:\n" + "\n".join(error_messages[:3]), severity="error")
        return

    # Save to file and move on to the monitor screen on success.
    if self.env_manager.save_env_file():
        self.notify("Configuration saved successfully!", severity="information")
        # Deferred import avoids a circular dependency between screens.
        from .monitor import MonitorScreen
        self.app.push_screen(MonitorScreen())
    else:
        self.notify("Failed to save configuration", severity="error")
def action_back(self) -> None:
    """Go back to welcome screen."""
    # Pop this screen off the stack, revealing whichever screen pushed it.
    self.app.pop_screen()
def action_pick_documents_path(self) -> None:
    """Open textual-fspicker to select a path and append it to the input.

    The picker is an optional dependency and its API has varied across
    versions, so every step degrades gracefully: missing package, missing
    picker class, incompatible constructor and incompatible push_screen
    signature are all handled with a warning instead of a crash.
    """
    try:
        import importlib
        fsp = importlib.import_module("textual_fspicker")
    except Exception:
        self.notify("textual-fspicker not available", severity="warning")
        return
    # Determine starting path from current input if possible
    # (first comma-separated entry; falls back to the home directory).
    input_widget = self.inputs.get("openrag_documents_paths")
    start = Path.home()
    if input_widget and input_widget.value:
        first = input_widget.value.split(",")[0].strip()
        if first:
            start = Path(first).expanduser()
    # Prefer SelectDirectory for directories; fallback to FileOpen
    PickerClass = getattr(fsp, "SelectDirectory", None) or getattr(fsp, "FileOpen", None)
    if PickerClass is None:
        self.notify("No compatible picker found in textual-fspicker", severity="warning")
        return
    # Constructor signature differs across versions: keyword first, then positional.
    try:
        picker = PickerClass(location=start)
    except Exception:
        try:
            picker = PickerClass(start)
        except Exception:
            self.notify("Could not initialize textual-fspicker", severity="warning")
            return

    def _append_path(result) -> None:
        # Dismissal callback: append the chosen path to the comma-separated
        # list, skipping empty results and duplicates.
        if not result:
            return
        path_str = str(result)
        if input_widget is None:
            return
        current = input_widget.value or ""
        paths = [p.strip() for p in current.split(",") if p.strip()]
        if path_str not in paths:
            paths.append(path_str)
        input_widget.value = ",".join(paths)

    # Push with callback when supported; otherwise, use on_screen_dismissed fallback
    try:
        self.app.push_screen(picker, _append_path)  # type: ignore[arg-type]
    except TypeError:
        # Older Textual: stash the callback for on_screen_dismissed to invoke.
        self._docs_pick_callback = _append_path  # type: ignore[attr-defined]
        self.app.push_screen(picker)
def on_screen_dismissed(self, event) -> None:  # type: ignore[override]
    """Fallback for Textual versions whose push_screen lacks a callback.

    If action_pick_documents_path stashed a callback, feed it the
    dismissal result, then drop the stashed attribute.
    """
    try:
        # textual-fspicker screens should dismiss with a result; hand to callback if present
        cb = getattr(self, "_docs_pick_callback", None)
        if cb is not None:
            cb(getattr(event, "result", None))
        # One-shot: discard the callback whether or not it ran.
        try:
            delattr(self, "_docs_pick_callback")
        except Exception:
            pass
    except Exception:
        # Best-effort: a broken picker result must never crash the screen.
        pass
def on_input_changed(self, event: Input.Changed) -> None:
    """Handle input changes for real-time validation feedback."""
    # Validators attached to each Input render their own feedback;
    # nothing extra is needed here yet.
    pass

View file

@ -0,0 +1,402 @@
"""Diagnostics screen for OpenRAG TUI."""
import asyncio
import logging
import os
import datetime
from pathlib import Path
from typing import List, Optional
from textual.app import ComposeResult
from textual.containers import Container, Vertical, Horizontal, ScrollableContainer
from textual.screen import Screen
from textual.widgets import Header, Footer, Static, Button, Label, Log
from rich.text import Text
from ..managers.container_manager import ContainerManager
class DiagnosticsScreen(Screen):
    """Diagnostics screen for debugging OpenRAG.

    Gathers container-runtime info, per-service status, and Podman/Docker
    specifics into one log view that can be refreshed, copied to the
    clipboard, or saved to a timestamped file under ./logs.
    """

    CSS = """
    #diagnostics-log {
        border: solid $accent;
        padding: 1;
        margin: 1;
        background: $surface;
        min-height: 20;
    }
    .button-row Button {
        margin: 0 1;
    }
    .copy-indicator {
        background: $success;
        color: $text;
        padding: 1;
        margin: 1;
        text-align: center;
    }
    """

    BINDINGS = [
        ("escape", "back", "Back"),
        ("r", "refresh", "Refresh"),
        ("ctrl+c", "copy", "Copy to Clipboard"),
        ("ctrl+s", "save", "Save to File"),
    ]

    def __init__(self):
        super().__init__()
        self.container_manager = ContainerManager()
        self._logger = logging.getLogger("openrag.diagnostics")
        # Handle to the pending "clear the status label" task, if any.
        self._status_timer = None

    def compose(self) -> ComposeResult:
        """Create the diagnostics screen layout."""
        yield Header()
        with Container(id="main-container"):
            yield Static("OpenRAG Diagnostics", classes="tab-header")
            with Horizontal(classes="button-row"):
                yield Button("Refresh", variant="primary", id="refresh-btn")
                yield Button("Check Podman", variant="default", id="check-podman-btn")
                yield Button("Check Docker", variant="default", id="check-docker-btn")
                yield Button("Copy to Clipboard", variant="default", id="copy-btn")
                yield Button("Save to File", variant="default", id="save-btn")
                yield Button("Back", variant="default", id="back-btn")
            # Status indicator for copy/save operations
            yield Static("", id="copy-status", classes="copy-indicator")
            with ScrollableContainer(id="diagnostics-scroll"):
                yield Log(id="diagnostics-log", highlight=True)
        yield Footer()

    def on_mount(self) -> None:
        """Initialize the screen."""
        self.run_diagnostics()
        # Focus the first button (refresh-btn); best-effort only.
        try:
            self.query_one("#refresh-btn").focus()
        except Exception:
            pass

    def on_button_pressed(self, event: Button.Pressed) -> None:
        """Handle button presses."""
        if event.button.id == "refresh-btn":
            self.action_refresh()
        elif event.button.id == "check-podman-btn":
            asyncio.create_task(self.check_podman())
        elif event.button.id == "check-docker-btn":
            asyncio.create_task(self.check_docker())
        elif event.button.id == "copy-btn":
            self.copy_to_clipboard()
        elif event.button.id == "save-btn":
            self.save_to_file()
        elif event.button.id == "back-btn":
            self.action_back()

    def action_refresh(self) -> None:
        """Refresh diagnostics."""
        self.run_diagnostics()

    def action_copy(self) -> None:
        """Copy log content to clipboard (keyboard shortcut)."""
        self.copy_to_clipboard()

    def copy_to_clipboard(self) -> None:
        """Copy log content to clipboard.

        Tries pyperclip first, then falls back to a platform-specific
        command: pbcopy (macOS), clip (Windows), xclip/xsel (Linux).
        """
        try:
            log = self.query_one("#diagnostics-log", Log)
            content = "\n".join(str(line) for line in log.lines)
            status = self.query_one("#copy-status", Static)
            # Try to use pyperclip if available
            try:
                import pyperclip
                pyperclip.copy(content)
                self.notify("Copied to clipboard", severity="information")
                status.update("✓ Content copied to clipboard")
                self._hide_status_after_delay(status)
                return
            except ImportError:
                pass
            # Fallback to platform-specific clipboard commands
            import subprocess
            import platform
            system = platform.system()
            if system == "Darwin":  # macOS
                process = subprocess.Popen(
                    ["pbcopy"], stdin=subprocess.PIPE, text=True
                )
                process.communicate(input=content)
                self.notify("Copied to clipboard", severity="information")
                status.update("✓ Content copied to clipboard")
            elif system == "Windows":
                process = subprocess.Popen(
                    ["clip"], stdin=subprocess.PIPE, text=True
                )
                process.communicate(input=content)
                self.notify("Copied to clipboard", severity="information")
                status.update("✓ Content copied to clipboard")
            elif system == "Linux":
                # Try xclip first, then xsel
                try:
                    process = subprocess.Popen(
                        ["xclip", "-selection", "clipboard"],
                        stdin=subprocess.PIPE,
                        text=True
                    )
                    process.communicate(input=content)
                    self.notify("Copied to clipboard", severity="information")
                    status.update("✓ Content copied to clipboard")
                except FileNotFoundError:
                    try:
                        process = subprocess.Popen(
                            ["xsel", "--clipboard", "--input"],
                            stdin=subprocess.PIPE,
                            text=True
                        )
                        process.communicate(input=content)
                        self.notify("Copied to clipboard", severity="information")
                        status.update("✓ Content copied to clipboard")
                    except FileNotFoundError:
                        self.notify("Clipboard utilities not found. Install xclip or xsel.", severity="error")
                        status.update("❌ Clipboard utilities not found. Install xclip or xsel.")
            else:
                self.notify("Clipboard not supported on this platform", severity="error")
                status.update("❌ Clipboard not supported on this platform")
            self._hide_status_after_delay(status)
        except Exception as e:
            self.notify(f"Failed to copy to clipboard: {e}", severity="error")
            status = self.query_one("#copy-status", Static)
            status.update(f"❌ Failed to copy: {e}")
            self._hide_status_after_delay(status)

    def _hide_status_after_delay(self, status_widget: Static, delay: float = 3.0) -> None:
        """Hide the status message after a delay."""
        # Cancel any existing timer so only the latest message gets cleared.
        if self._status_timer:
            self._status_timer.cancel()
        # Create and run the timer task
        self._status_timer = asyncio.create_task(self._clear_status_after_delay(status_widget, delay))

    async def _clear_status_after_delay(self, status_widget: Static, delay: float) -> None:
        """Clear the status message after a delay."""
        await asyncio.sleep(delay)
        status_widget.update("")

    def action_save(self) -> None:
        """Save log content to file (keyboard shortcut)."""
        self.save_to_file()

    def save_to_file(self) -> None:
        """Save log content to a timestamped file under ./logs."""
        try:
            log = self.query_one("#diagnostics-log", Log)
            content = "\n".join(str(line) for line in log.lines)
            status = self.query_one("#copy-status", Static)
            # Create logs directory if it doesn't exist
            logs_dir = Path("logs")
            logs_dir.mkdir(exist_ok=True)
            # Create a timestamped filename
            timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = logs_dir / f"openrag_diagnostics_{timestamp}.txt"
            # Save to file
            with open(filename, "w") as f:
                f.write(content)
            # Bug fix: messages previously omitted the computed path.
            self.notify(f"Saved to {filename}", severity="information")
            status.update(f"✓ Saved to {filename}")
            # Log the save operation
            self._logger.info(f"Diagnostics saved to {filename}")
            self._hide_status_after_delay(status)
        except Exception as e:
            error_msg = f"Failed to save file: {e}"
            self.notify(error_msg, severity="error")
            self._logger.error(error_msg)
            status = self.query_one("#copy-status", Static)
            status.update(f"{error_msg}")
            self._hide_status_after_delay(status)

    def action_back(self) -> None:
        """Go back to previous screen."""
        self.app.pop_screen()

    def _get_system_info(self) -> Text:
        """Get system information text describing the container runtime."""
        info_text = Text()
        runtime_info = self.container_manager.get_runtime_info()
        info_text.append("Container Runtime Information\n", style="bold")
        info_text.append("=" * 30 + "\n")
        info_text.append(f"Type: {runtime_info.runtime_type.value}\n")
        info_text.append(f"Compose Command: {' '.join(runtime_info.compose_command)}\n")
        info_text.append(f"Runtime Command: {' '.join(runtime_info.runtime_command)}\n")
        if runtime_info.version:
            info_text.append(f"Version: {runtime_info.version}\n")
        return info_text

    def run_diagnostics(self) -> None:
        """Run all diagnostics: sync system info now, async checks in a task."""
        log = self.query_one("#diagnostics-log", Log)
        log.clear()
        # System information
        system_info = self._get_system_info()
        log.write(str(system_info))
        log.write("")
        # Run async diagnostics
        asyncio.create_task(self._run_async_diagnostics())

    async def _run_async_diagnostics(self) -> None:
        """Run asynchronous diagnostics (service status, Podman checks)."""
        log = self.query_one("#diagnostics-log", Log)
        # Check services
        log.write("[bold green]Service Status[/bold green]")
        services = await self.container_manager.get_service_status(force_refresh=True)
        for name, info in services.items():
            # Bug fix: info.status is an enum (see info.status.value below),
            # so comparing it to the string "running" was always False and
            # every service rendered red. Compare the enum's value instead.
            status_color = "green" if info.status.value == "running" else "red"
            log.write(f"[bold]{name}[/bold]: [{status_color}]{info.status.value}[/{status_color}]")
            if info.health:
                log.write(f" Health: {info.health}")
            if info.ports:
                log.write(f" Ports: {', '.join(info.ports)}")
            if info.image:
                log.write(f" Image: {info.image}")
        log.write("")
        # Check for Podman-specific issues
        if self.container_manager.runtime_info.runtime_type.name == "PODMAN":
            await self.check_podman()

    async def check_podman(self) -> None:
        """Run Podman-specific diagnostics (version, containers, compose)."""
        log = self.query_one("#diagnostics-log", Log)
        log.write("[bold green]Podman Diagnostics[/bold green]")
        # Check if using Podman
        if self.container_manager.runtime_info.runtime_type.name != "PODMAN":
            log.write("[yellow]Not using Podman[/yellow]")
            return
        # Check Podman version
        cmd = ["podman", "--version"]
        process = await asyncio.create_subprocess_exec(
            *cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE
        )
        stdout, stderr = await process.communicate()
        if process.returncode == 0:
            log.write(f"Podman version: {stdout.decode().strip()}")
        else:
            log.write(f"[red]Failed to get Podman version: {stderr.decode().strip()}[/red]")
        # Check Podman containers
        cmd = ["podman", "ps", "--all"]
        process = await asyncio.create_subprocess_exec(
            *cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE
        )
        stdout, stderr = await process.communicate()
        if process.returncode == 0:
            log.write("Podman containers:")
            for line in stdout.decode().strip().split("\n"):
                log.write(f" {line}")
        else:
            log.write(f"[red]Failed to list Podman containers: {stderr.decode().strip()}[/red]")
        # Check Podman compose (run from the compose file's directory)
        cmd = ["podman", "compose", "ps"]
        process = await asyncio.create_subprocess_exec(
            *cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            cwd=self.container_manager.compose_file.parent
        )
        stdout, stderr = await process.communicate()
        if process.returncode == 0:
            log.write("Podman compose services:")
            for line in stdout.decode().strip().split("\n"):
                log.write(f" {line}")
        else:
            log.write(f"[red]Failed to list Podman compose services: {stderr.decode().strip()}[/red]")
        log.write("")

    async def check_docker(self) -> None:
        """Run Docker-specific diagnostics (version, containers, compose)."""
        log = self.query_one("#diagnostics-log", Log)
        log.write("[bold green]Docker Diagnostics[/bold green]")
        # Check if using Docker
        if "DOCKER" not in self.container_manager.runtime_info.runtime_type.name:
            log.write("[yellow]Not using Docker[/yellow]")
            return
        # Check Docker version
        cmd = ["docker", "--version"]
        process = await asyncio.create_subprocess_exec(
            *cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE
        )
        stdout, stderr = await process.communicate()
        if process.returncode == 0:
            log.write(f"Docker version: {stdout.decode().strip()}")
        else:
            log.write(f"[red]Failed to get Docker version: {stderr.decode().strip()}[/red]")
        # Check Docker containers
        cmd = ["docker", "ps", "--all"]
        process = await asyncio.create_subprocess_exec(
            *cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE
        )
        stdout, stderr = await process.communicate()
        if process.returncode == 0:
            log.write("Docker containers:")
            for line in stdout.decode().strip().split("\n"):
                log.write(f" {line}")
        else:
            log.write(f"[red]Failed to list Docker containers: {stderr.decode().strip()}[/red]")
        # Check Docker compose (run from the compose file's directory)
        cmd = ["docker", "compose", "ps"]
        process = await asyncio.create_subprocess_exec(
            *cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            cwd=self.container_manager.compose_file.parent
        )
        stdout, stderr = await process.communicate()
        if process.returncode == 0:
            log.write("Docker compose services:")
            for line in stdout.decode().strip().split("\n"):
                log.write(f" {line}")
        else:
            log.write(f"[red]Failed to list Docker compose services: {stderr.decode().strip()}[/red]")
        log.write("")
# Made with Bob

219
src/tui/screens/logs.py Normal file
View file

@ -0,0 +1,219 @@
"""Logs viewing screen for OpenRAG TUI."""
import asyncio
from textual.app import ComposeResult
from textual.containers import Container, Vertical, Horizontal
from textual.screen import Screen
from textual.widgets import Header, Footer, Static, Button, Select, TextArea
from textual.timer import Timer
from rich.text import Text
from ..managers.container_manager import ContainerManager
class LogsScreen(Screen):
    """Logs viewing and monitoring screen.

    Shows recent logs for one service and can follow new output in real
    time. Vim-style keys (g/G/j/k, ctrl+u/ctrl+f) drive scrolling.
    """

    BINDINGS = [
        ("escape", "back", "Back"),
        ("f", "follow", "Follow Logs"),
        ("c", "clear", "Clear"),
        ("r", "refresh", "Refresh"),
        ("a", "toggle_auto_scroll", "Toggle Auto Scroll"),
        ("g", "scroll_top", "Go to Top"),
        ("G", "scroll_bottom", "Go to Bottom"),
        ("j", "scroll_down", "Scroll Down"),
        ("k", "scroll_up", "Scroll Up"),
        ("ctrl+u", "scroll_page_up", "Page Up"),
        ("ctrl+f", "scroll_page_down", "Page Down"),
    ]

    def __init__(self, initial_service: str = "openrag-backend"):
        super().__init__()
        self.container_manager = ContainerManager()
        # Validate the initial service against available options
        valid_services = ["openrag-backend", "openrag-frontend", "opensearch", "langflow", "dashboards"]
        if initial_service not in valid_services:
            initial_service = "openrag-backend"  # fallback
        self.current_service = initial_service
        self.logs_area = None
        self.following = False
        self.follow_task = None
        self.auto_scroll = True

    def compose(self) -> ComposeResult:
        """Create the logs screen layout."""
        yield Container(
            Vertical(
                Static(f"Service Logs: {self.current_service}", id="logs-title"),
                self._create_logs_area(),
                id="logs-content"
            ),
            id="main-container"
        )
        yield Footer()

    def _create_logs_area(self) -> TextArea:
        """Create the read-only logs text area."""
        self.logs_area = TextArea(
            text="Loading logs...",
            read_only=True,
            show_line_numbers=False,
            id="logs-area"
        )
        return self.logs_area

    async def on_mount(self) -> None:
        """Initialize the screen when mounted."""
        # Set the correct service in the select widget after a brief delay.
        # NOTE(review): compose() does not yield a #service-select widget, so
        # this lookup appears to always fail and be skipped — confirm/prune.
        try:
            select = self.query_one("#service-select")
            # Set a default first, then set the desired value
            select.value = "openrag-backend"
            if self.current_service in ["openrag-backend", "openrag-frontend", "opensearch", "langflow", "dashboards"]:
                select.value = self.current_service
        except Exception:
            # If setting the service fails, just use the default
            pass
        await self._load_logs()
        # Focus the logs area since there are no buttons
        try:
            self.logs_area.focus()
        except Exception:
            pass

    def on_unmount(self) -> None:
        """Clean up when unmounting."""
        self._stop_following()

    async def _load_logs(self, lines: int = 200) -> None:
        """Load recent logs for the current service."""
        if not self.container_manager.is_available():
            self.logs_area.text = "No container runtime available"
            return
        success, logs = await self.container_manager.get_service_logs(self.current_service, lines)
        if success:
            self.logs_area.text = logs
            # Scroll to bottom if auto scroll is enabled
            if self.auto_scroll:
                self.logs_area.scroll_end()
        else:
            self.logs_area.text = f"Failed to load logs: {logs}"

    def _stop_following(self) -> None:
        """Stop following logs."""
        self.following = False
        if self.follow_task and not self.follow_task.is_finished:
            self.follow_task.cancel()
        # No button to update since we removed it

    async def _follow_logs(self) -> None:
        """Follow logs in real-time, appending to the text area."""
        if not self.container_manager.is_available():
            return
        try:
            async for log_line in self.container_manager.follow_service_logs(self.current_service):
                if not self.following:
                    break
                # Append new line to logs area
                current_text = self.logs_area.text
                new_text = current_text + "\n" + log_line
                # Keep only last 1000 lines to prevent memory issues
                lines = new_text.split('\n')
                if len(lines) > 1000:
                    lines = lines[-1000:]
                    new_text = '\n'.join(lines)
                self.logs_area.text = new_text
                # Scroll to bottom if auto scroll is enabled
                if self.auto_scroll:
                    self.logs_area.scroll_end()
        except asyncio.CancelledError:
            pass
        except Exception as e:
            if self.following:  # Only show error if we're still supposed to be following
                self.notify(f"Error following logs: {e}", severity="error")
        finally:
            self.following = False

    def action_refresh(self) -> None:
        """Refresh logs (stops any live follow first)."""
        self._stop_following()
        self.run_worker(self._load_logs())

    def action_follow(self) -> None:
        """Toggle log following."""
        if self.following:
            self._stop_following()
        else:
            self.following = True
            # Start following
            self.follow_task = self.run_worker(self._follow_logs(), exclusive=False)

    def action_clear(self) -> None:
        """Clear the logs area."""
        self.logs_area.text = ""

    def action_toggle_auto_scroll(self) -> None:
        """Toggle auto scroll on/off."""
        self.auto_scroll = not self.auto_scroll
        status = "enabled" if self.auto_scroll else "disabled"
        self.notify(f"Auto scroll {status}", severity="information")

    def action_scroll_top(self) -> None:
        """Scroll to the top of logs."""
        self.logs_area.scroll_home()

    def action_scroll_bottom(self) -> None:
        """Scroll to the bottom of logs."""
        self.logs_area.scroll_end()

    def action_scroll_down(self) -> None:
        """Scroll down one line."""
        self.logs_area.scroll_down()

    def action_scroll_up(self) -> None:
        """Scroll up one line."""
        self.logs_area.scroll_up()

    def action_scroll_page_up(self) -> None:
        """Scroll up one page."""
        self.logs_area.scroll_page_up()

    def action_scroll_page_down(self) -> None:
        """Scroll down one page."""
        self.logs_area.scroll_page_down()

    def on_key(self, event) -> None:
        """Handle key presses that might be intercepted by TextArea."""
        key = event.key
        # Handle keys that TextArea might intercept
        if key == "ctrl+u":
            self.action_scroll_page_up()
            event.prevent_default()
        elif key == "ctrl+f":
            self.action_scroll_page_down()
            event.prevent_default()
        elif key == "G":
            # Bug fix: key.upper() == "G" also matched a plain "g", hijacking
            # the "Go to Top" binding and jumping to the bottom instead. Only
            # shift+G ("G") should jump to the bottom.
            self.action_scroll_bottom()
            event.prevent_default()

    def action_back(self) -> None:
        """Go back to previous screen."""
        self._stop_following()
        self.app.pop_screen()

533
src/tui/screens/monitor.py Normal file
View file

@ -0,0 +1,533 @@
"""Service monitoring screen for OpenRAG TUI."""
import asyncio
import re
from typing import Literal, Any
# Define button variant type
# (mirrors the variant strings passed to Button(..., variant=...) below)
ButtonVariant = Literal["default", "primary", "success", "warning", "error"]
from textual.app import ComposeResult
from textual.containers import Container, Vertical, Horizontal, ScrollableContainer
from textual.screen import Screen
from textual.widgets import Header, Footer, Static, Button, DataTable
from textual.timer import Timer
from rich.text import Text
from rich.table import Table
from ..managers.container_manager import ContainerManager, ServiceStatus, ServiceInfo
from ..utils.platform import RuntimeType
from ..widgets.command_modal import CommandOutputModal
from ..widgets.diagnostics_notification import notify_with_diagnostics
class MonitorScreen(Screen):
"""Service monitoring and control screen."""
BINDINGS = [
("escape", "back", "Back"),
("r", "refresh", "Refresh"),
("s", "start", "Start Services"),
("t", "stop", "Stop Services"),
("u", "upgrade", "Upgrade"),
("x", "reset", "Reset"),
("l", "logs", "View Logs"),
("j", "cursor_down", "Move Down"),
("k", "cursor_up", "Move Up"),
]
def __init__(self):
    """Set up manager handles and screen state; widgets are built in compose()."""
    super().__init__()
    self.container_manager = ContainerManager()
    # Widget references, populated by _create_services_tab().
    self.services_table = None
    self.images_table = None
    self.status_text = None
    # Auto-refresh timer handle; started in on_mount().
    self.refresh_timer = None
    # True while a start/stop/upgrade/reset modal is being launched.
    self.operation_in_progress = False
    # Log-following state: worker task, service name, buffered lines.
    self._follow_task = None
    self._follow_service = None
    self._logs_buffer = []
def compose(self) -> ComposeResult:
    """Create the monitoring screen layout."""
    # Just show the services content directly (no header, no tabs)
    yield from self._create_services_tab()
    yield Footer()
def _create_services_tab(self) -> ComposeResult:
    """Create the services monitoring tab: mode row, images table,
    dynamic controls row, and the main services table."""
    # Current mode indicator + toggle
    yield Horizontal(
        Static("", id="mode-indicator"),
        Button("Toggle Mode", id="toggle-mode-btn"),
        classes="button-row",
        id="mode-row",
    )
    # Images summary table (above services); non-interactive display only.
    yield Static("Container Images", classes="tab-header")
    self.images_table = DataTable(id="images-table", show_cursor=False)
    self.images_table.can_focus = False
    self.images_table.add_columns("Image", "Digest")
    yield self.images_table
    yield Static(" ")
    # Dynamic controls container; populated based on running state
    yield Horizontal(id="services-controls", classes="button-row")
    # Create services table with image + digest info
    self.services_table = DataTable(id="services-table")
    self.services_table.add_columns("Service", "Status", "Health", "Ports", "Image", "Digest")
    yield self.services_table
def _get_runtime_status(self) -> Text:
    """Build the Rich text summarising the detected container runtime."""
    text = Text()

    if not self.container_manager.is_available():
        text.append("WARNING: No container runtime available\n", style="bold red")
        text.append("Please install Docker or Podman to continue.\n", style="dim")
        return text

    info = self.container_manager.get_runtime_info()

    # Headline styled per runtime flavour; generic runtimes fall back to green.
    headlines = {
        RuntimeType.DOCKER: ("Docker Runtime\n", "bold blue"),
        RuntimeType.PODMAN: ("Podman Runtime\n", "bold purple"),
    }
    label, style = headlines.get(info.runtime_type, ("Container Runtime\n", "bold green"))
    text.append(label, style=style)

    if info.version:
        text.append(f"Version: {info.version}\n", style="dim")

    # Podman on macOS needs enough VM memory; surface a warning when short.
    if info.runtime_type == RuntimeType.PODMAN:
        is_sufficient, message = self.container_manager.check_podman_macos_memory()
        if not is_sufficient:
            text.append(f"WARNING: {message}\n", style="bold yellow")

    return text
async def on_mount(self) -> None:
    """Initialize the screen when mounted."""
    await self._refresh_services()
    # Set up auto-refresh every 5 seconds
    self.refresh_timer = self.set_interval(5.0, self._auto_refresh)
    # Focus the services table; best-effort only.
    try:
        self.services_table.focus()
    except Exception:
        pass
def on_unmount(self) -> None:
    """Clean up when unmounting: stop the refresh timer and log follower."""
    if self.refresh_timer:
        self.refresh_timer.stop()
    # Stop following logs if running
    self._stop_follow()
async def on_screen_resume(self) -> None:
    """Called when the screen is resumed (e.g., after a modal is closed)."""
    # Refresh services when returning from a modal
    await self._refresh_services()
async def _refresh_services(self) -> None:
    """Refresh the services table.

    Rebuilds both tables from a forced status query: gathers image names
    from containers and the compose file, resolves their digests, then
    repopulates rows and updates the control buttons and mode row.
    """
    if not self.container_manager.is_available():
        return
    services = await self.container_manager.get_service_status(force_refresh=True)
    # Collect images actually reported by running/stopped containers so names match runtime
    images_set = set()
    for svc in services.values():
        img = (svc.image or "").strip()
        if img and img != "N/A":
            images_set.add(img)
    # Ensure compose-declared images are also shown (e.g., langflow when stopped)
    try:
        for img in self.container_manager._parse_compose_images():  # best-effort, no YAML dep
            if img:
                images_set.add(img)
    except Exception:
        pass
    images = list(images_set)
    # Lookup digests/IDs for these image names
    digest_map = await self.container_manager.get_images_digests(images)
    # Clear existing rows
    self.services_table.clear()
    if self.images_table:
        self.images_table.clear()
    # Add service rows
    for service_name, service_info in services.items():
        status_style = self._get_status_style(service_info.status)
        self.services_table.add_row(
            service_info.name,
            Text(service_info.status.value, style=status_style),
            service_info.health or "N/A",
            ", ".join(service_info.ports) if service_info.ports else "N/A",
            service_info.image or "N/A",
            digest_map.get(service_info.image or "", "-")
        )
    # Populate images table (unique images as reported by runtime)
    if self.images_table:
        for image in sorted(images):
            self.images_table.add_row(image, digest_map.get(image, "-"))
    # Update controls based on overall state
    self._update_controls(list(services.values()))
    # Update mode indicator
    self._update_mode_row()
def _get_status_style(self, status: ServiceStatus) -> str:
    """Map a service status to the Rich style used to render it."""
    if status is ServiceStatus.RUNNING:
        return "bold green"
    if status in (ServiceStatus.STOPPED, ServiceStatus.ERROR):
        return "bold red"
    if status in (ServiceStatus.STARTING, ServiceStatus.STOPPING):
        return "bold yellow"
    if status in (ServiceStatus.MISSING, ServiceStatus.UNKNOWN):
        return "dim"
    # Any status value outside the known set renders unstyled.
    return "white"
async def _auto_refresh(self) -> None:
    """Auto-refresh services if not in operation."""
    # Skip the periodic refresh while an operation modal is being launched
    # so the table rebuild does not race the modal flow.
    if not self.operation_in_progress:
        await self._refresh_services()
def on_button_pressed(self, event: Button.Pressed) -> None:
    """Route button presses to the matching service action.

    Button IDs may carry a random suffix, so matching is done on ID
    prefixes rather than exact equality.
    """
    button_id = event.button.id or ""
    # (Removed the unused button_label local from the original.)
    if button_id.startswith("start-btn"):
        self.run_worker(self._start_services())
    elif button_id.startswith("stop-btn"):
        self.run_worker(self._stop_services())
    elif button_id.startswith("restart-btn"):
        self.run_worker(self._restart_services())
    elif button_id.startswith("upgrade-btn"):
        self.run_worker(self._upgrade_services())
    elif button_id.startswith("reset-btn"):
        self.run_worker(self._reset_services())
    elif button_id == "toggle-mode-btn":
        self.action_toggle_mode()
    elif button_id.startswith("refresh-btn"):
        self.action_refresh()
    elif button_id.startswith("back-btn"):
        self.action_back()
    elif button_id.startswith("logs-"):
        # Map button IDs to actual service names
        service_mapping = {
            "logs-backend": "openrag-backend",
            "logs-frontend": "openrag-frontend",
            "logs-opensearch": "opensearch",
            "logs-langflow": "langflow"
        }
        # The first two dash-separated parts identify the button; any
        # random suffix after them is ignored. Guarded against short IDs.
        parts = button_id.split("-")
        service_name = service_mapping.get("-".join(parts[:2])) if len(parts) >= 2 else None
        if service_name:
            # Load recent logs then start following
            self.run_worker(self._show_logs(service_name))
            self._start_follow(service_name)
async def _start_services(self, cpu_mode: bool = False) -> None:
"""Start services with progress updates."""
self.operation_in_progress = True
try:
# Show command output in modal dialog
command_generator = self.container_manager.start_services(cpu_mode)
modal = CommandOutputModal(
"Starting Services",
command_generator,
on_complete=None # We'll refresh in on_screen_resume instead
)
self.app.push_screen(modal)
finally:
self.operation_in_progress = False
async def _stop_services(self) -> None:
"""Stop services with progress updates."""
self.operation_in_progress = True
try:
# Show command output in modal dialog
command_generator = self.container_manager.stop_services()
modal = CommandOutputModal(
"Stopping Services",
command_generator,
on_complete=None # We'll refresh in on_screen_resume instead
)
self.app.push_screen(modal)
finally:
self.operation_in_progress = False
async def _restart_services(self) -> None:
"""Restart services with progress updates."""
self.operation_in_progress = True
try:
# Show command output in modal dialog
command_generator = self.container_manager.restart_services()
modal = CommandOutputModal(
"Restarting Services",
command_generator,
on_complete=None # We'll refresh in on_screen_resume instead
)
self.app.push_screen(modal)
finally:
self.operation_in_progress = False
async def _upgrade_services(self) -> None:
    """Upgrade the service stack, streaming runtime output into a modal."""
    self.operation_in_progress = True
    try:
        # No completion callback: the monitor refreshes in on_screen_resume.
        self.app.push_screen(
            CommandOutputModal(
                "Upgrading Services",
                self.container_manager.upgrade_services(),
                on_complete=None,
            )
        )
    finally:
        self.operation_in_progress = False
async def _reset_services(self) -> None:
    """Reset the service stack, streaming runtime output into a modal."""
    self.operation_in_progress = True
    try:
        # No completion callback: the monitor refreshes in on_screen_resume.
        self.app.push_screen(
            CommandOutputModal(
                "Resetting Services",
                self.container_manager.reset_services(),
                on_complete=None,
            )
        )
    finally:
        self.operation_in_progress = False
def _strip_ansi_codes(self, text: str) -> str:
"""Strip ANSI escape sequences from text."""
ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
return ansi_escape.sub('', text)
async def _show_logs(self, service_name: str) -> None:
    """Load recent logs for `service_name` into the logs pane.

    On failure, `logs` carries the error text and is surfaced via a
    diagnostics notification instead.
    """
    # container_manager returns a (success, logs-or-error-message) pair.
    success, logs = await self.container_manager.get_service_logs(service_name)
    if success:
        # Strip ANSI codes and limit length to prevent UI issues
        cleaned_logs = self._strip_ansi_codes(logs)
        # Limit to last 5000 characters to prevent performance issues
        if len(cleaned_logs) > 5000:
            cleaned_logs = "...\n" + cleaned_logs[-5000:]
        logs_widget = self.query_one("#logs-content", Static)
        logs_widget.update(cleaned_logs)
        # Reset buffer to the current content split by lines (cap buffer)
        self._logs_buffer = cleaned_logs.splitlines()[-1000:]
        # Try to scroll to end of container
        try:
            scroller = self.query_one("#logs-scroll", ScrollableContainer)
            # Only use scroll_end which is the correct method
            scroller.scroll_end(animate=False)
        except Exception:
            # Scroll container may not be mounted yet; best-effort only.
            pass
    else:
        notify_with_diagnostics(
            self.app,
            f"Failed to get logs for {service_name}: {logs}",
            severity="error"
        )
def _stop_follow(self) -> None:
task = self._follow_task
if task and hasattr(task, "cancel"):
try:
task.cancel()
except Exception:
pass
self._follow_task = None
self._follow_service = None
def _start_follow(self, service_name: str) -> None:
# Stop any existing follower and start a new one
self._stop_follow()
self._follow_service = service_name
self._follow_task = self.run_worker(self._follow_logs(), exclusive=False)
async def _follow_logs(self) -> None:
    """Follow logs for the currently selected service and append to the view."""
    # Snapshot the target; _start_follow sets this before launching us.
    service_name = self._follow_service
    if not service_name:
        return
    if not self.container_manager.is_available():
        return
    try:
        async for line in self.container_manager.follow_service_logs(service_name):
            cleaned = self._strip_ansi_codes(line.rstrip("\n"))
            if not cleaned:
                continue
            self._logs_buffer.append(cleaned)
            # Keep only the last 1000 lines to avoid growth
            if len(self._logs_buffer) > 1000:
                self._logs_buffer = self._logs_buffer[-1000:]
            try:
                logs_widget = self.query_one("#logs-content", Static)
                logs_widget.update("\n".join(self._logs_buffer))
                scroller = self.query_one("#logs-scroll", ScrollableContainer)
                # Only use scroll_end which is the correct method
                scroller.scroll_end(animate=False)
            except Exception:
                # Widgets may be unmounted mid-stream (screen switch);
                # drop this UI update and keep consuming the stream.
                pass
    except Exception as e:
        notify_with_diagnostics(
            self.app,
            f"Error following logs: {e}",
            severity="error"
        )
def action_refresh(self) -> None:
    """Manually refresh the services table via a background worker."""
    refresh_coro = self._refresh_services()
    self.run_worker(refresh_coro)
def action_cursor_down(self) -> None:
    """Move the selection down one row in the services table."""
    try:
        table = self.services_table
        table.action_cursor_down()
    except Exception:
        # Table may not be mounted yet; ignore navigation errors.
        pass
def action_cursor_up(self) -> None:
    """Move the selection up one row in the services table."""
    try:
        table = self.services_table
        table.action_cursor_up()
    except Exception:
        # Table may not be mounted yet; ignore navigation errors.
        pass
def _update_mode_row(self) -> None:
    """Refresh the CPU/GPU mode indicator text and toggle-button caption."""
    try:
        cpu_active = getattr(self.container_manager, "use_cpu_compose", True)
        if cpu_active:
            mode_text = "Mode: CPU (no GPU detected)"
            btn_label = "Switch to GPU Mode"
        else:
            mode_text = "Mode: GPU"
            btn_label = "Switch to CPU Mode"
        self.query_one("#mode-indicator", Static).update(mode_text)
        self.query_one("#toggle-mode-btn", Button).label = btn_label
    except Exception:
        # Widgets may not be mounted yet; skip silently.
        pass
def action_toggle_mode(self) -> None:
    """Toggle between CPU/GPU compose files and refresh the view.

    Fixes an inverted notification: the original showed "Switched to CPU
    compose" when toggling *away* from CPU mode (and vice versa).
    """
    try:
        # `current` is the pre-toggle state: True means we WERE on CPU,
        # so after flipping we are on GPU.
        current = getattr(self.container_manager, "use_cpu_compose", True)
        self.container_manager.use_cpu_compose = not current
        self.notify(
            "Switched to GPU compose" if current else "Switched to CPU compose",
            severity="information",
        )
        self._update_mode_row()
        self.action_refresh()
    except Exception as e:
        notify_with_diagnostics(
            self.app,
            f"Failed to toggle mode: {e}",
            severity="error"
        )
def _update_controls(self, services: list[ServiceInfo]) -> None:
    """Rebuild the control-button row to match the current service state."""
    try:
        controls = self.query_one("#services-controls", Horizontal)
        any_running = any(s.status == ServiceStatus.RUNNING for s in services)
        # Drop the previous buttons before mounting fresh ones.
        controls.remove_children()
        # A random suffix keeps widget IDs unique across refreshes so we
        # never create duplicate IDs.
        import random
        suffix = f"-{random.randint(10000, 99999)}"
        if any_running:
            # Running stack: offer stop and restart.
            new_buttons = [
                Button("Stop Services", variant="error", id=f"stop-btn{suffix}"),
                Button("Restart", variant="primary", id=f"restart-btn{suffix}"),
            ]
        else:
            # Stopped stack: offer start.
            new_buttons = [
                Button("Start Services", variant="success", id=f"start-btn{suffix}"),
            ]
        # Upgrade and reset are always available.
        new_buttons.append(Button("Upgrade", variant="warning", id=f"upgrade-btn{suffix}"))
        new_buttons.append(Button("Reset", variant="error", id=f"reset-btn{suffix}"))
        for button in new_buttons:
            controls.mount(button)
    except Exception as e:
        notify_with_diagnostics(
            self.app,
            f"Error updating controls: {e}",
            severity="error"
        )
def action_back(self) -> None:
    """Return to the previous screen."""
    self.app.pop_screen()
def action_start(self) -> None:
    """Kick off service startup in a background worker."""
    startup = self._start_services()
    self.run_worker(startup)
def action_stop(self) -> None:
    """Kick off service shutdown in a background worker."""
    shutdown = self._stop_services()
    self.run_worker(shutdown)
def action_upgrade(self) -> None:
    """Kick off a service upgrade in a background worker."""
    upgrade = self._upgrade_services()
    self.run_worker(upgrade)
def action_reset(self) -> None:
    """Kick off a service reset in a background worker."""
    reset = self._reset_services()
    self.run_worker(reset)
def action_logs(self) -> None:
    """Open the logs screen for the service selected in the table.

    The previous implementation passed the name through a mapping whose
    keys and values were identical (`get(name, name)` over an identity
    dict), which was dead code; the display name in column 0 already IS
    the compose service name, so it is used directly.
    """
    try:
        # Get the currently focused row in the services table.
        table = self.query_one("#services-table", DataTable)
        if table.cursor_row is not None and table.cursor_row >= 0:
            row_data = table.get_row_at(table.cursor_row)
            if row_data:
                # First column holds the service name.
                service_name = str(row_data[0])
                # Push the logs screen with the selected service.
                from .logs import LogsScreen
                self.app.push_screen(LogsScreen(initial_service=service_name))
            else:
                self.notify("No service selected", severity="warning")
        else:
            self.notify("No service selected", severity="warning")
    except Exception as e:
        self.notify(f"Error opening logs: {e}", severity="error")

192
src/tui/screens/welcome.py Normal file
View file

@ -0,0 +1,192 @@
"""Welcome screen for OpenRAG TUI."""
import os
from pathlib import Path
from textual.app import ComposeResult
from textual.containers import Container, Vertical, Horizontal
from textual.screen import Screen
from textual.widgets import Header, Footer, Static, Button
from rich.text import Text
from rich.align import Align
from dotenv import load_dotenv
from ..managers.container_manager import ContainerManager, ServiceStatus
from ..managers.env_manager import EnvManager
class WelcomeScreen(Screen):
    """Initial welcome screen with setup options.

    The default action adapts to two pieces of state gathered on mount:
    whether the container services are already running, and whether OAuth
    credentials are present in the environment.

    Changes from the original: bare ``except:`` clauses (which also swallow
    KeyboardInterrupt/SystemExit) narrowed to ``except Exception:``; the
    duplicated OAuth-environment check consolidated into
    ``_oauth_configured``; button focusing driven by ``default_button_id``
    instead of three copies of the same try/except.
    """

    BINDINGS = [
        ("q", "quit", "Quit"),
        ("enter", "default_action", "Continue"),
        ("1", "no_auth_setup", "Basic Setup"),
        ("2", "full_setup", "Advanced Setup"),
        ("3", "monitor", "Monitor Services"),
        ("4", "diagnostics", "Diagnostics"),
    ]

    def __init__(self):
        super().__init__()
        self.container_manager = ContainerManager()
        self.env_manager = EnvManager()
        # State flags refreshed in on_mount; safe defaults until then.
        self.services_running = False
        self.has_oauth_config = False
        self.default_button_id = "basic-setup-btn"
        self._state_checked = False
        # Load .env file if it exists so the OAuth detection below sees it.
        load_dotenv()

    @staticmethod
    def _oauth_configured() -> bool:
        """Return True when a Google or Microsoft OAuth client ID is set."""
        return bool(os.getenv("GOOGLE_OAUTH_CLIENT_ID")) or bool(
            os.getenv("MICROSOFT_GRAPH_OAUTH_CLIENT_ID")
        )

    def compose(self) -> ComposeResult:
        """Create the welcome screen layout."""
        yield Container(
            Vertical(
                Static(self._create_welcome_text(), id="welcome-text"),
                self._create_dynamic_buttons(),
                id="welcome-container"
            ),
            id="main-container"
        )
        yield Footer()

    def _create_welcome_text(self) -> Text:
        """Create a minimal welcome message reflecting current state."""
        welcome_text = Text()
        ascii_art = """
"""
        welcome_text.append(ascii_art, style="bold blue")
        welcome_text.append("Terminal User Interface for OpenRAG\n\n", style="dim")
        if self.services_running:
            welcome_text.append("✓ Services are currently running\n\n", style="bold green")
        elif self.has_oauth_config:
            welcome_text.append("OAuth credentials detected — Advanced Setup recommended\n\n", style="bold green")
        else:
            welcome_text.append("Select a setup below to continue\n\n", style="white")
        return welcome_text

    def _create_dynamic_buttons(self) -> Horizontal:
        """Create buttons based on current state."""
        # OAuth presence decides which setup flavor is offered.
        has_oauth = self._oauth_configured()
        buttons = []
        if self.services_running:
            # Services running - only show monitor.
            buttons.append(Button("Monitor Services", variant="success", id="monitor-btn"))
        else:
            if has_oauth:
                # Only show advanced setup if OAuth is configured.
                buttons.append(Button("Advanced Setup", variant="success", id="advanced-setup-btn"))
            else:
                # Only show basic setup if no OAuth.
                buttons.append(Button("Basic Setup", variant="success", id="basic-setup-btn"))
            # Always show monitor option.
            buttons.append(Button("Monitor Services", variant="default", id="monitor-btn"))
        return Horizontal(*buttons, classes="button-row")

    async def on_mount(self) -> None:
        """Gather runtime state, update the text, and focus the default button."""
        # Check if services are running.
        if self.container_manager.is_available():
            services = await self.container_manager.get_service_status()
            running_services = [s.name for s in services.values() if s.status == ServiceStatus.RUNNING]
            self.services_running = len(running_services) > 0
        # Check for OAuth configuration.
        self.has_oauth_config = self._oauth_configured()
        # Pick the default action/button for the current state.
        if self.services_running:
            self.default_button_id = "monitor-btn"
        elif self.has_oauth_config:
            self.default_button_id = "advanced-setup-btn"
        else:
            self.default_button_id = "basic-setup-btn"
        # Refresh the welcome text and focus the default button.
        try:
            welcome_widget = self.query_one("#welcome-text")
            welcome_widget.update(self._create_welcome_text())
            try:
                self.query_one(f"#{self.default_button_id}").focus()
            except Exception:
                # The button for this state may not exist in the layout.
                pass
        except Exception:
            pass  # Widgets might not be mounted yet

    def on_button_pressed(self, event: Button.Pressed) -> None:
        """Handle button presses."""
        if event.button.id == "basic-setup-btn":
            self.action_no_auth_setup()
        elif event.button.id == "advanced-setup-btn":
            self.action_full_setup()
        elif event.button.id == "monitor-btn":
            self.action_monitor()
        elif event.button.id == "diagnostics-btn":
            self.action_diagnostics()

    def action_default_action(self) -> None:
        """Handle Enter key - go to default action based on state."""
        if self.services_running:
            self.action_monitor()
        elif self.has_oauth_config:
            self.action_full_setup()
        else:
            self.action_no_auth_setup()

    def action_no_auth_setup(self) -> None:
        """Switch to basic configuration screen."""
        from .config import ConfigScreen
        self.app.push_screen(ConfigScreen(mode="no_auth"))

    def action_full_setup(self) -> None:
        """Switch to advanced configuration screen."""
        from .config import ConfigScreen
        self.app.push_screen(ConfigScreen(mode="full"))

    def action_monitor(self) -> None:
        """Switch to monitoring screen."""
        from .monitor import MonitorScreen
        self.app.push_screen(MonitorScreen())

    def action_diagnostics(self) -> None:
        """Switch to diagnostics screen."""
        from .diagnostics import DiagnosticsScreen
        self.app.push_screen(DiagnosticsScreen())

    def action_quit(self) -> None:
        """Quit the application."""
        self.app.exit()

View file

@ -0,0 +1 @@
"""TUI utilities package."""

170
src/tui/utils/platform.py Normal file
View file

@ -0,0 +1,170 @@
"""Platform detection and container runtime discovery utilities."""
import json
import platform
import subprocess
from dataclasses import dataclass
from enum import Enum
from typing import Optional
class RuntimeType(Enum):
    """Container runtime flavors the detector can report."""
    # Classic standalone docker-compose binary.
    DOCKER_COMPOSE = "docker-compose"
    # Docker engine with the `docker compose` plugin.
    DOCKER = "docker"
    # Podman, either native or masquerading as docker.
    PODMAN = "podman"
    # No usable runtime found on the host.
    NONE = "none"
@dataclass
class RuntimeInfo:
    """Resolved description of the host's container runtime."""
    # Which runtime flavor was detected.
    runtime_type: RuntimeType
    # argv prefix for compose operations, e.g. ["docker", "compose"].
    compose_command: list[str]
    # argv prefix for plain runtime calls, e.g. ["docker"] or ["podman"].
    runtime_command: list[str]
    # Human-readable `--version` output when it could be read; else None.
    version: Optional[str] = None
class PlatformDetector:
    """Detect platform and container runtime capabilities.

    Probes the host for podman/docker (including podman masquerading as
    docker), best-effort NVIDIA GPU availability, and Podman VM sizing on
    macOS. All probes shell out via subprocess with short timeouts and
    treat missing binaries as "not available".
    """
    def __init__(self):
        # Cached host identity, e.g. "Darwin"/"Linux"/"Windows" and
        # "x86_64"/"arm64".
        self.platform_system = platform.system()
        self.platform_machine = platform.machine()
    def detect_runtime(self) -> RuntimeInfo:
        """Detect available container runtime and compose capabilities.

        Returns:
            RuntimeInfo naming the runtime flavor, the compose argv, the
            base runtime argv, and a version string when one was read.
            Probe ORDER matters: podman-as-docker is checked before real
            docker so aliased installs are classified as PODMAN.
        """
        # First check if we have podman installed
        podman_version = self._get_podman_version()
        # If we have podman, check if docker is actually podman in disguise
        if podman_version:
            docker_version = self._get_docker_version()
            if docker_version and podman_version in docker_version:
                # This is podman masquerading as docker
                if self._check_command(["docker", "compose", "--help"]):
                    return RuntimeInfo(RuntimeType.PODMAN, ["docker", "compose"], ["docker"], podman_version)
                if self._check_command(["docker-compose", "--help"]):
                    return RuntimeInfo(RuntimeType.PODMAN, ["docker-compose"], ["docker"], podman_version)
            # Check for native podman compose
            if self._check_command(["podman", "compose", "--help"]):
                return RuntimeInfo(RuntimeType.PODMAN, ["podman", "compose"], ["podman"], podman_version)
        # Check for actual docker
        if self._check_command(["docker", "compose", "--help"]):
            version = self._get_docker_version()
            return RuntimeInfo(RuntimeType.DOCKER, ["docker", "compose"], ["docker"], version)
        if self._check_command(["docker-compose", "--help"]):
            version = self._get_docker_version()
            return RuntimeInfo(RuntimeType.DOCKER_COMPOSE, ["docker-compose"], ["docker"], version)
        # Nothing usable found: empty argv lists signal "no runtime".
        return RuntimeInfo(RuntimeType.NONE, [], [])
    def detect_gpu_available(self) -> bool:
        """Best-effort detection of NVIDIA GPU availability for containers."""
        # Preferred signal: nvidia-smi lists at least one GPU.
        try:
            res = subprocess.run(["nvidia-smi", "-L"], capture_output=True, text=True, timeout=5)
            if res.returncode == 0 and any("GPU" in ln for ln in res.stdout.splitlines()):
                return True
        except (subprocess.TimeoutExpired, FileNotFoundError):
            pass
        # Fallback: look for an "nvidia" runtime in docker/podman info output.
        for cmd in (["docker", "info", "--format", "{{json .Runtimes}}"], ["podman", "info", "--format", "json"]):
            try:
                res = subprocess.run(cmd, capture_output=True, text=True, timeout=5)
                if res.returncode == 0 and "nvidia" in res.stdout.lower():
                    return True
            except (subprocess.TimeoutExpired, FileNotFoundError):
                continue
        return False
    def _check_command(self, cmd: list[str]) -> bool:
        """Return True when `cmd` runs and exits 0 (missing/hung -> False)."""
        try:
            result = subprocess.run(cmd, capture_output=True, text=True, timeout=10)
            return result.returncode == 0
        except (subprocess.TimeoutExpired, FileNotFoundError):
            return False
    def _get_docker_version(self) -> Optional[str]:
        """Return `docker --version` output, or None when unavailable."""
        try:
            res = subprocess.run(["docker", "--version"], capture_output=True, text=True, timeout=5)
            if res.returncode == 0:
                return res.stdout.strip()
        except (subprocess.TimeoutExpired, FileNotFoundError):
            pass
        return None
    def _get_podman_version(self) -> Optional[str]:
        """Return `podman --version` output, or None when unavailable."""
        try:
            res = subprocess.run(["podman", "--version"], capture_output=True, text=True, timeout=5)
            if res.returncode == 0:
                return res.stdout.strip()
        except (subprocess.TimeoutExpired, FileNotFoundError):
            pass
        return None
    def check_podman_macos_memory(self) -> tuple[bool, int, str]:
        """
        Check Podman VM memory on macOS.
        Returns (is_sufficient, current_memory_mb, status_message)
        """
        if self.platform_system != "Darwin":
            return True, 0, "Not running on macOS"
        try:
            result = subprocess.run(["podman", "machine", "inspect"], capture_output=True, text=True, timeout=10)
            if result.returncode != 0:
                return False, 0, "Could not inspect Podman machine"
            machines = json.loads(result.stdout)
            if not machines:
                return False, 0, "No Podman machines found"
            # Only the first machine in the inspect output is considered.
            machine = machines[0]
            # NOTE(review): assumes inspect reports Resources.Memory in MB —
            # confirm against the Podman version in use.
            memory_mb = machine.get("Resources", {}).get("Memory", 0)
            min_memory_mb = 8192
            is_sufficient = memory_mb >= min_memory_mb
            status = f"Current: {memory_mb}MB, Recommended: ≥{min_memory_mb}MB"
            if not is_sufficient:
                status += "\nTo increase: podman machine stop && podman machine rm && podman machine init --memory 8192 && podman machine start"
            return is_sufficient, memory_mb, status
        except (subprocess.TimeoutExpired, FileNotFoundError, json.JSONDecodeError) as e:
            return False, 0, f"Error checking Podman VM memory: {e}"
    def get_installation_instructions(self) -> str:
        """Return platform-specific install guidance for a container runtime."""
        if self.platform_system == "Darwin":
            return """
No container runtime found. Please install one:
Docker Desktop for Mac:
https://docs.docker.com/desktop/install/mac-install/
Or Podman:
brew install podman
podman machine init --memory 8192
podman machine start
"""
        elif self.platform_system == "Linux":
            return """
No container runtime found. Please install one:
Docker:
curl -fsSL https://get.docker.com -o get-docker.sh
sudo sh get-docker.sh
Or Podman:
# Ubuntu/Debian: sudo apt install podman
# RHEL/Fedora: sudo dnf install podman
"""
        elif self.platform_system == "Windows":
            return """
No container runtime found. Please install one:
Docker Desktop for Windows:
https://docs.docker.com/desktop/install/windows-install/
Or Podman Desktop:
https://podman-desktop.io/downloads
"""
        else:
            return """
No container runtime found. Please install Docker or Podman for your platform:
- Docker: https://docs.docker.com/get-docker/
- Podman: https://podman.io/getting-started/installation
"""

133
src/tui/utils/validation.py Normal file
View file

@ -0,0 +1,133 @@
"""Input validation utilities for TUI."""
import os
import re
from pathlib import Path
from typing import Optional
class ValidationError(Exception):
    """Raised when user-supplied configuration input fails validation."""
    # No body needed: the docstring already satisfies the class suite, so
    # the original trailing `pass` was redundant and has been removed.
def validate_env_var_name(name: str) -> bool:
    """Validate environment variable name format.

    Accepts UPPER_SNAKE names starting with an uppercase letter.
    """
    return re.match(r'^[A-Z][A-Z0-9_]*$', name) is not None
def validate_path(path: str, must_exist: bool = False, must_be_dir: bool = False) -> bool:
    """Validate a file/directory path string.

    Args:
        path: Candidate path; `~` is expanded and the path resolved.
        must_exist: Require the path to exist on disk.
        must_be_dir: If the path exists, require it to be a directory.

    Returns:
        True when the path is well-formed and meets the constraints.
    """
    if not path:
        return False
    try:
        resolved = Path(path).expanduser().resolve()
        if must_exist and not resolved.exists():
            return False
        if must_be_dir and resolved.exists() and not resolved.is_dir():
            return False
        return True
    except (OSError, ValueError):
        # Malformed paths (e.g. NUL bytes) are simply invalid.
        return False
def validate_url(url: str) -> bool:
"""Validate URL format."""
if not url:
return False
url_pattern = re.compile(
r'^https?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?|' # domain
r'localhost|' # localhost
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # IP
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
return bool(url_pattern.match(url))
def validate_openai_api_key(key: str) -> bool:
    """Check OpenAI API key shape: 'sk-' prefix and more than 20 chars."""
    if not key:
        return False
    return key.startswith('sk-') and len(key) > 20
def validate_google_oauth_client_id(client_id: str) -> bool:
    """Check Google OAuth client ID shape (apps.googleusercontent.com suffix)."""
    if not client_id:
        return False
    return client_id.endswith('.apps.googleusercontent.com')
def validate_non_empty(value: str) -> bool:
    """True when `value` contains at least one non-whitespace character."""
    if not value:
        return False
    return bool(value.strip())
def sanitize_env_value(value: str) -> str:
    """Sanitize an environment variable value.

    Trims surrounding whitespace, then removes ONE layer of matching
    single or double quotes wrapping the entire value.
    """
    value = value.strip()
    if len(value) >= 2 and value[0] == value[-1] and value[0] in ('"', "'"):
        value = value[1:-1]
    return value
def validate_documents_paths(paths_str: str) -> tuple[bool, str, list[str]]:
    """
    Validate comma-separated documents paths for volume mounting.

    Each path is expanded/resolved, created if missing, and probed for
    writability by touching and removing a marker file.

    Returns:
        (is_valid, error_message, validated_paths)
    """
    if not paths_str:
        return False, "Documents paths cannot be empty", []
    candidates = [p.strip() for p in paths_str.split(',') if p.strip()]
    if not candidates:
        return False, "No valid paths provided", []
    resolved_paths: list[str] = []
    for path in candidates:
        try:
            target = Path(path).expanduser().resolve()
            if not target.exists():
                # Missing directories are created rather than rejected.
                try:
                    target.mkdir(parents=True, exist_ok=True)
                except (OSError, PermissionError) as e:
                    return False, f"Cannot create directory '{path}': {e}", []
            if not target.is_dir():
                return False, f"Path '{path}' must be a directory", []
            # Probe writability with a throwaway marker file.
            try:
                marker = target / ".openrag_test"
                marker.touch()
                marker.unlink()
            except (OSError, PermissionError):
                return False, f"Directory '{path}' is not writable", []
            resolved_paths.append(str(target))
        except (OSError, ValueError) as e:
            return False, f"Invalid path '{path}': {e}", []
    return True, "All paths valid", resolved_paths

View file

@ -0,0 +1,3 @@
"""Widgets for OpenRAG TUI."""
# Made with Bob

View file

@ -0,0 +1,132 @@
"""Command output modal dialog for OpenRAG TUI."""
import asyncio
from typing import Callable, List, Optional, AsyncIterator, Any
from textual.app import ComposeResult
from textual.worker import Worker
from textual.containers import Container, ScrollableContainer
from textual.screen import ModalScreen
from textual.widgets import Button, Static, Label, RichLog
from rich.console import Console
class CommandOutputModal(ModalScreen):
    """Modal dialog for displaying command output in real-time.

    Consumes an async generator of (is_complete, message) tuples and writes
    every message into a RichLog, auto-scrolling as lines arrive.
    """
    # Widget-scoped styling: a centered dialog filling 90% of the screen
    # with a title bar, scrollable log area, and a close-button row.
    DEFAULT_CSS = """
    CommandOutputModal {
        align: center middle;
    }
    #dialog {
        width: 90%;
        height: 90%;
        border: thick $primary;
        background: $surface;
        padding: 0;
    }
    #title {
        background: $primary;
        color: $text;
        padding: 1 2;
        text-align: center;
        width: 100%;
        text-style: bold;
    }
    #output-container {
        height: 1fr;
        padding: 0;
        margin: 0 1;
    }
    #command-output {
        height: 100%;
        border: solid $accent;
        padding: 1 2;
        margin: 1 0;
        background: $surface-darken-1;
    }
    #button-row {
        width: 100%;
        height: auto;
        align: center middle;
        padding: 1;
        margin-top: 1;
    }
    #button-row Button {
        margin: 0 1;
        min-width: 16;
    }
    """
    def __init__(
        self,
        title: str,
        command_generator: AsyncIterator[tuple[bool, str]],
        on_complete: Optional[Callable] = None
    ):
        """Initialize the modal dialog.
        Args:
            title: Title of the modal dialog
            command_generator: Async generator that yields (is_complete, message) tuples
            on_complete: Optional callback to run when command completes
        """
        super().__init__()
        # Stored under a distinct name to avoid clashing with Screen.title.
        self.title_text = title
        self.command_generator = command_generator
        self.on_complete = on_complete
    def compose(self) -> ComposeResult:
        """Create the modal dialog layout."""
        with Container(id="dialog"):
            yield Label(self.title_text, id="title")
            with ScrollableContainer(id="output-container"):
                yield RichLog(id="command-output", highlight=True, markup=True)
            with Container(id="button-row"):
                yield Button("Close", variant="primary", id="close-btn")
    def on_mount(self) -> None:
        """Start the command when the modal is mounted."""
        # Start the command but don't store the worker
        self.run_worker(self._run_command(), exclusive=False)
    def on_button_pressed(self, event: Button.Pressed) -> None:
        """Handle button presses."""
        if event.button.id == "close-btn":
            self.dismiss()
    async def _run_command(self) -> None:
        """Run the command and update the output in real-time."""
        output = self.query_one("#command-output", RichLog)
        try:
            async for is_complete, message in self.command_generator:
                # Simple approach: just append each line as it comes
                output.write(message + "\n")
                # Scroll to bottom
                container = self.query_one("#output-container", ScrollableContainer)
                container.scroll_end(animate=False)
                # If command is complete, update UI
                if is_complete:
                    output.write("[bold green]Command completed successfully[/bold green]\n")
                    # Call the completion callback if provided
                    if self.on_complete:
                        await asyncio.sleep(0.5)  # Small delay for better UX
                        # NOTE(review): invoked synchronously — an async
                        # on_complete coroutine would not be awaited here.
                        self.on_complete()
        except Exception as e:
            output.write(f"[bold red]Error: {e}[/bold red]\n")
        # Enable the close button and focus it
        # (the button is never disabled above, so this is defensive).
        close_btn = self.query_one("#close-btn", Button)
        close_btn.disabled = False
        close_btn.focus()
# Made with Bob

View file

@ -0,0 +1,38 @@
"""Utility functions for showing error notifications with diagnostics button."""
from typing import Literal
from textual.app import App
def notify_with_diagnostics(
    app: "App",
    message: str,
    severity: Literal["information", "warning", "error"] = "error",
    timeout: float = 10.0
) -> None:
    """Show a notification, followed by a hint pointing at diagnostics.

    Args:
        app: The Textual app
        message: The notification message
        severity: The notification severity
        timeout: The notification timeout in seconds

    NOTE(review): Textual notifications cannot carry buttons; the previous
    nested ``open_diagnostics`` callback was never invoked anywhere and has
    been removed as dead code. The `App` annotation is a string so the
    module's public contract is unchanged while avoiding import-time
    evaluation of the textual type.
    """
    # Primary notification with the caller's message.
    app.notify(message, severity=severity, timeout=timeout)
    # Follow-up hint directing the user to the diagnostics screen.
    app.notify(
        "Click to view diagnostics",
        severity="information",
        timeout=timeout,
        title="Diagnostics"
    )
# Made with Bob

View file

@ -0,0 +1,38 @@
"""Utility functions for showing error notifications with diagnostics button."""
from typing import Literal, Callable
from textual.app import App
def notify_with_diagnostics(
    app: "App",
    message: str,
    severity: Literal["information", "warning", "error"] = "error",
    timeout: float = 10.0
) -> None:
    """Show a notification, followed by a hint pointing at diagnostics.

    Args:
        app: The Textual app
        message: The notification message
        severity: The notification severity
        timeout: The notification timeout in seconds

    NOTE(review): Textual notifications cannot carry buttons; the previous
    nested ``open_diagnostics`` callback was never invoked anywhere and has
    been removed as dead code. The `App` annotation is a string so the
    module's public contract is unchanged while avoiding import-time
    evaluation of the textual type.
    """
    # Primary notification with the caller's message.
    app.notify(message, severity=severity, timeout=timeout)
    # Follow-up hint directing the user to the diagnostics screen.
    app.notify(
        "Click to view diagnostics",
        severity="information",
        timeout=timeout,
        title="Diagnostics"
    )
# Made with Bob

View file

@ -0,0 +1,81 @@
import logging
import os
import sys
from typing import Any, Dict

import structlog
from structlog import processors
def configure_logging(
    log_level: str = "INFO",
    json_logs: bool = False,
    include_timestamps: bool = True,
    service_name: str = "openrag"
) -> None:
    """Configure structlog for the application.

    Args:
        log_level: Stdlib level name ("DEBUG", "INFO", ...); unknown names
            fall back to INFO.
        json_logs: Force JSON output (also triggered by LOG_FORMAT=json).
        include_timestamps: Add an ISO-8601 timestamp to every event.
        service_name: Bound into the global context as `service`.

    Fixes from the original: the JSON branch appended a JSONRenderer to the
    shared processor chain AND used one as the final renderer, so every
    event was rendered twice (the second renderer receiving a string, not
    an event dict). The level lookup now uses the stdlib `logging` module
    directly instead of reaching through `structlog.stdlib.logging`.
    """
    # Convert the level name to a numeric stdlib level; default to INFO.
    level = getattr(logging, log_level.upper(), logging.INFO)
    # Base processors
    shared_processors = [
        structlog.contextvars.merge_contextvars,
        structlog.processors.add_log_level,
        structlog.processors.StackInfoRenderer(),
        structlog.dev.set_exc_info,
    ]
    if include_timestamps:
        shared_processors.append(structlog.processors.TimeStamper(fmt="iso"))
    # Record the calling function name on every event.
    shared_processors.append(
        structlog.processors.CallsiteParameterAdder(
            parameters=[structlog.processors.CallsiteParameter.FUNC_NAME]
        )
    )
    # Pick exactly ONE final renderer — it must be the last processor in
    # the chain, and must appear only once.
    if json_logs or os.getenv("LOG_FORMAT", "").lower() == "json":
        # JSON output for production/containers
        console_renderer = structlog.processors.JSONRenderer()
    else:
        # Pretty colored output for development
        console_renderer = structlog.dev.ConsoleRenderer(
            colors=sys.stderr.isatty(),
            exception_formatter=structlog.dev.plain_traceback,
        )
    # Configure structlog
    structlog.configure(
        processors=shared_processors + [console_renderer],
        wrapper_class=structlog.make_filtering_bound_logger(level),
        context_class=dict,
        logger_factory=structlog.WriteLoggerFactory(sys.stderr),
        cache_logger_on_first_use=True,
    )
    # Add global context
    structlog.contextvars.clear_contextvars()
    structlog.contextvars.bind_contextvars(service=service_name)
def get_logger(name: str = None) -> structlog.BoundLogger:
    """Return a configured structlog logger, bound under `name` when given."""
    # An empty/None name yields the default (unnamed) logger.
    return structlog.get_logger(name) if name else structlog.get_logger()
# Convenience function to configure logging from environment
def configure_from_env() -> None:
    """Configure logging from environment variables.

    Reads LOG_LEVEL (level name), LOG_FORMAT ("json" enables JSON output),
    and SERVICE_NAME (global `service` context value).
    """
    configure_logging(
        log_level=os.getenv("LOG_LEVEL", "INFO"),
        json_logs=os.getenv("LOG_FORMAT", "").lower() == "json",
        service_name=os.getenv("SERVICE_NAME", "openrag"),
    )

97
uv.lock generated
View file

@ -864,6 +864,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/83/60/d497a310bde3f01cb805196ac61b7ad6dc5dcf8dce66634dc34364b20b4f/lazy_loader-0.4-py3-none-any.whl", hash = "sha256:342aa8e14d543a154047afb4ba8ef17f5563baad3fc610d7b15b213b0f119efc", size = 12097, upload-time = "2024-04-05T13:03:10.514Z" },
]
[[package]]
name = "linkify-it-py"
version = "2.0.3"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "uc-micro-py" },
]
sdist = { url = "https://files.pythonhosted.org/packages/2a/ae/bb56c6828e4797ba5a4821eec7c43b8bf40f69cda4d4f5f8c8a2810ec96a/linkify-it-py-2.0.3.tar.gz", hash = "sha256:68cda27e162e9215c17d786649d1da0021a451bdc436ef9e0fa0ba5234b9b048", size = 27946, upload-time = "2024-02-04T14:48:04.179Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/04/1e/b832de447dee8b582cac175871d2f6c3d5077cc56d5575cadba1fd1cccfa/linkify_it_py-2.0.3-py3-none-any.whl", hash = "sha256:6bcbc417b0ac14323382aef5c5192c0075bf8a9d6b41820a2b66371eac6b6d79", size = 19820, upload-time = "2024-02-04T14:48:02.496Z" },
]
[[package]]
name = "litellm"
version = "1.74.1"
@ -923,6 +935,14 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/42/d7/1ec15b46af6af88f19b8e5ffea08fa375d433c998b8a7639e76935c14f1f/markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1", size = 87528, upload-time = "2023-06-03T06:41:11.019Z" },
]
[package.optional-dependencies]
linkify = [
{ name = "linkify-it-py" },
]
plugins = [
{ name = "mdit-py-plugins" },
]
[[package]]
name = "marko"
version = "2.1.4"
@ -994,6 +1014,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/64/56/f98938bded6b2ac779c55e36bf5277d1fe4154da2246aa0621c1358efa2b/mcp_subscribe-0.1.1-py3-none-any.whl", hash = "sha256:617b8dc30253a992bddcb6023de6cce7eb95d3b976dc9a828892242c7a2c6eaa", size = 5092, upload-time = "2025-04-28T05:46:42.024Z" },
]
[[package]]
name = "mdit-py-plugins"
version = "0.5.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "markdown-it-py" },
]
sdist = { url = "https://files.pythonhosted.org/packages/b2/fd/a756d36c0bfba5f6e39a1cdbdbfdd448dc02692467d83816dff4592a1ebc/mdit_py_plugins-0.5.0.tar.gz", hash = "sha256:f4918cb50119f50446560513a8e311d574ff6aaed72606ddae6d35716fe809c6", size = 44655, upload-time = "2025-08-11T07:25:49.083Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/fb/86/dd6e5db36df29e76c7a7699123569a4a18c1623ce68d826ed96c62643cae/mdit_py_plugins-0.5.0-py3-none-any.whl", hash = "sha256:07a08422fc1936a5d26d146759e9155ea466e842f5ab2f7d2266dd084c8dab1f", size = 57205, upload-time = "2025-08-11T07:25:47.597Z" },
]
[[package]]
name = "mdurl"
version = "0.1.2"
@ -1389,8 +1421,13 @@ dependencies = [
{ name = "opensearch-py", extra = ["async"] },
{ name = "psutil" },
{ name = "pyjwt" },
{ name = "python-dotenv" },
{ name = "python-multipart" },
{ name = "rich" },
{ name = "starlette" },
{ name = "structlog" },
{ name = "textual" },
{ name = "textual-fspicker" },
{ name = "torch", version = "2.7.1+cu128", source = { registry = "https://download.pytorch.org/whl/cu128" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" },
{ name = "torch", version = "2.8.0", source = { registry = "https://pypi.org/simple" }, marker = "platform_machine != 'x86_64' or sys_platform != 'linux'" },
{ name = "uvicorn" },
@ -1411,8 +1448,13 @@ requires-dist = [
{ name = "opensearch-py", extras = ["async"], specifier = ">=3.0.0" },
{ name = "psutil", specifier = ">=7.0.0" },
{ name = "pyjwt", specifier = ">=2.8.0" },
{ name = "python-dotenv", specifier = ">=1.0.0" },
{ name = "python-multipart", specifier = ">=0.0.20" },
{ name = "rich", specifier = ">=13.0.0" },
{ name = "starlette", specifier = ">=0.47.1" },
{ name = "structlog", specifier = ">=25.4.0" },
{ name = "textual", specifier = ">=0.45.0" },
{ name = "textual-fspicker", specifier = ">=0.6.0" },
{ name = "torch", marker = "platform_machine != 'x86_64' or sys_platform != 'linux'", specifier = ">=2.7.1" },
{ name = "torch", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'", specifier = ">=2.7.1", index = "https://download.pytorch.org/whl/cu128" },
{ name = "uvicorn", specifier = ">=0.35.0" },
@ -1530,6 +1572,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/89/c7/5572fa4a3f45740eaab6ae86fcdf7195b55beac1371ac8c619d880cfe948/pillow-11.3.0-cp314-cp314t-win_arm64.whl", hash = "sha256:79ea0d14d3ebad43ec77ad5272e6ff9bba5b679ef73375ea760261207fa8e0aa", size = 2512835, upload-time = "2025-07-01T09:15:50.399Z" },
]
[[package]]
name = "platformdirs"
version = "4.4.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/23/e8/21db9c9987b0e728855bd57bff6984f67952bea55d6f75e055c46b5383e8/platformdirs-4.4.0.tar.gz", hash = "sha256:ca753cf4d81dc309bc67b0ea38fd15dc97bc30ce419a7f58d13eb3bf14c4febf", size = 21634, upload-time = "2025-08-26T14:32:04.268Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/40/4b/2028861e724d3bd36227adfa20d3fd24c3fc6d52032f4a93c133be5d17ce/platformdirs-4.4.0-py3-none-any.whl", hash = "sha256:abd01743f24e5287cd7a5db3752faf1a2d65353f38ec26d98e25a6db65958c85", size = 18654, upload-time = "2025-08-26T14:32:02.735Z" },
]
[[package]]
name = "pluggy"
version = "1.6.0"
@ -2267,6 +2318,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/82/95/38ef0cd7fa11eaba6a99b3c4f5ac948d8bc6ff199aabd327a29cc000840c/starlette-0.47.1-py3-none-any.whl", hash = "sha256:5e11c9f5c7c3f24959edbf2dffdc01bba860228acf657129467d8a7468591527", size = 72747, upload-time = "2025-06-21T04:03:15.705Z" },
]
[[package]]
name = "structlog"
version = "25.4.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/79/b9/6e672db4fec07349e7a8a8172c1a6ae235c58679ca29c3f86a61b5e59ff3/structlog-25.4.0.tar.gz", hash = "sha256:186cd1b0a8ae762e29417095664adf1d6a31702160a46dacb7796ea82f7409e4", size = 1369138, upload-time = "2025-06-02T08:21:12.971Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/a0/4a/97ee6973e3a73c74c8120d59829c3861ea52210667ec3e7a16045c62b64d/structlog-25.4.0-py3-none-any.whl", hash = "sha256:fe809ff5c27e557d14e613f45ca441aabda051d119ee5a0102aaba6ce40eed2c", size = 68720, upload-time = "2025-06-02T08:21:11.43Z" },
]
[[package]]
name = "sympy"
version = "1.14.0"
@ -2288,6 +2348,34 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/40/44/4a5f08c96eb108af5cb50b41f76142f0afa346dfa99d5296fe7202a11854/tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f", size = 35252, upload-time = "2022-10-06T17:21:44.262Z" },
]
[[package]]
name = "textual"
version = "6.1.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "markdown-it-py", extra = ["linkify", "plugins"] },
{ name = "platformdirs" },
{ name = "pygments" },
{ name = "rich" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/da/44/4b524b2f06e0fa6c4ede56a4e9af5edd5f3f83cf2eea5cb4fd0ce5bbe063/textual-6.1.0.tar.gz", hash = "sha256:cc89826ca2146c645563259320ca4ddc75d183c77afb7d58acdd46849df9144d", size = 1564786, upload-time = "2025-09-02T11:42:34.655Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/54/43/f91e041f239b54399310a99041faf33beae9a6e628671471d0fcd6276af4/textual-6.1.0-py3-none-any.whl", hash = "sha256:a3f5e6710404fcdc6385385db894699282dccf2ad50103cebc677403c1baadd5", size = 707840, upload-time = "2025-09-02T11:42:32.746Z" },
]
[[package]]
name = "textual-fspicker"
version = "0.6.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "textual" },
]
sdist = { url = "https://files.pythonhosted.org/packages/15/2e/8c1ae6f0c26af2fe0c49d61d42c91d0077cbfd984df049d7e3d82a40d93d/textual_fspicker-0.6.0.tar.gz", hash = "sha256:0da0e3f35025f72c5b90557d12777c9f67c674470b3263cbe2c2de38f5b70c3c", size = 16157, upload-time = "2025-08-26T15:38:19.805Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/06/62/463994497050869e517dde4da7f628599f6aea05dcd6bbb14d2a945c2499/textual_fspicker-0.6.0-py3-none-any.whl", hash = "sha256:4d0ddbebdc5d7c93ad0d1f48627003a60690bc6d382267ee033cfeb2e6b4949c", size = 24715, upload-time = "2025-08-26T15:38:14.344Z" },
]
[[package]]
name = "tifffile"
version = "2025.6.11"
@ -2552,6 +2640,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/5c/23/c7abc0ca0a1526a0774eca151daeb8de62ec457e77262b66b359c3c7679e/tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8", size = 347839, upload-time = "2025-03-23T13:54:41.845Z" },
]
[[package]]
name = "uc-micro-py"
version = "1.0.3"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/91/7a/146a99696aee0609e3712f2b44c6274566bc368dfe8375191278045186b8/uc-micro-py-1.0.3.tar.gz", hash = "sha256:d321b92cff673ec58027c04015fcaa8bb1e005478643ff4a500882eaab88c48a", size = 6043, upload-time = "2024-02-09T16:52:01.654Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/37/87/1f677586e8ac487e29672e4b17455758fce261de06a0d086167bb760361a/uc_micro_py-1.0.3-py3-none-any.whl", hash = "sha256:db1dffff340817673d7b466ec86114a9dc0e9d4d9b5ba229d9d60e5c12600cd5", size = 6229, upload-time = "2024-02-09T16:52:00.371Z" },
]
[[package]]
name = "uritemplate"
version = "4.2.0"