Merge branch 'main' into release-rc-0.1.55
This commit is contained in:
commit
0740183a0f
10 changed files with 508 additions and 185 deletions
22
Makefile
22
Makefile
|
|
@ -55,7 +55,7 @@ help:
|
|||
# Development environments
|
||||
dev:
|
||||
@echo "🚀 Starting OpenRAG with GPU support..."
|
||||
docker compose up -d
|
||||
docker compose -f docker-compose.yml -f docker-compose.gpu.yml up -d
|
||||
@echo "✅ Services started!"
|
||||
@echo " Backend: http://localhost:8000"
|
||||
@echo " Frontend: http://localhost:3000"
|
||||
|
|
@ -65,7 +65,7 @@ dev:
|
|||
|
||||
dev-cpu:
|
||||
@echo "🚀 Starting OpenRAG with CPU only..."
|
||||
docker compose -f docker-compose-cpu.yml up -d
|
||||
docker compose up -d
|
||||
@echo "✅ Services started!"
|
||||
@echo " Backend: http://localhost:8000"
|
||||
@echo " Frontend: http://localhost:3000"
|
||||
|
|
@ -93,7 +93,7 @@ infra:
|
|||
|
||||
infra-cpu:
|
||||
@echo "🔧 Starting infrastructure services only..."
|
||||
docker-compose -f docker-compose-cpu.yml up -d opensearch dashboards langflow
|
||||
docker compose up -d opensearch dashboards langflow
|
||||
@echo "✅ Infrastructure services started!"
|
||||
@echo " Langflow: http://localhost:7860"
|
||||
@echo " OpenSearch: http://localhost:9200"
|
||||
|
|
@ -103,14 +103,12 @@ infra-cpu:
|
|||
stop:
|
||||
@echo "🛑 Stopping all containers..."
|
||||
docker compose down
|
||||
docker compose -f docker-compose-cpu.yml down 2>/dev/null || true
|
||||
|
||||
restart: stop dev
|
||||
|
||||
clean: stop
|
||||
@echo "🧹 Cleaning up containers and volumes..."
|
||||
docker compose down -v --remove-orphans
|
||||
docker compose -f docker-compose-cpu.yml down -v --remove-orphans 2>/dev/null || true
|
||||
docker system prune -f
|
||||
|
||||
# Local development
|
||||
|
|
@ -210,13 +208,13 @@ test-ci:
|
|||
chmod 644 keys/public_key.pem 2>/dev/null || true; \
|
||||
fi; \
|
||||
echo "Cleaning up old containers and volumes..."; \
|
||||
docker compose -f docker-compose-cpu.yml down -v 2>/dev/null || true; \
|
||||
docker compose down -v 2>/dev/null || true; \
|
||||
echo "Pulling latest images..."; \
|
||||
docker compose -f docker-compose-cpu.yml pull; \
|
||||
docker compose pull; \
|
||||
echo "Building OpenSearch image override..."; \
|
||||
docker build --no-cache -t langflowai/openrag-opensearch:latest -f Dockerfile .; \
|
||||
echo "Starting infra (OpenSearch + Dashboards + Langflow + Backend + Frontend) with CPU containers"; \
|
||||
docker compose -f docker-compose-cpu.yml up -d opensearch dashboards langflow openrag-backend openrag-frontend; \
|
||||
docker compose up -d opensearch dashboards langflow openrag-backend openrag-frontend; \
|
||||
echo "Starting docling-serve..."; \
|
||||
DOCLING_ENDPOINT=$$(uv run python scripts/docling_ctl.py start --port 5001 | grep "Endpoint:" | awk '{print $$2}'); \
|
||||
echo "Docling-serve started at $$DOCLING_ENDPOINT"; \
|
||||
|
|
@ -288,7 +286,7 @@ test-ci:
|
|||
echo ""; \
|
||||
echo "Tearing down infra"; \
|
||||
uv run python scripts/docling_ctl.py stop || true; \
|
||||
docker compose -f docker-compose-cpu.yml down -v 2>/dev/null || true; \
|
||||
docker compose down -v 2>/dev/null || true; \
|
||||
exit $$TEST_RESULT
|
||||
|
||||
# CI-friendly integration test target with local builds: builds all images, brings up infra, waits, runs tests, tears down
|
||||
|
|
@ -305,14 +303,14 @@ test-ci-local:
|
|||
chmod 644 keys/public_key.pem 2>/dev/null || true; \
|
||||
fi; \
|
||||
echo "Cleaning up old containers and volumes..."; \
|
||||
docker compose -f docker-compose-cpu.yml down -v 2>/dev/null || true; \
|
||||
docker compose down -v 2>/dev/null || true; \
|
||||
echo "Building all images locally..."; \
|
||||
docker build -t langflowai/openrag-opensearch:latest -f Dockerfile .; \
|
||||
docker build -t langflowai/openrag-backend:latest -f Dockerfile.backend .; \
|
||||
docker build -t langflowai/openrag-frontend:latest -f Dockerfile.frontend .; \
|
||||
docker build -t langflowai/openrag-langflow:latest -f Dockerfile.langflow .; \
|
||||
echo "Starting infra (OpenSearch + Dashboards + Langflow + Backend + Frontend) with CPU containers"; \
|
||||
docker compose -f docker-compose-cpu.yml up -d opensearch dashboards langflow openrag-backend openrag-frontend; \
|
||||
docker compose up -d opensearch dashboards langflow openrag-backend openrag-frontend; \
|
||||
echo "Starting docling-serve..."; \
|
||||
DOCLING_ENDPOINT=$$(uv run python scripts/docling_ctl.py start --port 5001 | grep "Endpoint:" | awk '{print $$2}'); \
|
||||
echo "Docling-serve started at $$DOCLING_ENDPOINT"; \
|
||||
|
|
@ -394,7 +392,7 @@ test-ci-local:
|
|||
fi; \
|
||||
echo "Tearing down infra"; \
|
||||
uv run python scripts/docling_ctl.py stop || true; \
|
||||
docker compose -f docker-compose-cpu.yml down -v 2>/dev/null || true; \
|
||||
docker compose down -v 2>/dev/null || true; \
|
||||
exit $$TEST_RESULT
|
||||
|
||||
# SDK integration tests (requires running OpenRAG instance)
|
||||
|
|
|
|||
|
|
@ -1,143 +0,0 @@
|
|||
services:
|
||||
opensearch:
|
||||
image: langflowai/openrag-opensearch:${OPENRAG_VERSION:-latest}
|
||||
#build:
|
||||
# context: .
|
||||
# dockerfile: Dockerfile
|
||||
container_name: os
|
||||
depends_on:
|
||||
- openrag-backend
|
||||
environment:
|
||||
- discovery.type=single-node
|
||||
- OPENSEARCH_INITIAL_ADMIN_PASSWORD=${OPENSEARCH_PASSWORD}
|
||||
# Run security setup in background after OpenSearch starts
|
||||
command: >
|
||||
bash -c "
|
||||
# Ensure data directory has correct permissions
|
||||
sudo chown -R opensearch:opensearch /usr/share/opensearch/data || true
|
||||
|
||||
# Start OpenSearch in background
|
||||
/usr/share/opensearch/opensearch-docker-entrypoint.sh opensearch &
|
||||
|
||||
# Wait a bit for OpenSearch to start, then apply security config
|
||||
sleep 10 && /usr/share/opensearch/setup-security.sh &
|
||||
|
||||
# Wait for background processes
|
||||
wait
|
||||
"
|
||||
ports:
|
||||
- "9200:9200"
|
||||
- "9600:9600"
|
||||
volumes:
|
||||
- ${OPENSEARCH_DATA_PATH:-./opensearch-data}:/usr/share/opensearch/data:Z
|
||||
|
||||
dashboards:
|
||||
image: opensearchproject/opensearch-dashboards:3.0.0
|
||||
container_name: osdash
|
||||
depends_on:
|
||||
- opensearch
|
||||
environment:
|
||||
OPENSEARCH_HOSTS: '["https://opensearch:9200"]'
|
||||
OPENSEARCH_USERNAME: "admin"
|
||||
OPENSEARCH_PASSWORD: ${OPENSEARCH_PASSWORD}
|
||||
ports:
|
||||
- "5601:5601"
|
||||
|
||||
openrag-backend:
|
||||
image: langflowai/openrag-backend:${OPENRAG_VERSION:-latest}
|
||||
# build:
|
||||
# context: .
|
||||
# dockerfile: Dockerfile.backend
|
||||
container_name: openrag-backend
|
||||
depends_on:
|
||||
- langflow
|
||||
environment:
|
||||
- OPENSEARCH_HOST=opensearch
|
||||
- LANGFLOW_URL=http://langflow:7860
|
||||
- LANGFLOW_PUBLIC_URL=${LANGFLOW_PUBLIC_URL}
|
||||
- LANGFLOW_AUTO_LOGIN=${LANGFLOW_AUTO_LOGIN}
|
||||
- LANGFLOW_SECRET_KEY=${LANGFLOW_SECRET_KEY}
|
||||
- LANGFLOW_SUPERUSER=${LANGFLOW_SUPERUSER}
|
||||
- LANGFLOW_SUPERUSER_PASSWORD=${LANGFLOW_SUPERUSER_PASSWORD}
|
||||
- LANGFLOW_CHAT_FLOW_ID=${LANGFLOW_CHAT_FLOW_ID}
|
||||
- LANGFLOW_INGEST_FLOW_ID=${LANGFLOW_INGEST_FLOW_ID}
|
||||
- LANGFLOW_URL_INGEST_FLOW_ID=${LANGFLOW_URL_INGEST_FLOW_ID}
|
||||
- DISABLE_INGEST_WITH_LANGFLOW=${DISABLE_INGEST_WITH_LANGFLOW:-false}
|
||||
- NUDGES_FLOW_ID=${NUDGES_FLOW_ID}
|
||||
- OPENSEARCH_PORT=9200
|
||||
- OPENSEARCH_USERNAME=admin
|
||||
- OPENSEARCH_PASSWORD=${OPENSEARCH_PASSWORD}
|
||||
- OPENAI_API_KEY=${OPENAI_API_KEY}
|
||||
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
|
||||
- WATSONX_API_KEY=${WATSONX_API_KEY}
|
||||
- WATSONX_ENDPOINT=${WATSONX_ENDPOINT}
|
||||
- WATSONX_PROJECT_ID=${WATSONX_PROJECT_ID}
|
||||
- OLLAMA_ENDPOINT=${OLLAMA_ENDPOINT}
|
||||
- GOOGLE_OAUTH_CLIENT_ID=${GOOGLE_OAUTH_CLIENT_ID}
|
||||
- GOOGLE_OAUTH_CLIENT_SECRET=${GOOGLE_OAUTH_CLIENT_SECRET}
|
||||
- MICROSOFT_GRAPH_OAUTH_CLIENT_ID=${MICROSOFT_GRAPH_OAUTH_CLIENT_ID}
|
||||
- MICROSOFT_GRAPH_OAUTH_CLIENT_SECRET=${MICROSOFT_GRAPH_OAUTH_CLIENT_SECRET}
|
||||
- WEBHOOK_BASE_URL=${WEBHOOK_BASE_URL}
|
||||
- AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}
|
||||
- AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
|
||||
volumes:
|
||||
- ./openrag-documents:/app/openrag-documents:Z
|
||||
- ./keys:/app/keys:Z
|
||||
- ./flows:/app/flows:U,z
|
||||
- ./config:/app/config:Z
|
||||
|
||||
openrag-frontend:
|
||||
image: langflowai/openrag-frontend:${OPENRAG_VERSION:-latest}
|
||||
# build:
|
||||
# context: .
|
||||
# dockerfile: Dockerfile.frontend
|
||||
container_name: openrag-frontend
|
||||
depends_on:
|
||||
- openrag-backend
|
||||
environment:
|
||||
- OPENRAG_BACKEND_HOST=openrag-backend
|
||||
ports:
|
||||
- "3000:3000"
|
||||
|
||||
langflow:
|
||||
volumes:
|
||||
- ./flows:/app/flows:U,z
|
||||
image: langflowai/openrag-langflow:${LANGFLOW_VERSION:-latest}
|
||||
# build:
|
||||
# context: .
|
||||
# dockerfile: Dockerfile.langflow
|
||||
container_name: langflow
|
||||
ports:
|
||||
- "7860:7860"
|
||||
environment:
|
||||
- LANGFLOW_DEACTIVATE_TRACING=true
|
||||
- OPENAI_API_KEY=${OPENAI_API_KEY}
|
||||
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
|
||||
- WATSONX_API_KEY=${WATSONX_API_KEY}
|
||||
- WATSONX_ENDPOINT=${WATSONX_ENDPOINT}
|
||||
- WATSONX_PROJECT_ID=${WATSONX_PROJECT_ID}
|
||||
- OLLAMA_BASE_URL=${OLLAMA_ENDPOINT}
|
||||
- LANGFLOW_LOAD_FLOWS_PATH=/app/flows
|
||||
- LANGFLOW_SECRET_KEY=${LANGFLOW_SECRET_KEY}
|
||||
- JWT=None
|
||||
- OWNER=None
|
||||
- OWNER_NAME=None
|
||||
- OWNER_EMAIL=None
|
||||
- CONNECTOR_TYPE=system
|
||||
- CONNECTOR_TYPE_URL=url
|
||||
- OPENRAG-QUERY-FILTER="{}"
|
||||
- OPENSEARCH_PASSWORD=${OPENSEARCH_PASSWORD}
|
||||
- FILENAME=None
|
||||
- MIMETYPE=None
|
||||
- FILESIZE=0
|
||||
- SELECTED_EMBEDDING_MODEL=${SELECTED_EMBEDDING_MODEL:-}
|
||||
- LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT=JWT,OPENRAG-QUERY-FILTER,OPENSEARCH_PASSWORD,OWNER,OWNER_NAME,OWNER_EMAIL,CONNECTOR_TYPE,FILENAME,MIMETYPE,FILESIZE,SELECTED_EMBEDDING_MODEL
|
||||
- LANGFLOW_LOG_LEVEL=DEBUG
|
||||
- LANGFLOW_AUTO_LOGIN=${LANGFLOW_AUTO_LOGIN}
|
||||
- LANGFLOW_SUPERUSER=${LANGFLOW_SUPERUSER}
|
||||
- LANGFLOW_SUPERUSER_PASSWORD=${LANGFLOW_SUPERUSER_PASSWORD}
|
||||
- LANGFLOW_NEW_USER_IS_ACTIVE=${LANGFLOW_NEW_USER_IS_ACTIVE}
|
||||
- LANGFLOW_ENABLE_SUPERUSER_CLI=${LANGFLOW_ENABLE_SUPERUSER_CLI}
|
||||
# - DEFAULT_FOLDER_NAME=OpenRAG
|
||||
- HIDE_GETTING_STARTED_PROGRESS=true
|
||||
|
||||
|
|
@ -133,7 +133,6 @@ services:
|
|||
- MIMETYPE=None
|
||||
- FILESIZE=0
|
||||
- SELECTED_EMBEDDING_MODEL=${SELECTED_EMBEDDING_MODEL:-}
|
||||
- OPENSEARCH_PASSWORD=${OPENSEARCH_PASSWORD}
|
||||
- LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT=JWT,OPENRAG-QUERY-FILTER,OPENSEARCH_PASSWORD,OWNER,OWNER_NAME,OWNER_EMAIL,CONNECTOR_TYPE,FILENAME,MIMETYPE,FILESIZE,SELECTED_EMBEDDING_MODEL,OPENAI_API_KEY,ANTHROPIC_API_KEY,WATSONX_API_KEY,WATSONX_ENDPOINT,WATSONX_PROJECT_ID,OLLAMA_BASE_URL
|
||||
- LANGFLOW_LOG_LEVEL=DEBUG
|
||||
- LANGFLOW_AUTO_LOGIN=${LANGFLOW_AUTO_LOGIN}
|
||||
|
|
|
|||
|
|
@ -114,31 +114,28 @@ The following variables are required or recommended:
|
|||
PID: 27746
|
||||
```
|
||||
|
||||
3. Deploy the OpenRAG containers locally using the appropriate Docker Compose file for your environment.
|
||||
Both files deploy the same services.
|
||||
3. Deploy the OpenRAG containers locally using the appropriate Docker Compose configuration for your environment.
|
||||
|
||||
* [`docker-compose.yml`](https://github.com/langflow-ai/openrag/blob/main/docker-compose.yml): If your host machine has an NVIDIA GPU with CUDA support and compatible NVIDIA drivers, you can use this file to deploy OpenRAG with accelerated processing.
|
||||
* **GPU-accelerated deployment**: If your host machine has an NVIDIA GPU with CUDA support and compatible NVIDIA drivers, use the base `docker-compose.yml` file with the `docker-compose.gpu.yml` override.
|
||||
|
||||
```bash title="Docker"
|
||||
docker compose -f docker-compose.yml -f docker-compose.gpu.yml up -d
|
||||
```
|
||||
|
||||
```bash title="Podman"
|
||||
podman compose -f docker-compose.yml -f docker-compose.gpu.yml up -d
|
||||
```
|
||||
|
||||
* **CPU-only deployment** (default): If your host machine doesn't have NVIDIA GPU support, use the base `docker-compose.yml` file.
|
||||
|
||||
```bash title="Docker"
|
||||
docker compose build
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
```bash title="Podman"
|
||||
podman compose build
|
||||
podman compose up -d
|
||||
```
|
||||
|
||||
* [`docker-compose-cpu.yml`](https://github.com/langflow-ai/openrag/blob/main/docker-compose-cpu.yml): If your host machine doesn't have NVIDIA GPU support, use this file for a CPU-only OpenRAG deployment.
|
||||
|
||||
```bash title="Docker"
|
||||
docker compose -f docker-compose-cpu.yml up -d
|
||||
```
|
||||
|
||||
```bash title="Podman"
|
||||
podman compose -f docker-compose-cpu.yml up -d
|
||||
```
|
||||
|
||||
4. Wait for the OpenRAG containers to start, and then confirm that all containers are running:
|
||||
|
||||
```bash title="Docker"
|
||||
|
|
|
|||
|
|
@ -38,7 +38,7 @@ For more information about this variable and how this variable controls Langflow
|
|||
|
||||
## Container out of memory errors
|
||||
|
||||
Increase Docker memory allocation or use [docker-compose-cpu.yml](https://github.com/langflow-ai/openrag/blob/main/docker-compose-cpu.yml) to deploy OpenRAG.
|
||||
Increase Docker memory allocation or use the CPU-only deployment (base `docker-compose.yml` without GPU override) to reduce memory usage.
|
||||
|
||||
## Memory issue with Podman on macOS
|
||||
|
||||
|
|
|
|||
|
|
@ -1338,3 +1338,279 @@ class ContainerManager:
|
|||
self.platform_detector.check_podman_macos_memory()
|
||||
)
|
||||
return is_sufficient, message
|
||||
|
||||
async def prune_old_images(self) -> AsyncIterator[tuple[bool, str]]:
    """Prune old OpenRAG images and dependencies, keeping only the latest versions.

    This method:
    1. Lists all images
    2. Identifies OpenRAG-related images (openrag-backend, openrag-frontend, langflow, opensearch, dashboards)
    3. For each repository, keeps only the latest/currently used image
    4. Removes old images
    5. Prunes dangling images

    Yields:
        Tuples of (success, message) for progress updates.
        NOTE(review): by convention in this generator, False appears to mean
        "progress/informational" and True marks a completed milestone — it is
        not a hard failure flag; confirm against CommandOutputModal's handling.
    """
    # Bail out early when neither docker nor podman is usable.
    if not self.is_available():
        yield False, "No container runtime available"
        return

    yield False, "Scanning for OpenRAG images..."

    # Get list of all images. The tab-separated Go template keeps parsing
    # trivial: repo:tag, image id, creation timestamp per line.
    success, stdout, stderr = await self._run_runtime_command(
        ["images", "--format", "{{.Repository}}:{{.Tag}}\t{{.ID}}\t{{.CreatedAt}}"]
    )

    if not success:
        yield False, f"Failed to list images: {stderr}"
        return

    # Parse images and group by repository.
    # Substring matching (rather than exact equality) is used below so that
    # registry-prefixed names (e.g. "docker.io/langflowai/openrag-backend",
    # as printed by podman) still match — presumably intentional; verify.
    openrag_repos = {
        "langflowai/openrag-backend",
        "langflowai/openrag-frontend",
        "langflowai/openrag-langflow",
        "langflowai/openrag-opensearch",
        "langflowai/openrag-dashboards",
        "langflow/langflow",  # Also include base langflow images
        "opensearchproject/opensearch",
        "opensearchproject/opensearch-dashboards",
    }

    # Map repository name -> list of {"full_tag", "id", "created"} dicts.
    images_by_repo = {}
    for line in stdout.strip().split("\n"):
        if not line.strip():
            continue

        parts = line.split("\t")
        if len(parts) < 3:
            # Malformed line; skip rather than crash on unpacking.
            continue

        image_tag, image_id, created_at = parts[0], parts[1], parts[2]

        # Skip <none> tags (dangling images will be handled separately)
        if "<none>" in image_tag:
            continue

        # Extract repository name (without tag). rsplit on the last ":" so
        # registry host:port prefixes are not mistaken for the tag separator.
        if ":" in image_tag:
            repo = image_tag.rsplit(":", 1)[0]
        else:
            repo = image_tag

        # Check if this is an OpenRAG-related image
        if any(openrag_repo in repo for openrag_repo in openrag_repos):
            if repo not in images_by_repo:
                images_by_repo[repo] = []
            images_by_repo[repo].append({
                "full_tag": image_tag,
                "id": image_id,
                "created": created_at,
            })

    if not images_by_repo:
        yield True, "No OpenRAG images found to prune"
        # Still run dangling image cleanup
        yield False, "Cleaning up dangling images..."
        success, stdout, stderr = await self._run_runtime_command(
            ["image", "prune", "-f"]
        )
        if success:
            yield True, "Dangling images cleaned up"
        else:
            yield False, f"Failed to prune dangling images: {stderr}"
        return

    # Get currently used images (from running/stopped containers) so we never
    # delete an image a container still refers to.
    services = await self.get_service_status(force_refresh=True)
    current_images = set()
    for service_info in services.values():
        if service_info.image and service_info.image != "N/A":
            current_images.add(service_info.image)

    yield False, f"Found {len(images_by_repo)} OpenRAG image repositories"

    # For each repository, remove old images (keep latest and currently used)
    total_removed = 0
    for repo, images in images_by_repo.items():
        if len(images) <= 1:
            # Only one image for this repo, skip
            continue

        # Sort by creation date (newest first).
        # NOTE(review): this is a plain string comparison of the runtime's
        # CreatedAt output; it sorts correctly only if that format is
        # lexicographically ordered (true for ISO-style timestamps) — confirm
        # for the runtimes in use.
        images.sort(key=lambda x: x["created"], reverse=True)

        # Keep the newest image and any currently used images
        images_to_remove = []
        for i, img in enumerate(images):
            # Keep the first (newest) image
            if i == 0:
                continue
            # Keep currently used images
            if img["full_tag"] in current_images:
                continue
            # Mark for removal
            images_to_remove.append(img)

        if not images_to_remove:
            yield False, f"No old images to remove for {repo}"
            continue

        # Remove old images
        for img in images_to_remove:
            yield False, f"Removing old image: {img['full_tag']}"
            success, stdout, stderr = await self._run_runtime_command(
                ["rmi", img["id"]]
            )
            if success:
                total_removed += 1
                yield False, f" ✓ Removed {img['full_tag']}"
            else:
                # Don't fail the whole operation if one image fails
                # (might be in use by another container)
                yield False, f" ⚠ Could not remove {img['full_tag']}: {stderr.strip()}"

    if total_removed > 0:
        yield True, f"Removed {total_removed} old image(s)"
    else:
        yield True, "No old images were removed"

    # Clean up dangling images (untagged images)
    yield False, "Cleaning up dangling images..."
    success, stdout, stderr = await self._run_runtime_command(
        ["image", "prune", "-f"]
    )

    if success:
        # Parse output to see if anything was removed
        if stdout.strip():
            yield True, f"Dangling images cleaned: {stdout.strip()}"
        else:
            yield True, "No dangling images to clean"
    else:
        yield False, f"Failed to prune dangling images: {stderr}"

    yield True, "Image pruning completed"
|
||||
|
||||
async def prune_all_images(self) -> AsyncIterator[tuple[bool, str]]:
    """Stop services and prune ALL OpenRAG images and dependencies.

    This is a more aggressive pruning that:
    1. Stops all running services
    2. Removes ALL OpenRAG-related images (not just old versions)
    3. Prunes dangling images

    This frees up maximum disk space but requires re-downloading images on next start.

    Yields:
        Tuples of (success, message) for progress updates.
        NOTE(review): as with prune_old_images, False appears to mean
        "progress/informational" and True a completed milestone — confirm.
    """
    if not self.is_available():
        yield False, "No container runtime available"
        return

    # Step 1: Stop all services first — images cannot be force-removed while
    # containers referencing them are running.
    yield False, "Stopping all services..."
    async for success, message in self.stop_services():
        yield success, message
        # Abort only on messages that look like genuine failures; plain
        # progress updates from stop_services() also carry success=False.
        if not success and "failed" in message.lower():
            yield False, "Failed to stop services, aborting prune"
            return

    # Give services time to fully stop
    import asyncio
    await asyncio.sleep(2)

    yield False, "Scanning for OpenRAG images..."

    # Get list of all images (repo:tag and id only; no timestamp needed here
    # since everything matching is removed regardless of age).
    success, stdout, stderr = await self._run_runtime_command(
        ["images", "--format", "{{.Repository}}:{{.Tag}}\t{{.ID}}"]
    )

    if not success:
        yield False, f"Failed to list images: {stderr}"
        return

    # Parse images and identify ALL OpenRAG-related images.
    # Substring matching keeps registry-prefixed names (podman) in scope.
    openrag_repos = {
        "langflowai/openrag-backend",
        "langflowai/openrag-frontend",
        "langflowai/openrag-langflow",
        "langflowai/openrag-opensearch",
        "langflowai/openrag-dashboards",
        "langflow/langflow",
        "opensearchproject/opensearch",
        "opensearchproject/opensearch-dashboards",
    }

    images_to_remove = []
    for line in stdout.strip().split("\n"):
        if not line.strip():
            continue

        parts = line.split("\t")
        if len(parts) < 2:
            continue

        image_tag, image_id = parts[0], parts[1]

        # Skip <none> tags (will be handled by prune)
        if "<none>" in image_tag:
            continue

        # Extract repository name (without tag)
        if ":" in image_tag:
            repo = image_tag.rsplit(":", 1)[0]
        else:
            repo = image_tag

        # Check if this is an OpenRAG-related image
        if any(openrag_repo in repo for openrag_repo in openrag_repos):
            images_to_remove.append({
                "full_tag": image_tag,
                "id": image_id,
            })

    if not images_to_remove:
        yield True, "No OpenRAG images found to remove"
    else:
        yield False, f"Found {len(images_to_remove)} OpenRAG image(s) to remove"

        # Remove all OpenRAG images
        total_removed = 0
        for img in images_to_remove:
            yield False, f"Removing image: {img['full_tag']}"
            success, stdout, stderr = await self._run_runtime_command(
                ["rmi", "-f", img["id"]]  # Force remove
            )
            if success:
                total_removed += 1
                yield False, f" ✓ Removed {img['full_tag']}"
            else:
                yield False, f" ⚠ Could not remove {img['full_tag']}: {stderr.strip()}"

        if total_removed > 0:
            yield True, f"Removed {total_removed} OpenRAG image(s)"
        else:
            yield False, "No images were removed"

    # Clean up dangling images
    yield False, "Cleaning up dangling images..."
    success, stdout, stderr = await self._run_runtime_command(
        ["image", "prune", "-f"]
    )

    if success:
        if stdout.strip():
            yield True, f"Dangling images cleaned: {stdout.strip()}"
        else:
            yield True, "No dangling images to clean"
    else:
        yield False, f"Failed to prune dangling images: {stderr}"

    yield True, "All OpenRAG images removed successfully"
|
||||
|
||||
|
|
|
|||
|
|
@ -79,6 +79,7 @@ class EnvConfig:
|
|||
openrag_config_path: str = "$HOME/.openrag/config"
|
||||
openrag_data_path: str = "$HOME/.openrag/data" # Backend data (conversations, tokens, etc.)
|
||||
opensearch_data_path: str = "$HOME/.openrag/data/opensearch-data"
|
||||
openrag_tui_config_path_legacy: str = "$HOME/.openrag/tui/config"
|
||||
|
||||
# Container version (linked to TUI version)
|
||||
openrag_version: str = ""
|
||||
|
|
|
|||
|
|
@ -296,6 +296,8 @@ class MonitorScreen(Screen):
|
|||
self.run_worker(self._upgrade_services())
|
||||
elif button_id.startswith("reset-btn"):
|
||||
self.run_worker(self._reset_services())
|
||||
elif button_id.startswith("prune-btn"):
|
||||
self.run_worker(self._prune_images())
|
||||
elif button_id.startswith("docling-start-btn"):
|
||||
self.run_worker(self._start_docling_serve())
|
||||
elif button_id.startswith("docling-stop-btn"):
|
||||
|
|
@ -502,6 +504,16 @@ class MonitorScreen(Screen):
|
|||
# Recreate empty config directory
|
||||
config_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Also delete legacy TUI config folder if it exists (~/.openrag/tui/config/)
|
||||
tui_config_path = expand_path(env_manager.config.openrag_tui_config_path_legacy)
|
||||
if tui_config_path.exists():
|
||||
success, msg = await self.container_manager.clear_directory_with_container(tui_config_path)
|
||||
if not success:
|
||||
# Fallback to regular rmtree if container method fails
|
||||
shutil.rmtree(tui_config_path)
|
||||
# Recreate empty config directory
|
||||
tui_config_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Delete flow backups only if user chose to (and they actually exist)
|
||||
if self._check_flow_backups():
|
||||
if delete_backups:
|
||||
|
|
@ -565,6 +577,39 @@ class MonitorScreen(Screen):
|
|||
|
||||
yield True, "Factory reset completed successfully"
|
||||
|
||||
async def _prune_images(self) -> None:
    """Ask the user for a prune mode, then run it with live progress output."""
    self.operation_in_progress = True
    try:
        # Ask the user which prune mode to use (unused-only, all, or cancel).
        from tui.widgets.prune_options_modal import PruneOptionsModal

        choice = await self.app.push_screen_wait(PruneOptionsModal())

        if choice == "cancel":
            self.notify("Prune cancelled", severity="information")
            return

        # Map the user's choice onto the matching container-manager operation.
        if choice == "all":
            # Aggressive mode: stop everything, then remove every OpenRAG image.
            stream = self.container_manager.prune_all_images()
            title = "Stopping Services & Pruning All Images"
        else:
            # Default mode: drop only superseded image versions.
            stream = self.container_manager.prune_old_images()
            title = "Pruning Unused Images"

        # Stream the operation's progress messages in a modal dialog.
        output_modal = CommandOutputModal(
            title,
            stream,
            on_complete=None,  # We'll refresh in on_screen_resume instead
        )
        self.app.push_screen(output_modal)
    finally:
        self.operation_in_progress = False
|
||||
|
||||
def _check_flow_backups(self) -> bool:
|
||||
"""Check if there are any flow backups in flows/backup directory."""
|
||||
from pathlib import Path
|
||||
|
|
@ -861,10 +906,13 @@ class MonitorScreen(Screen):
|
|||
Button("Start Services", variant="success", id=f"start-btn{suffix}")
|
||||
)
|
||||
|
||||
# Always show upgrade and reset buttons
|
||||
# Always show upgrade, prune, and reset buttons
|
||||
controls.mount(
|
||||
Button("Upgrade", variant="warning", id=f"upgrade-btn{suffix}")
|
||||
)
|
||||
controls.mount(
|
||||
Button("Prune Images", variant="default", id=f"prune-btn{suffix}")
|
||||
)
|
||||
controls.mount(Button("Factory Reset", variant="error", id=f"reset-btn{suffix}"))
|
||||
|
||||
except Exception as e:
|
||||
|
|
|
|||
|
|
@ -23,6 +23,7 @@ class WelcomeScreen(Screen):
|
|||
|
||||
BINDINGS = [
|
||||
("q", "quit", "Quit"),
|
||||
("r", "refresh", "Refresh"),
|
||||
]
|
||||
|
||||
def __init__(self):
|
||||
|
|
@ -305,16 +306,10 @@ class WelcomeScreen(Screen):
|
|||
else:
|
||||
self.default_button_id = "basic-setup-btn"
|
||||
|
||||
# Update the welcome text
|
||||
try:
|
||||
welcome_widget = self.query_one("#welcome-text")
|
||||
welcome_widget.update(self._create_welcome_text())
|
||||
except:
|
||||
pass # Widget might not be mounted yet
|
||||
|
||||
# Focus the appropriate button (the buttons are created correctly in compose,
|
||||
# the issue was they weren't being updated after service operations)
|
||||
self.call_after_refresh(self._focus_appropriate_button)
|
||||
# Refresh the welcome text AND buttons based on the updated async state
|
||||
# This ensures buttons match the actual service state (fixes issue where
|
||||
# text showed "All services running" but buttons weren't updated)
|
||||
await self._refresh_welcome_content()
|
||||
|
||||
def _focus_appropriate_button(self) -> None:
|
||||
"""Focus the appropriate button based on current state."""
|
||||
|
|
@ -341,8 +336,22 @@ class WelcomeScreen(Screen):
|
|||
os.getenv("MICROSOFT_GRAPH_OAUTH_CLIENT_ID")
|
||||
)
|
||||
|
||||
# Re-detect service state
|
||||
self._detect_services_sync()
|
||||
# Re-detect container services using async method for accuracy
|
||||
if self.container_manager.is_available():
|
||||
services = await self.container_manager.get_service_status(force_refresh=True)
|
||||
expected = set(self.container_manager.expected_services)
|
||||
running_services = [
|
||||
s.name for s in services.values() if s.status == ServiceStatus.RUNNING
|
||||
]
|
||||
starting_services = [
|
||||
s.name for s in services.values() if s.status == ServiceStatus.STARTING
|
||||
]
|
||||
self.services_running = len(running_services) == len(expected) and len(starting_services) == 0
|
||||
else:
|
||||
self.services_running = False
|
||||
|
||||
# Re-detect native service state
|
||||
self.docling_running = self.docling_manager.is_running()
|
||||
|
||||
# Refresh the welcome content and buttons
|
||||
await self._refresh_welcome_content()
|
||||
|
|
@ -397,6 +406,38 @@ class WelcomeScreen(Screen):
|
|||
|
||||
self.app.push_screen(DiagnosticsScreen())
|
||||
|
||||
def action_refresh(self) -> None:
    """Kick off a background worker that re-detects service state."""
    refresh_work = self._refresh_state()
    self.run_worker(refresh_work)
|
||||
|
||||
async def _refresh_state(self) -> None:
    """Re-detect container and native service state, then redraw the screen."""
    if not self.container_manager.is_available():
        self.services_running = False
    else:
        # Query live container status, bypassing any cached snapshot.
        services = await self.container_manager.get_service_status(force_refresh=True)
        expected_total = len(set(self.container_manager.expected_services))
        statuses = [info.status for info in services.values()]
        running_total = sum(1 for state in statuses if state == ServiceStatus.RUNNING)
        still_starting = any(state == ServiceStatus.STARTING for state in statuses)
        # "Running" means every expected service is up and none are mid-start.
        self.services_running = running_total == expected_total and not still_starting

    # Re-detect the native (non-container) docling-serve process.
    self.docling_running = self.docling_manager.is_running()

    # Update OAuth config state from the environment.
    self.has_oauth_config = bool(os.getenv("GOOGLE_OAUTH_CLIENT_ID")) or bool(
        os.getenv("MICROSOFT_GRAPH_OAUTH_CLIENT_ID")
    )

    # Rebuild the welcome text and buttons to match the fresh state.
    await self._refresh_welcome_content()
    self.notify("Refreshed", severity="information", timeout=2)
|
||||
|
||||
def action_start_all_services(self) -> None:
|
||||
"""Start all services (native first, then containers)."""
|
||||
self.run_worker(self._start_all_services())
|
||||
|
|
|
|||
106
src/tui/widgets/prune_options_modal.py
Normal file
106
src/tui/widgets/prune_options_modal.py
Normal file
|
|
@ -0,0 +1,106 @@
|
|||
"""Prune options modal for OpenRAG TUI."""
|
||||
|
||||
from textual.app import ComposeResult
|
||||
from textual.containers import Container, Horizontal
|
||||
from textual.screen import ModalScreen
|
||||
from textual.widgets import Button, Static, Label
|
||||
|
||||
|
||||
class PruneOptionsModal(ModalScreen[str]):
    """Modal dialog to choose prune options.

    Dismisses with one of three string results:
    ``"unused"`` (prune old image versions only), ``"all"`` (stop services
    and remove every OpenRAG image), or ``"cancel"``.
    """

    # Scoped Textual CSS for this modal only; colors follow the app's
    # zinc/pink palette used elsewhere in the TUI.
    DEFAULT_CSS = """
    PruneOptionsModal {
        align: center middle;
    }

    #dialog {
        width: 70;
        height: auto;
        border: solid #3f3f46;
        background: #27272a;
        padding: 0;
    }

    #title {
        background: #ec4899;
        color: #fafafa;
        padding: 1 2;
        text-align: center;
        width: 100%;
        text-style: bold;
    }

    #message {
        padding: 2;
        color: #fafafa;
        text-align: left;
    }

    #button-row {
        width: 100%;
        height: auto;
        align: center middle;
        padding: 1;
        margin-top: 1;
    }

    #button-row Button {
        margin: 0 1;
        min-width: 20;
        background: #27272a;
        color: #fafafa;
        border: round #52525b;
        text-style: none;
        tint: transparent 0%;
    }

    #button-row Button:hover {
        background: #27272a !important;
        color: #fafafa !important;
        border: round #52525b;
        tint: transparent 0%;
        text-style: none;
    }

    #button-row Button:focus {
        background: #27272a !important;
        color: #fafafa !important;
        border: round #ec4899;
        tint: transparent 0%;
        text-style: none;
    }
    """

    def compose(self) -> ComposeResult:
        """Create the modal dialog layout: title bar, explanation, button row."""
        with Container(id="dialog"):
            yield Label("🗑️ Prune Images", id="title")
            yield Static(
                "Choose how to prune OpenRAG images:\n\n"
                "• Prune Unused Only\n"
                " Remove old versions, keep latest and currently used images\n"
                " (Services will continue running)\n\n"
                "• Stop & Prune All\n"
                " Stop all services and remove ALL OpenRAG images\n"
                " (Frees maximum disk space, images will be re-downloaded on next start)\n\n"
                "What would you like to do?",
                id="message",
            )
            with Horizontal(id="button-row"):
                yield Button("Cancel", id="cancel-btn")
                yield Button("Prune Unused Only", id="prune-unused-btn", variant="primary")
                yield Button("Stop & Prune All", id="prune-all-btn", variant="warning")

    def on_mount(self) -> None:
        """Focus the prune unused button by default (the safer of the two actions)."""
        self.query_one("#prune-unused-btn", Button).focus()

    def on_button_pressed(self, event: Button.Pressed) -> None:
        """Handle button presses by dismissing with the chosen mode string."""
        if event.button.id == "prune-unused-btn":
            self.dismiss("unused")  # Prune only unused images
        elif event.button.id == "prune-all-btn":
            self.dismiss("all")  # Stop services and prune all
        else:
            self.dismiss("cancel")  # User cancelled
|
||||
Loading…
Add table
Reference in a new issue