Merge pull request #698 from langflow-ai/copilot/fix-duplicate-opensearch-password

Remove duplicate OPENSEARCH_PASSWORD and legacy docker-compose-cpu.yml
Sebastián Estévez 2025-12-19 17:10:33 -05:00 committed by GitHub
commit 2ea4c6192a
5 changed files with 23 additions and 172 deletions

Makefile

@@ -55,7 +55,7 @@ help:
# Development environments
dev:
@echo "🚀 Starting OpenRAG with GPU support..."
- docker compose up -d
+ docker compose -f docker-compose.yml -f docker-compose.gpu.yml up -d
@echo "✅ Services started!"
@echo " Backend: http://localhost:8000"
@echo " Frontend: http://localhost:3000"
@@ -65,7 +65,7 @@ dev:
dev-cpu:
@echo "🚀 Starting OpenRAG with CPU only..."
- docker compose -f docker-compose-cpu.yml up -d
+ docker compose up -d
@echo "✅ Services started!"
@echo " Backend: http://localhost:8000"
@echo " Frontend: http://localhost:3000"
@@ -93,7 +93,7 @@ infra:
infra-cpu:
@echo "🔧 Starting infrastructure services only..."
- docker-compose -f docker-compose-cpu.yml up -d opensearch dashboards langflow
+ docker compose up -d opensearch dashboards langflow
@echo "✅ Infrastructure services started!"
@echo " Langflow: http://localhost:7860"
@echo " OpenSearch: http://localhost:9200"
@@ -103,14 +103,12 @@ infra-cpu:
stop:
@echo "🛑 Stopping all containers..."
docker compose down
- docker compose -f docker-compose-cpu.yml down 2>/dev/null || true
restart: stop dev
clean: stop
@echo "🧹 Cleaning up containers and volumes..."
docker compose down -v --remove-orphans
- docker compose -f docker-compose-cpu.yml down -v --remove-orphans 2>/dev/null || true
docker system prune -f
# Local development
@@ -210,13 +208,13 @@ test-ci:
chmod 644 keys/public_key.pem 2>/dev/null || true; \
fi; \
echo "Cleaning up old containers and volumes..."; \
- docker compose -f docker-compose-cpu.yml down -v 2>/dev/null || true; \
+ docker compose down -v 2>/dev/null || true; \
echo "Pulling latest images..."; \
- docker compose -f docker-compose-cpu.yml pull; \
+ docker compose pull; \
echo "Building OpenSearch image override..."; \
docker build --no-cache -t langflowai/openrag-opensearch:latest -f Dockerfile .; \
echo "Starting infra (OpenSearch + Dashboards + Langflow + Backend + Frontend) with CPU containers"; \
- docker compose -f docker-compose-cpu.yml up -d opensearch dashboards langflow openrag-backend openrag-frontend; \
+ docker compose up -d opensearch dashboards langflow openrag-backend openrag-frontend; \
echo "Starting docling-serve..."; \
DOCLING_ENDPOINT=$$(uv run python scripts/docling_ctl.py start --port 5001 | grep "Endpoint:" | awk '{print $$2}'); \
echo "Docling-serve started at $$DOCLING_ENDPOINT"; \
@@ -288,7 +286,7 @@ test-ci:
echo ""; \
echo "Tearing down infra"; \
uv run python scripts/docling_ctl.py stop || true; \
- docker compose -f docker-compose-cpu.yml down -v 2>/dev/null || true; \
+ docker compose down -v 2>/dev/null || true; \
exit $$TEST_RESULT
# CI-friendly integration test target with local builds: builds all images, brings up infra, waits, runs tests, tears down
@@ -305,14 +303,14 @@ test-ci-local:
chmod 644 keys/public_key.pem 2>/dev/null || true; \
fi; \
echo "Cleaning up old containers and volumes..."; \
- docker compose -f docker-compose-cpu.yml down -v 2>/dev/null || true; \
+ docker compose down -v 2>/dev/null || true; \
echo "Building all images locally..."; \
docker build -t langflowai/openrag-opensearch:latest -f Dockerfile .; \
docker build -t langflowai/openrag-backend:latest -f Dockerfile.backend .; \
docker build -t langflowai/openrag-frontend:latest -f Dockerfile.frontend .; \
docker build -t langflowai/openrag-langflow:latest -f Dockerfile.langflow .; \
echo "Starting infra (OpenSearch + Dashboards + Langflow + Backend + Frontend) with CPU containers"; \
- docker compose -f docker-compose-cpu.yml up -d opensearch dashboards langflow openrag-backend openrag-frontend; \
+ docker compose up -d opensearch dashboards langflow openrag-backend openrag-frontend; \
echo "Starting docling-serve..."; \
DOCLING_ENDPOINT=$$(uv run python scripts/docling_ctl.py start --port 5001 | grep "Endpoint:" | awk '{print $$2}'); \
echo "Docling-serve started at $$DOCLING_ENDPOINT"; \
@@ -394,7 +392,7 @@ test-ci-local:
fi; \
echo "Tearing down infra"; \
uv run python scripts/docling_ctl.py stop || true; \
- docker compose -f docker-compose-cpu.yml down -v 2>/dev/null || true; \
+ docker compose down -v 2>/dev/null || true; \
exit $$TEST_RESULT
# SDK integration tests (requires running OpenRAG instance)
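With this change, `make dev` layers the GPU override on top of the base file instead of relying on a separate compose file. As a quick sanity check on the new wiring, the merged configuration can be previewed without starting anything. A minimal sketch, assuming Docker Compose v2 and the repository root as the working directory (not part of this commit):

```bash
# Render the effective configuration for the GPU stack; later -f files
# override earlier ones on conflicting keys, so this shows what `make dev` runs.
docker compose -f docker-compose.yml -f docker-compose.gpu.yml config

# List only service names, to confirm the override changes services
# without dropping any from the base file.
docker compose -f docker-compose.yml -f docker-compose.gpu.yml config --services
```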

docker-compose-cpu.yml

@@ -1,143 +0,0 @@
services:
  opensearch:
    image: langflowai/openrag-opensearch:${OPENRAG_VERSION:-latest}
    #build:
    #  context: .
    #  dockerfile: Dockerfile
    container_name: os
    depends_on:
      - openrag-backend
    environment:
      - discovery.type=single-node
      - OPENSEARCH_INITIAL_ADMIN_PASSWORD=${OPENSEARCH_PASSWORD}
    # Run security setup in background after OpenSearch starts
    command: >
      bash -c "
      # Ensure data directory has correct permissions
      sudo chown -R opensearch:opensearch /usr/share/opensearch/data || true
      # Start OpenSearch in background
      /usr/share/opensearch/opensearch-docker-entrypoint.sh opensearch &
      # Wait a bit for OpenSearch to start, then apply security config
      sleep 10 && /usr/share/opensearch/setup-security.sh &
      # Wait for background processes
      wait
      "
    ports:
      - "9200:9200"
      - "9600:9600"
    volumes:
      - ${OPENSEARCH_DATA_PATH:-./opensearch-data}:/usr/share/opensearch/data:Z

  dashboards:
    image: opensearchproject/opensearch-dashboards:3.0.0
    container_name: osdash
    depends_on:
      - opensearch
    environment:
      OPENSEARCH_HOSTS: '["https://opensearch:9200"]'
      OPENSEARCH_USERNAME: "admin"
      OPENSEARCH_PASSWORD: ${OPENSEARCH_PASSWORD}
    ports:
      - "5601:5601"

  openrag-backend:
    image: langflowai/openrag-backend:${OPENRAG_VERSION:-latest}
    # build:
    #   context: .
    #   dockerfile: Dockerfile.backend
    container_name: openrag-backend
    depends_on:
      - langflow
    environment:
      - OPENSEARCH_HOST=opensearch
      - LANGFLOW_URL=http://langflow:7860
      - LANGFLOW_PUBLIC_URL=${LANGFLOW_PUBLIC_URL}
      - LANGFLOW_AUTO_LOGIN=${LANGFLOW_AUTO_LOGIN}
      - LANGFLOW_SECRET_KEY=${LANGFLOW_SECRET_KEY}
      - LANGFLOW_SUPERUSER=${LANGFLOW_SUPERUSER}
      - LANGFLOW_SUPERUSER_PASSWORD=${LANGFLOW_SUPERUSER_PASSWORD}
      - LANGFLOW_CHAT_FLOW_ID=${LANGFLOW_CHAT_FLOW_ID}
      - LANGFLOW_INGEST_FLOW_ID=${LANGFLOW_INGEST_FLOW_ID}
      - LANGFLOW_URL_INGEST_FLOW_ID=${LANGFLOW_URL_INGEST_FLOW_ID}
      - DISABLE_INGEST_WITH_LANGFLOW=${DISABLE_INGEST_WITH_LANGFLOW:-false}
      - NUDGES_FLOW_ID=${NUDGES_FLOW_ID}
      - OPENSEARCH_PORT=9200
      - OPENSEARCH_USERNAME=admin
      - OPENSEARCH_PASSWORD=${OPENSEARCH_PASSWORD}
      - OPENAI_API_KEY=${OPENAI_API_KEY}
      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
      - WATSONX_API_KEY=${WATSONX_API_KEY}
      - WATSONX_ENDPOINT=${WATSONX_ENDPOINT}
      - WATSONX_PROJECT_ID=${WATSONX_PROJECT_ID}
      - OLLAMA_ENDPOINT=${OLLAMA_ENDPOINT}
      - GOOGLE_OAUTH_CLIENT_ID=${GOOGLE_OAUTH_CLIENT_ID}
      - GOOGLE_OAUTH_CLIENT_SECRET=${GOOGLE_OAUTH_CLIENT_SECRET}
      - MICROSOFT_GRAPH_OAUTH_CLIENT_ID=${MICROSOFT_GRAPH_OAUTH_CLIENT_ID}
      - MICROSOFT_GRAPH_OAUTH_CLIENT_SECRET=${MICROSOFT_GRAPH_OAUTH_CLIENT_SECRET}
      - WEBHOOK_BASE_URL=${WEBHOOK_BASE_URL}
      - AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}
      - AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
    volumes:
      - ./openrag-documents:/app/openrag-documents:Z
      - ./keys:/app/keys:Z
      - ./flows:/app/flows:U,z
      - ./config:/app/config:Z

  openrag-frontend:
    image: langflowai/openrag-frontend:${OPENRAG_VERSION:-latest}
    # build:
    #   context: .
    #   dockerfile: Dockerfile.frontend
    container_name: openrag-frontend
    depends_on:
      - openrag-backend
    environment:
      - OPENRAG_BACKEND_HOST=openrag-backend
    ports:
      - "3000:3000"

  langflow:
    volumes:
      - ./flows:/app/flows:U,z
    image: langflowai/openrag-langflow:${LANGFLOW_VERSION:-latest}
    # build:
    #   context: .
    #   dockerfile: Dockerfile.langflow
    container_name: langflow
    ports:
      - "7860:7860"
    environment:
      - LANGFLOW_DEACTIVATE_TRACING=true
      - OPENAI_API_KEY=${OPENAI_API_KEY}
      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
      - WATSONX_API_KEY=${WATSONX_API_KEY}
      - WATSONX_ENDPOINT=${WATSONX_ENDPOINT}
      - WATSONX_PROJECT_ID=${WATSONX_PROJECT_ID}
      - OLLAMA_BASE_URL=${OLLAMA_ENDPOINT}
      - LANGFLOW_LOAD_FLOWS_PATH=/app/flows
      - LANGFLOW_SECRET_KEY=${LANGFLOW_SECRET_KEY}
      - JWT=None
      - OWNER=None
      - OWNER_NAME=None
      - OWNER_EMAIL=None
      - CONNECTOR_TYPE=system
      - CONNECTOR_TYPE_URL=url
      - OPENRAG-QUERY-FILTER="{}"
      - OPENSEARCH_PASSWORD=${OPENSEARCH_PASSWORD}
      - FILENAME=None
      - MIMETYPE=None
      - FILESIZE=0
      - SELECTED_EMBEDDING_MODEL=${SELECTED_EMBEDDING_MODEL:-}
      - LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT=JWT,OPENRAG-QUERY-FILTER,OPENSEARCH_PASSWORD,OWNER,OWNER_NAME,OWNER_EMAIL,CONNECTOR_TYPE,FILENAME,MIMETYPE,FILESIZE,SELECTED_EMBEDDING_MODEL
      - LANGFLOW_LOG_LEVEL=DEBUG
      - LANGFLOW_AUTO_LOGIN=${LANGFLOW_AUTO_LOGIN}
      - LANGFLOW_SUPERUSER=${LANGFLOW_SUPERUSER}
      - LANGFLOW_SUPERUSER_PASSWORD=${LANGFLOW_SUPERUSER_PASSWORD}
      - LANGFLOW_NEW_USER_IS_ACTIVE=${LANGFLOW_NEW_USER_IS_ACTIVE}
      - LANGFLOW_ENABLE_SUPERUSER_CLI=${LANGFLOW_ENABLE_SUPERUSER_CLI}
      # - DEFAULT_FOLDER_NAME=OpenRAG
      - HIDE_GETTING_STARTED_PROGRESS=true
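With `docker-compose-cpu.yml` gone, the base `docker-compose.yml` has to carry every service the deleted file defined. A quick spot-check from the repository root, assuming Docker Compose v2 (not part of this diff):

```bash
# The base file should list the same five services the deleted CPU file had:
# opensearch, dashboards, openrag-backend, openrag-frontend, langflow.
docker compose config --services
```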

docker-compose.yml

@@ -133,7 +133,6 @@ services:
      - MIMETYPE=None
      - FILESIZE=0
      - SELECTED_EMBEDDING_MODEL=${SELECTED_EMBEDDING_MODEL:-}
-     - OPENSEARCH_PASSWORD=${OPENSEARCH_PASSWORD}
      - LANGFLOW_VARIABLES_TO_GET_FROM_ENVIRONMENT=JWT,OPENRAG-QUERY-FILTER,OPENSEARCH_PASSWORD,OWNER,OWNER_NAME,OWNER_EMAIL,CONNECTOR_TYPE,FILENAME,MIMETYPE,FILESIZE,SELECTED_EMBEDDING_MODEL,OPENAI_API_KEY,ANTHROPIC_API_KEY,WATSONX_API_KEY,WATSONX_ENDPOINT,WATSONX_PROJECT_ID,OLLAMA_BASE_URL
      - LANGFLOW_LOG_LEVEL=DEBUG
      - LANGFLOW_AUTO_LOGIN=${LANGFLOW_AUTO_LOGIN}
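Because the list form of `environment:` tolerates duplicate keys silently (typically the later entry wins), the rendered `docker compose config` output would not have revealed this duplicate. Grepping the source file directly is a simpler check; a sketch, not part of the diff:

```bash
# Expect exactly one hit per service that sets this variable; two hits
# within the same service block would mean the duplicate is back.
grep -n 'OPENSEARCH_PASSWORD=\${OPENSEARCH_PASSWORD}' docker-compose.yml
```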

Documentation: local deployment

@@ -114,31 +114,28 @@ The following variables are required or recommended:
PID: 27746
```
- 3. Deploy the OpenRAG containers locally using the appropriate Docker Compose file for your environment.
- Both files deploy the same services.
+ 3. Deploy the OpenRAG containers locally using the appropriate Docker Compose configuration for your environment.
- * [`docker-compose.yml`](https://github.com/langflow-ai/openrag/blob/main/docker-compose.yml): If your host machine has an NVIDIA GPU with CUDA support and compatible NVIDIA drivers, you can use this file to deploy OpenRAG with accelerated processing.
+ * **GPU-accelerated deployment**: If your host machine has an NVIDIA GPU with CUDA support and compatible NVIDIA drivers, use the base `docker-compose.yml` file with the `docker-compose.gpu.yml` override.
+ ```bash title="Docker"
+ docker compose -f docker-compose.yml -f docker-compose.gpu.yml up -d
+ ```
+ ```bash title="Podman"
+ podman compose -f docker-compose.yml -f docker-compose.gpu.yml up -d
+ ```
+ * **CPU-only deployment** (default): If your host machine doesn't have NVIDIA GPU support, use the base `docker-compose.yml` file.
+ ```bash title="Docker"
+ docker compose build
+ docker compose up -d
+ ```
+ ```bash title="Podman"
+ podman compose build
+ podman compose up -d
+ ```
- * [`docker-compose-cpu.yml`](https://github.com/langflow-ai/openrag/blob/main/docker-compose-cpu.yml): If your host machine doesn't have NVIDIA GPU support, use this file for a CPU-only OpenRAG deployment.
- ```bash title="Docker"
- docker compose -f docker-compose-cpu.yml up -d
- ```
- ```bash title="Podman"
- podman compose -f docker-compose-cpu.yml up -d
- ```
4. Wait for the OpenRAG containers to start, and then confirm that all containers are running:
```bash title="Docker"

Documentation: troubleshooting

@@ -38,7 +38,7 @@ For more information about this variable and how this variable controls Langflow
## Container out of memory errors
- Increase Docker memory allocation or use [docker-compose-cpu.yml](https://github.com/langflow-ai/openrag/blob/main/docker-compose-cpu.yml) to deploy OpenRAG.
+ Increase Docker memory allocation or use the CPU-only deployment (base `docker-compose.yml` without GPU override) to reduce memory usage.
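Before resizing anything, it helps to see which container is actually consuming the memory. A minimal check, assuming Docker (Podman users can substitute `podman stats`):

```bash
# One-shot snapshot of per-container memory use, to identify which
# service is approaching its limit before it gets OOM-killed.
docker stats --no-stream --format "table {{.Name}}\t{{.MemUsage}}\t{{.MemPerc}}"
```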
## Memory issue with Podman on macOS