From 2d24b027c9c2d8d9c4adc9ea33af200e08ec8c63 Mon Sep 17 00:00:00 2001
From: Mendon Kissling <59585235+mendonk@users.noreply.github.com>
Date: Tue, 28 Oct 2025 09:29:46 -0400
Subject: [PATCH 01/14] perf-test
---
docs/docs/get-started/what-is-openrag.mdx | 38 ++++++++++++++++++++++-
1 file changed, 37 insertions(+), 1 deletion(-)
diff --git a/docs/docs/get-started/what-is-openrag.mdx b/docs/docs/get-started/what-is-openrag.mdx
index bae90d26..ea907cb3 100644
--- a/docs/docs/get-started/what-is-openrag.mdx
+++ b/docs/docs/get-started/what-is-openrag.mdx
@@ -3,6 +3,9 @@ title: What is OpenRAG?
slug: /
---
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
OpenRAG is an open-source package for building agentic RAG systems that integrates with a wide range of orchestration tools, vector databases, and LLM providers.
OpenRAG connects and amplifies three popular, proven open-source projects into one powerful platform:
@@ -83,4 +86,37 @@ The **OpenRAG Backend** is the central orchestration service that coordinates al
**Third Party Services** like **Google Drive** connect to the **OpenRAG Backend** through OAuth authentication, allowing synchronication of cloud storage with the OpenSearch knowledge base.
-The **OpenRAG Frontend** provides the user interface for interacting with the system.
\ No newline at end of file
+The **OpenRAG Frontend** provides the user interface for interacting with the system.
+
+## Performance expectations
+
+On a local VM with 7 vCPUs and 8 GiB RAM, OpenRAG ingested ~5.03 GB across 1,083 files in ~42 minutes, or ~2.4 documents per second. You can generally expect equal or better performance on developer laptops and significantly faster on servers. Throughput scales with CPU cores, memory, storage speed, and configuration choices such as embedding model, chunk size and overlap, and concurrency.
+
+The 12 errors (~1.1%) were file‑specific and did not stop the pipeline.
+
+Ingestion dataset:
+
+* Total files: 1,083 items mounted
+* Total size on disk: 5,026,474,862 bytes (~5.03 GB)
+
+Hardware specifications:
+
+* Machine: Apple M4 Pro
+* Podman VM:
+ * Name: `podman-machine-default`
+ * Type: `applehv`
+ * vCPUs: 7
+ * Memory: 8 GiB
+ * Disk size: 100 GiB
+
+Test results:
+
+```text
+2025-09-24T22:40:45.542190Z /app/src/main.py:231 Ingesting default documents when ready disable_langflow_ingest=False
+2025-09-24T22:40:45.546385Z /app/src/main.py:270 Using Langflow ingestion pipeline for default documents file_count=1082
+...
+2025-09-24T23:19:44.866365Z /app/src/main.py:351 Langflow ingestion completed success_count=1070 error_count=12 total_files=1082
+```
+
+* Elapsed time: ~42 minutes 15 seconds (2,535 seconds)
+* Throughput: ~2.4 documents/second
\ No newline at end of file
From b44d108efb7a780b5d6372a6055ba0ec08fe3691 Mon Sep 17 00:00:00 2001
From: Mendon Kissling <59585235+mendonk@users.noreply.github.com>
Date: Tue, 28 Oct 2025 09:30:37 -0400
Subject: [PATCH 02/14] dont-need-tabs-import
---
docs/docs/get-started/what-is-openrag.mdx | 3 ---
1 file changed, 3 deletions(-)
diff --git a/docs/docs/get-started/what-is-openrag.mdx b/docs/docs/get-started/what-is-openrag.mdx
index ea907cb3..19f89ba3 100644
--- a/docs/docs/get-started/what-is-openrag.mdx
+++ b/docs/docs/get-started/what-is-openrag.mdx
@@ -3,9 +3,6 @@ title: What is OpenRAG?
slug: /
---
-import Tabs from '@theme/Tabs';
-import TabItem from '@theme/TabItem';
-
OpenRAG is an open-source package for building agentic RAG systems that integrates with a wide range of orchestration tools, vector databases, and LLM providers.
OpenRAG connects and amplifies three popular, proven open-source projects into one powerful platform:
From af7eaddfb1f7a76fca9dfdb69fdc95698647cde5 Mon Sep 17 00:00:00 2001
From: Mendon Kissling <59585235+mendonk@users.noreply.github.com>
Date: Tue, 28 Oct 2025 09:52:03 -0400
Subject: [PATCH 03/14] use-uvx
---
docs/docs/get-started/install.mdx | 49 +++++++++++++++++++------------
1 file changed, 31 insertions(+), 18 deletions(-)
diff --git a/docs/docs/get-started/install.mdx b/docs/docs/get-started/install.mdx
index 355423cf..7326dd1e 100644
--- a/docs/docs/get-started/install.mdx
+++ b/docs/docs/get-started/install.mdx
@@ -17,7 +17,7 @@ Instead of starting OpenRAG using Docker commands and manually editing values in
Once OpenRAG is running, use the TUI to monitor your application, control your containers, and retrieve logs.
-If you prefer running Docker commands and manually editing `.env` files, see [Install with Docker](/get-started/docker).
+If you prefer running Podman or Docker containers and manually editing `.env` files, see [Install OpenRAG Containers](/get-started/docker).
## Prerequisites
@@ -30,10 +30,6 @@ If you prefer running Docker commands and manually editing `.env` files, see [In
## Install the OpenRAG Python wheel {#install-python-wheel}
-:::important
-The `.whl` file is currently available as an internal download during public preview, and will be published to PyPI in a future release.
-:::
-
The OpenRAG wheel installs the Terminal User Interface (TUI) for configuring and running OpenRAG.
1. Create a new project with a virtual environment using `uv init`.
@@ -46,26 +42,43 @@ The OpenRAG wheel installs the Terminal User Interface (TUI) for configuring and
The `(venv)` prompt doesn't change, but `uv` commands will automatically use the project's virtual environment.
For more information on virtual environments, see the [uv documentation](https://docs.astral.sh/uv/pip/environments).
-2. Add the local OpenRAG wheel to your project's virtual environment.
-
- ```bash
- uv add PATH/TO/openrag-VERSION-py3-none-any.whl
- ```
- Replace `PATH/TO/` and `VERSION` with the path and version of your downloaded OpenRAG `.whl` file.
-
- For example, if your `.whl` file is in the `~/Downloads` directory, the command is `uv add ~/Downloads/openrag-0.1.8-py3-none-any.whl`.
-
-3. Ensure all dependencies are installed and updated in your virtual environment.
+2. Ensure all dependencies are installed and updated in your virtual environment.
```bash
uv sync
```
-4. Start the OpenRAG TUI.
+3. Install and start the OpenRAG TUI.
```bash
- uv run openrag
+ uvx openrag
```
+
+ Install a local wheel without uvx
+
+ If you downloaded the OpenRAG wheel to your local machine, follow these steps:
+
+ 1. Add the wheel to your project's virtual environment.
+
+ ```bash
+ uv add PATH/TO/openrag-VERSION-py3-none-any.whl
+ ```
+
+ Replace `PATH/TO/` and `VERSION` with the path and version of your downloaded OpenRAG `.whl` file.
+
+ For example, if your `.whl` file is in the `~/Downloads` directory:
+
+ ```bash
+ uv add ~/Downloads/openrag-0.1.8-py3-none-any.whl
+ ```
+
+ 2. Run OpenRAG.
+
+ ```bash
+ uv run openrag
+ ```
+
+
+4. Continue with [Set up OpenRAG with the TUI](#setup).
-5. Continue with [Set up OpenRAG with the TUI](#setup).
## Set up OpenRAG with the TUI {#setup}
From c9d4b98069aaf54573e415e230e132bf477e1b1d Mon Sep 17 00:00:00 2001
From: Mendon Kissling <59585235+mendonk@users.noreply.github.com>
Date: Tue, 28 Oct 2025 10:02:07 -0400
Subject: [PATCH 04/14] install-specific-version
---
docs/docs/get-started/install.mdx | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/docs/docs/get-started/install.mdx b/docs/docs/get-started/install.mdx
index 7326dd1e..c2da057c 100644
--- a/docs/docs/get-started/install.mdx
+++ b/docs/docs/get-started/install.mdx
@@ -51,6 +51,9 @@ The OpenRAG wheel installs the Terminal User Interface (TUI) for configuring and
```bash
uvx openrag
```
+
+   To install a specific version of the OpenRAG package, add the required version to the command, such as `uvx --from openrag==0.1.25 openrag`.
+
Install a local wheel without uvx
@@ -79,7 +82,6 @@ The OpenRAG wheel installs the Terminal User Interface (TUI) for configuring and
4. Continue with [Set up OpenRAG with the TUI](#setup).
-
## Set up OpenRAG with the TUI {#setup}
The TUI creates a `.env` file in your OpenRAG directory root and starts OpenRAG.
From 728228ad3a69b1b4bbb7d271edc13a6aba5ccd50 Mon Sep 17 00:00:00 2001
From: Mendon Kissling <59585235+mendonk@users.noreply.github.com>
Date: Tue, 28 Oct 2025 10:24:40 -0400
Subject: [PATCH 05/14] uvx-as-quickstart
---
docs/docs/get-started/install.mdx | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/docs/docs/get-started/install.mdx b/docs/docs/get-started/install.mdx
index c2da057c..d8fa252f 100644
--- a/docs/docs/get-started/install.mdx
+++ b/docs/docs/get-started/install.mdx
@@ -32,6 +32,10 @@ If you prefer running Podman or Docker containers and manually editing `.env` fi
The OpenRAG wheel installs the Terminal User Interface (TUI) for configuring and running OpenRAG.
+To quickly install and start OpenRAG, run `uvx openrag`.
+
+To first set up a project and then install OpenRAG, do the following:
+
1. Create a new project with a virtual environment using `uv init`.
```bash
From c53bffd536fcb2151a84c37b543b8c8bd5f1bdf4 Mon Sep 17 00:00:00 2001
From: Mendon Kissling <59585235+mendonk@users.noreply.github.com>
Date: Tue, 28 Oct 2025 10:47:56 -0400
Subject: [PATCH 06/14] update-readme-with-uvx-and-move-docker-steps-to-docs
---
README.md | 111 ++++++++++++------------------------------------------
1 file changed, 24 insertions(+), 87 deletions(-)
diff --git a/README.md b/README.md
index f3b3e1d4..47bf2f2a 100644
--- a/README.md
+++ b/README.md
@@ -28,100 +28,37 @@ OpenRAG is a comprehensive Retrieval-Augmented Generation platform that enables
Use the OpenRAG Terminal User Interface (TUI) to manage your OpenRAG installation without complex command-line operations.
-To launch OpenRAG with the TUI, do the following:
+To quickly install and start OpenRAG, run `uvx openrag`.
-1. Clone the OpenRAG repository.
- ```bash
- git clone https://github.com/langflow-ai/openrag.git
- cd openrag
- ```
+To first set up a project and then install OpenRAG, do the following:
-2. To start the TUI, from the repository root, run:
- ```bash
- # Install dependencies first
- uv sync
-
- # Launch the TUI
- uv run openrag
- ```
+1. Create a new project with a virtual environment using `uv init`.
- The TUI opens and guides you through OpenRAG setup.
+ ```bash
+ uv init YOUR_PROJECT_NAME
+ cd YOUR_PROJECT_NAME
+ ```
+
+ The `(venv)` prompt doesn't change, but `uv` commands will automatically use the project's virtual environment.
+ For more information on virtual environments, see the [uv documentation](https://docs.astral.sh/uv/pip/environments).
+
+2. Ensure all dependencies are installed and updated in your virtual environment.
+ ```bash
+ uv sync
+ ```
+
+3. Install and start the OpenRAG TUI.
+ ```bash
+ uvx openrag
+ ```
+
+   To install a specific version of the OpenRAG package, add the required version to the command, such as `uvx --from openrag==0.1.25 openrag`.
For the full TUI installation guide, see [TUI](https://docs.openr.ag/install).
-## Docker installation
+## Docker or Podman installation
-If you prefer to use Docker to run OpenRAG, the repository includes two Docker Compose `.yml` files.
-They deploy the same applications and containers locally, but to different environments.
-
-- [`docker-compose.yml`](https://github.com/langflow-ai/openrag/blob/main/docker-compose.yml) is an OpenRAG deployment for environments with GPU support. GPU support requires an NVIDIA GPU with CUDA support and compatible NVIDIA drivers installed on the OpenRAG host machine.
-
-- [`docker-compose-cpu.yml`](https://github.com/langflow-ai/openrag/blob/main/docker-compose-cpu.yml) is a CPU-only version of OpenRAG for systems without GPU support. Use this Docker compose file for environments where GPU drivers aren't available.
-
-Both Docker deployments depend on `docling serve` to be running on port `5001` on the host machine. This enables [Mac MLX](https://opensource.apple.com/projects/mlx/) support for document processing. Installing OpenRAG with the TUI starts `docling serve` automatically, but for a Docker deployment you must manually start the `docling serve` process.
-
-To install OpenRAG with Docker:
-
-1. Clone the OpenRAG repository.
- ```bash
- git clone https://github.com/langflow-ai/openrag.git
- cd openrag
- ```
-
-2. Install dependencies.
- ```bash
- uv sync
- ```
-
-3. Start `docling serve` on the host machine.
- ```bash
- uv run python scripts/docling_ctl.py start --port 5001
- ```
-
-4. Confirm `docling serve` is running.
- ```
- uv run python scripts/docling_ctl.py status
- ```
-
- Successful result:
- ```bash
- Status: running
- Endpoint: http://127.0.0.1:5001
- Docs: http://127.0.0.1:5001/docs
- PID: 27746
- ```
-
-5. Build and start all services.
-
- For the GPU-accelerated deployment, run:
- ```bash
- docker compose build
- docker compose up -d
- ```
-
- For environments without GPU support, run:
- ```bash
- docker compose -f docker-compose-cpu.yml up -d
- ```
-
- The OpenRAG Docker Compose file starts five containers:
- | Container Name | Default Address | Purpose |
- |---|---|---|
- | OpenRAG Backend | http://localhost:8000 | FastAPI server and core functionality. |
- | OpenRAG Frontend | http://localhost:3000 | React web interface for users. |
- | Langflow | http://localhost:7860 | AI workflow engine and flow management. |
- | OpenSearch | http://localhost:9200 | Vector database for document storage. |
- | OpenSearch Dashboards | http://localhost:5601 | Database administration interface. |
-
-6. Access the OpenRAG application at `http://localhost:3000` and continue with the [Quickstart](https://docs.openr.ag/quickstart).
-
- To stop `docling serve`, run:
-
- ```bash
- uv run python scripts/docling_ctl.py stop
- ```
-
-For more information, see [Install with Docker](https://docs.openr.ag/get-started/docker).
+For more information, see [Install OpenRAG containers](https://docs.openr.ag/get-started/docker).
## Troubleshooting
From dc55671191421a60ab2895cc57213175d6f4a985 Mon Sep 17 00:00:00 2001
From: phact
Date: Tue, 28 Oct 2025 13:26:40 -0400
Subject: [PATCH 07/14] windows check
---
src/tui/main.py | 9 +++++++++
src/tui/screens/diagnostics.py | 19 +++++++++++++++++++
src/tui/utils/platform.py | 33 +++++++++++++++++++++++++++++++++
3 files changed, 61 insertions(+)
diff --git a/src/tui/main.py b/src/tui/main.py
index 51418786..5ddd089c 100644
--- a/src/tui/main.py
+++ b/src/tui/main.py
@@ -368,6 +368,15 @@ class OpenRAGTUI(App):
def on_mount(self) -> None:
"""Initialize the application."""
+ # Check if running on native Windows and recommend WSL
+ if self.platform_detector.is_native_windows():
+ notify_with_diagnostics(
+ self,
+ "Running on native Windows. For best experience, use WSL. See diagnostics for details.",
+ severity="warning",
+ timeout=15,
+ )
+
# Check for runtime availability and show appropriate screen
if not self.container_manager.is_available():
notify_with_diagnostics(
diff --git a/src/tui/screens/diagnostics.py b/src/tui/screens/diagnostics.py
index bad456e4..a01ae302 100644
--- a/src/tui/screens/diagnostics.py
+++ b/src/tui/screens/diagnostics.py
@@ -15,6 +15,7 @@ from rich.text import Text
from ..managers.container_manager import ContainerManager
from ..utils.clipboard import copy_text_to_clipboard
+from ..utils.platform import PlatformDetector
class DiagnosticsScreen(Screen):
@@ -52,6 +53,7 @@ class DiagnosticsScreen(Screen):
def __init__(self):
super().__init__()
self.container_manager = ContainerManager()
+ self.platform_detector = PlatformDetector()
self._logger = logging.getLogger("openrag.diagnostics")
self._status_timer = None
@@ -199,6 +201,23 @@ class DiagnosticsScreen(Screen):
"""Get system information text."""
info_text = Text()
+ # Platform information
+ info_text.append("Platform Information\n", style="bold")
+ info_text.append("=" * 30 + "\n")
+ info_text.append(f"System: {self.platform_detector.platform_system}\n")
+ info_text.append(f"Machine: {self.platform_detector.platform_machine}\n")
+
+ # Windows-specific warning
+ if self.platform_detector.is_native_windows():
+ info_text.append("\n")
+ info_text.append("⚠️ Native Windows Detected\n", style="bold yellow")
+ info_text.append("-" * 30 + "\n")
+ info_text.append(self.platform_detector.get_wsl_recommendation())
+ info_text.append("\n")
+
+ info_text.append("\n")
+
+ # Container runtime information
runtime_info = self.container_manager.get_runtime_info()
info_text.append("Container Runtime Information\n", style="bold")
diff --git a/src/tui/utils/platform.py b/src/tui/utils/platform.py
index 176c60c6..ff7805bf 100644
--- a/src/tui/utils/platform.py
+++ b/src/tui/utils/platform.py
@@ -30,6 +30,15 @@ class PlatformDetector:
self.platform_system = platform.system()
self.platform_machine = platform.machine()
+ def is_native_windows(self) -> bool:
+ """
+ Check if running on native Windows (not WSL).
+
+ Returns True if running on native Windows, False otherwise.
+ WSL environments will return False since they identify as Linux.
+ """
+ return self.platform_system == "Windows"
+
def detect_runtime(self) -> RuntimeInfo:
"""Detect available container runtime and compose capabilities."""
# First check if we have podman installed
@@ -166,6 +175,26 @@ class PlatformDetector:
) as e:
return False, 0, f"Error checking Podman VM memory: {e}"
+ def get_wsl_recommendation(self) -> str:
+ """Get recommendation message for native Windows users to use WSL."""
+ return """
+⚠️ Running on native Windows detected.
+
+For the best experience, we recommend using Windows Subsystem for Linux (WSL).
+
+To set up WSL:
+ 1. Open PowerShell or Command Prompt as Administrator
+ 2. Run: wsl --install
+ 3. Restart your computer
+ 4. Set up your Linux distribution (Ubuntu recommended)
+ 5. Install Docker or Podman in WSL
+
+Learn more: https://docs.microsoft.com/en-us/windows/wsl/install
+
+Alternatively, you can use Docker Desktop for Windows:
+ https://docs.docker.com/desktop/install/windows-install/
+"""
+
def get_installation_instructions(self) -> str:
if self.platform_system == "Darwin":
return """
@@ -200,6 +229,10 @@ Docker Desktop for Windows:
Or Podman Desktop:
https://podman-desktop.io/downloads
+
+For better performance, consider using WSL:
+ Run: wsl --install
+ https://docs.microsoft.com/en-us/windows/wsl/install
"""
else:
return """
From ac43159f18c668547c1148f15fcccb8c908c3347 Mon Sep 17 00:00:00 2001
From: Mendon Kissling <59585235+mendonk@users.noreply.github.com>
Date: Tue, 28 Oct 2025 13:43:34 -0400
Subject: [PATCH 08/14] Apply suggestion from @aimurphy
Co-authored-by: April I. Murphy <36110273+aimurphy@users.noreply.github.com>
---
docs/docs/get-started/what-is-openrag.mdx | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/docs/docs/get-started/what-is-openrag.mdx b/docs/docs/get-started/what-is-openrag.mdx
index 19f89ba3..aec21cdb 100644
--- a/docs/docs/get-started/what-is-openrag.mdx
+++ b/docs/docs/get-started/what-is-openrag.mdx
@@ -87,7 +87,11 @@ The **OpenRAG Frontend** provides the user interface for interacting with the sy
## Performance expectations
-On a local VM with 7 vCPUs and 8 GiB RAM, OpenRAG ingested ~5.03 GB across 1,083 files in ~42 minutes, or ~2.4 documents per second. You can generally expect equal or better performance on developer laptops and significantly faster on servers. Throughput scales with CPU cores, memory, storage speed, and configuration choices such as embedding model, chunk size and overlap, and concurrency.
+On a local VM with 7 vCPUs and 8 GiB RAM, OpenRAG ingested approximately 5.03 GB across 1,083 files in about 42 minutes.
+This equates to approximately 2.4 seconds per document, or about 0.4 documents per second.
+
+You can generally expect equal or better performance on developer laptops and significantly faster on servers.
+Throughput scales with CPU cores, memory, storage speed, and configuration choices such as embedding model, chunk size and overlap, and concurrency.
The 12 errors (~1.1%) were file‑specific and did not stop the pipeline.
From 036f62e99adb49720fd7f69b9f153ac87236004b Mon Sep 17 00:00:00 2001
From: Mendon Kissling <59585235+mendonk@users.noreply.github.com>
Date: Tue, 28 Oct 2025 13:43:45 -0400
Subject: [PATCH 09/14] Apply suggestion from @aimurphy
Co-authored-by: April I. Murphy <36110273+aimurphy@users.noreply.github.com>
---
docs/docs/get-started/what-is-openrag.mdx | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/docs/docs/get-started/what-is-openrag.mdx b/docs/docs/get-started/what-is-openrag.mdx
index aec21cdb..e84e9fef 100644
--- a/docs/docs/get-started/what-is-openrag.mdx
+++ b/docs/docs/get-started/what-is-openrag.mdx
@@ -93,7 +93,8 @@ This equates to approximately 2.4 documents per second.
You can generally expect equal or better performance on developer laptops and significantly faster on servers.
Throughput scales with CPU cores, memory, storage speed, and configuration choices such as embedding model, chunk size and overlap, and concurrency.
-The 12 errors (~1.1%) were file‑specific and did not stop the pipeline.
+This test returned 12 errors (approximately 1.1%).
+All errors were file‑specific, and they didn't stop the pipeline.
Ingestion dataset:
From 4a4fa7b843de569191f88d0bf12fcfcfa4a5304c Mon Sep 17 00:00:00 2001
From: Mendon Kissling <59585235+mendonk@users.noreply.github.com>
Date: Tue, 28 Oct 2025 13:43:54 -0400
Subject: [PATCH 10/14] Apply suggestion from @aimurphy
Co-authored-by: April I. Murphy <36110273+aimurphy@users.noreply.github.com>
---
docs/docs/get-started/what-is-openrag.mdx | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/docs/get-started/what-is-openrag.mdx b/docs/docs/get-started/what-is-openrag.mdx
index e84e9fef..6b6f71c8 100644
--- a/docs/docs/get-started/what-is-openrag.mdx
+++ b/docs/docs/get-started/what-is-openrag.mdx
@@ -99,7 +99,7 @@ All errors were file‑specific, and they didn't stop the pipeline.
Ingestion dataset:
* Total files: 1,083 items mounted
-* Total size on disk: 5,026,474,862 bytes (~5.03 GB)
+* Total size on disk: 5,026,474,862 bytes (approximately 5.03 GB)
Hardware specifications:
From 7347180440a38222ca428e64262aed0f14e9f69b Mon Sep 17 00:00:00 2001
From: Mendon Kissling <59585235+mendonk@users.noreply.github.com>
Date: Tue, 28 Oct 2025 13:44:02 -0400
Subject: [PATCH 11/14] Apply suggestion from @aimurphy
Co-authored-by: April I. Murphy <36110273+aimurphy@users.noreply.github.com>
---
docs/docs/get-started/what-is-openrag.mdx | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/docs/docs/get-started/what-is-openrag.mdx b/docs/docs/get-started/what-is-openrag.mdx
index 6b6f71c8..129d2df9 100644
--- a/docs/docs/get-started/what-is-openrag.mdx
+++ b/docs/docs/get-started/what-is-openrag.mdx
@@ -120,5 +120,6 @@ Test results:
2025-09-24T23:19:44.866365Z /app/src/main.py:351 Langflow ingestion completed success_count=1070 error_count=12 total_files=1082
```
-* Elapsed time: ~42 minutes 15 seconds (2,535 seconds)
-* Throughput: ~2.4 documents/second
\ No newline at end of file
+Elapsed time: ~42 minutes 15 seconds (2,535 seconds)
+
+Throughput: ~2.4 seconds/document (~0.4 documents/second)
\ No newline at end of file
From ceb426e1c0c1ac4db8a0b0c576405350af4ad4ce Mon Sep 17 00:00:00 2001
From: phact
Date: Tue, 28 Oct 2025 13:59:19 -0400
Subject: [PATCH 12/14] exit
---
src/tui/main.py | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/src/tui/main.py b/src/tui/main.py
index 5ddd089c..417279c9 100644
--- a/src/tui/main.py
+++ b/src/tui/main.py
@@ -368,14 +368,15 @@ class OpenRAGTUI(App):
def on_mount(self) -> None:
"""Initialize the application."""
- # Check if running on native Windows and recommend WSL
+ # Check if running on native Windows and exit
if self.platform_detector.is_native_windows():
- notify_with_diagnostics(
- self,
- "Running on native Windows. For best experience, use WSL. See diagnostics for details.",
- severity="warning",
- timeout=15,
- )
+ print("\n" + "=" * 60)
+ print("⚠️ Native Windows Not Supported")
+ print("=" * 60)
+ print(self.platform_detector.get_wsl_recommendation())
+ print("=" * 60 + "\n")
+ self.exit(1)
+ return
# Check for runtime availability and show appropriate screen
if not self.container_manager.is_available():
From a9ac9d089411c5478c1eb0ddb2dd9a8bf0fd65bd Mon Sep 17 00:00:00 2001
From: phact
Date: Tue, 28 Oct 2025 14:02:13 -0400
Subject: [PATCH 13/14] message
---
src/tui/main.py | 22 ++++++++++++----------
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/src/tui/main.py b/src/tui/main.py
index 417279c9..e0f60df2 100644
--- a/src/tui/main.py
+++ b/src/tui/main.py
@@ -368,16 +368,6 @@ class OpenRAGTUI(App):
def on_mount(self) -> None:
"""Initialize the application."""
- # Check if running on native Windows and exit
- if self.platform_detector.is_native_windows():
- print("\n" + "=" * 60)
- print("⚠️ Native Windows Not Supported")
- print("=" * 60)
- print(self.platform_detector.get_wsl_recommendation())
- print("=" * 60 + "\n")
- self.exit(1)
- return
-
# Check for runtime availability and show appropriate screen
if not self.container_manager.is_available():
notify_with_diagnostics(
@@ -507,6 +497,18 @@ def copy_compose_files(*, force: bool = False) -> None:
def run_tui():
"""Run the OpenRAG TUI application."""
+ # Check for native Windows before launching TUI
+ from .utils.platform import PlatformDetector
+ platform_detector = PlatformDetector()
+
+ if platform_detector.is_native_windows():
+ print("\n" + "=" * 60)
+ print("⚠️ Native Windows Not Supported")
+ print("=" * 60)
+ print(platform_detector.get_wsl_recommendation())
+ print("=" * 60 + "\n")
+ sys.exit(1)
+
app = None
try:
# Keep bundled assets aligned with the packaged versions
From 6b71fe4f69d746afdb6110d950d702698a27f7d5 Mon Sep 17 00:00:00 2001
From: phact
Date: Tue, 28 Oct 2025 14:04:09 -0400
Subject: [PATCH 14/14] copy
---
src/tui/utils/platform.py | 3 ---
1 file changed, 3 deletions(-)
diff --git a/src/tui/utils/platform.py b/src/tui/utils/platform.py
index ff7805bf..5b2edbab 100644
--- a/src/tui/utils/platform.py
+++ b/src/tui/utils/platform.py
@@ -190,9 +190,6 @@ To set up WSL:
5. Install Docker or Podman in WSL
Learn more: https://docs.microsoft.com/en-us/windows/wsl/install
-
-Alternatively, you can use Docker Desktop for Windows:
- https://docs.docker.com/desktop/install/windows-install/
"""
def get_installation_instructions(self) -> str: