From 717b864fec13bc486bb6002360a6e4e8acc76882 Mon Sep 17 00:00:00 2001
From: Mendon Kissling <59585235+mendonk@users.noreply.github.com>
Date: Tue, 30 Sep 2025 10:21:42 -0400
Subject: [PATCH] partial-for-onboarding
---
docs/docs/_partial-onboarding.mdx | 51 +++++++++++++++++++++++++++++++
docs/docs/get-started/docker.mdx | 7 +++--
docs/docs/get-started/install.mdx | 41 ++-----------------------
3 files changed, 59 insertions(+), 40 deletions(-)
create mode 100644 docs/docs/_partial-onboarding.mdx
diff --git a/docs/docs/_partial-onboarding.mdx b/docs/docs/_partial-onboarding.mdx
new file mode 100644
index 00000000..aaead4ad
--- /dev/null
+++ b/docs/docs/_partial-onboarding.mdx
@@ -0,0 +1,51 @@
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+### Application onboarding
+
+The first time you start OpenRAG, whether using the TUI or a `.env` file, OpenRAG generates a `config.yaml` file if one doesn't already exist.
+The `config.yaml` file controls application configuration, including language model and embedding model provider, Docling ingestion settings, and API keys.
+
+Values entered during onboarding can be changed later in the OpenRAG **Settings** page, except for the language model and embedding model _provider_. The provider can only be selected during onboarding, and you must use the same provider for your language model and embedding model.
+
+1. Select your language model and embedding model provider, and complete the required fields.
+ **Your provider can only be selected once, and you must use the same provider for your language model and embedding model.**
+   The language model can be changed, but the embedding model cannot be changed.
+   To change your provider selection, you must restart OpenRAG and delete the `config.yaml` file.
+
+
+
+  2. If you already set a value for `OPENAI_API_KEY`, such as in the TUI or in your `.env` file, enable **Get API key from environment variable**.
+ 3. Under **Advanced settings**, select your **Embedding Model** and **Language Model**.
+ 4. To load 2 sample PDFs, enable **Sample dataset**.
+ This is recommended, but not required.
+ 5. Click **Complete**.
+
+
+
+ 2. Complete the fields for **watsonx.ai API Endpoint**, **IBM API key**, and **IBM Project ID**.
+ These values are found in your IBM watsonx deployment.
+ 3. Under **Advanced settings**, select your **Embedding Model** and **Language Model**.
+ 4. To load 2 sample PDFs, enable **Sample dataset**.
+ This is recommended, but not required.
+ 5. Click **Complete**.
+
+
+
+ :::tip
+ Ollama is not included with OpenRAG. To install Ollama, see the [Ollama documentation](https://docs.ollama.com/).
+ :::
+ 2. Enter your Ollama server's base URL address.
+ The default Ollama server address is `http://localhost:11434`.
+    If OpenRAG is running in a container, you may need to change `localhost` to access services outside of the container. For example, change `http://localhost:11434` to `http://host.docker.internal:11434` to connect to Ollama.
+ OpenRAG automatically sends a test connection to your Ollama server to confirm connectivity.
+ 3. Select the **Embedding Model** and **Language Model** your Ollama server is running.
+ OpenRAG automatically lists the available models from your Ollama server.
+ 4. To load 2 sample PDFs, enable **Sample dataset**.
+ This is recommended, but not required.
+ 5. Click **Complete**.
+
+
+
+
+6. Continue with the [Quickstart](/quickstart).
\ No newline at end of file
diff --git a/docs/docs/get-started/docker.mdx b/docs/docs/get-started/docker.mdx
index 594a26b9..219e9814 100644
--- a/docs/docs/get-started/docker.mdx
+++ b/docs/docs/get-started/docker.mdx
@@ -3,6 +3,8 @@ title: Docker deployment
slug: /get-started/docker
---
+import PartialOnboarding from '@site/docs/_partial-onboarding.mdx';
+
There are two different Docker Compose files.
They deploy the same applications and containers, but to different environments.
@@ -34,7 +36,6 @@ To install OpenRAG with Docker Compose:
```bash
OPENSEARCH_PASSWORD=your_secure_password
OPENAI_API_KEY=your_openai_api_key
-
LANGFLOW_SUPERUSER=admin
LANGFLOW_SUPERUSER_PASSWORD=your_langflow_password
LANGFLOW_SECRET_KEY=your_secret_key
@@ -75,7 +76,9 @@ To install OpenRAG with Docker Compose:
- **Backend API**: http://localhost:8000
- **Langflow**: http://localhost:7860
-Continue with the [Quickstart](/quickstart).
+6. To use the OpenRAG application and continue with application onboarding, access the frontend at `http://localhost:3000`.
+
+
## Rebuild all Docker containers
diff --git a/docs/docs/get-started/install.mdx b/docs/docs/get-started/install.mdx
index 27cafb44..040f555f 100644
--- a/docs/docs/get-started/install.mdx
+++ b/docs/docs/get-started/install.mdx
@@ -5,6 +5,7 @@ slug: /install
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+import PartialOnboarding from '@site/docs/_partial-onboarding.mdx';
OpenRAG can be installed in multiple ways:
@@ -79,46 +80,10 @@ For more information on virtual environments, see [uv](https://docs.astral.sh/uv
Command completed successfully
```
-7. To open the OpenRAG application, click **Open App**, press 6, or navigate to `http://localhost:3000`.
+7. To open the OpenRAG application and continue with application onboarding, click **Open App**, press 6, or navigate to `http://localhost:3000`.
The application opens.
-8. Select your language model and embedding model provider, and complete the required fields.
- **Your provider can only be selected once, and you must use the same provider for your language model and embedding model.**
- The language model can be changed, but the embeddings model cannot be changed.
- To change your provider selection, you must restart OpenRAG and delete the `config.yml` file.
-
-
- 9. If you already entered a value for `OPENAI_API_KEY` in the TUI in Step 5, enable **Get API key from environment variable**.
- 10. Under **Advanced settings**, select your **Embedding Model** and **Language Model**.
- 11. To load 2 sample PDFs, enable **Sample dataset**.
- This is recommended, but not required.
- 12. Click **Complete**.
-
-
-
- 9. Complete the fields for **watsonx.ai API Endpoint**, **IBM API key**, and **IBM Project ID**.
- These values are found in your IBM watsonx deployment.
- 10. Under **Advanced settings**, select your **Embedding Model** and **Language Model**.
- 11. To load 2 sample PDFs, enable **Sample dataset**.
- This is recommended, but not required.
- 12. Click **Complete**.
-
-
-
- 9. Enter your Ollama server's base URL address.
- The default Ollama server address is `http://localhost:11434`.
- Since OpenRAG is running in a container, you may need to change `localhost` to access services outside of the container. For example, change `http://localhost:11434` to `http://host.docker.internal:11434` to connect to Ollama.
- OpenRAG automatically sends a test connection to your Ollama server to confirm connectivity.
- 10. Select the **Embedding Model** and **Language Model** your Ollama server is running.
- OpenRAG automatically lists the available models from your Ollama server.
- 11. To load 2 sample PDFs, enable **Sample dataset**.
- This is recommended, but not required.
- 12. Click **Complete**.
-
-
-
-
-13. Continue with the [Quickstart](/quickstart).
+
### Advanced Setup {#advanced-setup}