From 00ce78df5c163d966c906205547f379ecc71b106 Mon Sep 17 00:00:00 2001
From: April M <36110273+aimurphy@users.noreply.github.com>
Date: Fri, 16 Jan 2026 10:47:32 -0800
Subject: [PATCH 1/7] issue 566
---
docs/docs/_partial-integrate-chat.mdx | 2 +-
docs/docs/_partial-temp-knowledge.mdx | 2 +-
docs/docs/core-components/agents.mdx | 23 ++++----
.../core-components/knowledge-filters.mdx | 46 ++++++++++------
docs/docs/core-components/knowledge.mdx | 52 ++++++++++++++-----
5 files changed, 85 insertions(+), 40 deletions(-)
diff --git a/docs/docs/_partial-integrate-chat.mdx b/docs/docs/_partial-integrate-chat.mdx
index a7bd565d..fa302b55 100644
--- a/docs/docs/_partial-integrate-chat.mdx
+++ b/docs/docs/_partial-integrate-chat.mdx
@@ -2,7 +2,7 @@ import Icon from "@site/src/components/icon/icon";
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
-1. Open the **OpenRAG OpenSearch Agent** flow in the Langflow visual editor: From the **Chat** window, click **Settings**, click **Edit in Langflow**, and then click **Proceed**.
+1. Open the **OpenRAG OpenSearch Agent** flow in the Langflow visual editor: Click **Settings**, click **Edit in Langflow**, and then click **Proceed**.
2. Optional: If you don't want to use the Langflow API key that is generated automatically when you install OpenRAG, you can create a [Langflow API key](https://docs.langflow.org/api-keys-and-authentication).
This key doesn't grant access to OpenRAG; it is only for authenticating with the Langflow API.
diff --git a/docs/docs/_partial-temp-knowledge.mdx b/docs/docs/_partial-temp-knowledge.mdx
index 7ecdf99c..da537c1e 100644
--- a/docs/docs/_partial-temp-knowledge.mdx
+++ b/docs/docs/_partial-temp-knowledge.mdx
@@ -1,5 +1,5 @@
import Icon from "@site/src/components/icon/icon";
-When using the OpenRAG **Chat**, click in the chat input field to upload a file to the current chat session.
+When using the OpenRAG **Chat**, click **Add** in the chat input field to upload a file to the current chat session.
Files added this way are processed and made available to the agent for the current conversation only.
These files aren't stored in the knowledge base permanently.
\ No newline at end of file
diff --git a/docs/docs/core-components/agents.mdx b/docs/docs/core-components/agents.mdx
index 32387a59..e902f10f 100644
--- a/docs/docs/core-components/agents.mdx
+++ b/docs/docs/core-components/agents.mdx
@@ -21,14 +21,18 @@ You can customize these flows and create your own flows using OpenRAG's embedded
All OpenRAG flows are designed to be modular, performant, and provider-agnostic.
To modify a flow in OpenRAG, click **Settings**.
-From here, you can quickly edit commonly used parameters, such as the **Language model** and **Agent Instructions**.
-To further explore and edit the flow, click **Edit in Langflow** to launch the embedded [Langflow visual editor](https://docs.langflow.org/concepts-overview) where you can fully [customize the flow](https://docs.langflow.org/concepts-flows) to suit your use case.
+From here, you can manage model provider configurations and edit commonly used parameters, such as the **Language model** and **Agent Instructions**.
+To further explore and edit a flow, click **Edit in Langflow** to launch the embedded [Langflow visual editor](https://docs.langflow.org/concepts-overview) where you can fully [customize the flow](https://docs.langflow.org/concepts-flows) to suit your use case.
-For example, to view and edit the built-in **Chat** flow (the **OpenRAG OpenSearch Agent** flow), do the following:
+For example, the following steps explain how to view and edit the built-in **Agent** flow, which is the **OpenRAG OpenSearch Agent** flow used for the OpenRAG **Chat**:
-1. In OpenRAG, click **Chat**.
+1. In OpenRAG, click **Settings**, and then find the **Agent** section.
-2. Click **Settings**, and then click **Edit in Langflow** to launch the Langflow visual editor in a new browser window.
+2. If you only need to edit the language model or agent instructions, edit those fields directly on the **Settings** page.
+Language model changes are saved automatically.
+To apply new instructions, click **Save Agent Instructions**.
+
+3. To edit all flow settings and components with full customization capabilities, click **Edit in Langflow** to launch the Langflow visual editor in a new browser tab.
If prompted to acknowledge that you are entering Langflow, click **Proceed**.
@@ -36,13 +40,12 @@ For example, to view and edit the built-in **Chat** flow (the **OpenRAG OpenSear

-3. Modify the flow as desired, and then press Command+S (Ctrl+S) to save your changes.
+4. Modify the flow as desired, and then press Command+S (Ctrl+S) to save your changes.
- You can close the Langflow browser window, or leave it open if you want to continue experimenting with the flow editor.
+ You can close the Langflow browser tab, or leave it open if you want to continue experimenting with the flow editor.
- :::tip
- If you modify the built-in **Chat** flow, make sure you click in the **Conversations** tab to start a new conversation. This ensures that the chat doesn't persist any context from the previous conversation with the original flow settings.
- :::
+5. After you modify any **Agent** flow settings, go to the OpenRAG **Chat**, and then click **Start new conversation** in the **Conversations** list.
+This ensures that the chat doesn't persist any context from the previous conversation with the original flow settings.
### Revert a built-in flow to its original configuration {#revert-a-built-in-flow-to-its-original-configuration}
diff --git a/docs/docs/core-components/knowledge-filters.mdx b/docs/docs/core-components/knowledge-filters.mdx
index 2197b5f7..f2a2c2b6 100644
--- a/docs/docs/core-components/knowledge-filters.mdx
+++ b/docs/docs/core-components/knowledge-filters.mdx
@@ -26,36 +26,52 @@ After uploading your own documents, it is recommended that you create your own f
To create a knowledge filter, do the following:
-1. Click **Knowledge**, and then click **Knowledge Filters**.
+1. Click **Knowledge**, and then click **Knowledge Filters**.
-2. Enter a **Name** and **Description**, and then click **Create Filter**.
+2. Enter a **Name**.
- By default, new filters match all documents in your knowledge base.
- Modify the filter to customize it.
+3. Optional: Click the filter icon next to the filter name to select a different icon and color for the filter.
+This is purely cosmetic, but it can help you visually distinguish different sets of filters, such as different projects or sources.
-3. To modify the filter, click **Knowledge**, and then click your new filter. You can edit the following settings:
+4. Optional: Enter a **Description**.
- * **Search Query**: Enter text for semantic search, such as `financial reports from Q4`.
- * **Data Sources**: Select specific data sources or folders to include.
+5. Customize the filter settings.
+
+ By default, filters match all documents in your knowledge base.
+ Use the filter settings to narrow the scope of documents that the filter captures:
+
+ * **Search Query**: Enter a natural language text string for semantic search.
+ When you apply a filter that has a **Search Query**, only documents matching the search query are included.
+ It is recommended that you use the **Score Threshold** setting to avoid returning irrelevant documents.
+ * **Data Sources**: Select specific files and folders to include in the filter.
+ This is useful if you want to create a filter for a specific project or topic and you know the specific documents you want to include.
+ Similarly, if you upload a folder of documents, you might want to create a filter that only includes the documents from that folder.
* **Document Types**: Filter by file type.
* **Owners**: Filter by the user that uploaded the documents.
+ **Anonymous User** means a document was uploaded in an OpenRAG environment where OAuth isn't configured.
* **Connectors**: Filter by [upload source](/ingestion), such as the local file system or a Google Drive OAuth connector.
- * **Response Limit**: Set the maximum number of results to return from the knowledge base. The default is `10`.
- * **Score Threshold**: Set the minimum relevance score for similarity search. The default score is `0`.
+ * **Response Limit**: Set the maximum number of results to return from the knowledge base. The default is `10`, which means the filter returns only the top 10 most relevant documents.
+ * **Score Threshold**: Set the minimum relevance score for similarity search. The default score is `0`. A threshold is recommended to avoid returning irrelevant documents.
-4. To save your changes, click **Update Filter**.
+6. Click **Create Filter**.
+
+## Edit a filter
+
+To modify a filter, click **Knowledge**, and then click the filter you want to edit in the **Knowledge Filters** list.
+On the filter settings pane, edit the filter as desired, and then click **Update Filter**.
## Apply a filter {#apply-a-filter}
-* **Apply a global filter**: Click **Knowledge**, and then enable the toggle next to your preferred filter. Only one filter can be the global filter. The global filter applies to all chat sessions.
-
-* **Apply a chat filter**: In the **Chat** window, click **Filter**, and then select the filter to apply.
+In the OpenRAG **Chat**, click **Filter**, and then select the filter to apply.
Chat filters apply to one chat session only.
+You can also use filters when [browsing the **Knowledge** page](/knowledge#browse-knowledge).
+This is a helpful way to test filters and manage knowledge bases that have many documents.
+
## Delete a filter
1. Click **Knowledge**.
-2. Click the filter that you want to delete.
+2. In the **Knowledge Filters** list, click the filter that you want to delete.
-3. Click **Delete Filter**.
\ No newline at end of file
+3. In the filter settings pane, click **Delete Filter**.
\ No newline at end of file
diff --git a/docs/docs/core-components/knowledge.mdx b/docs/docs/core-components/knowledge.mdx
index 1a603bd0..7d6a527b 100644
--- a/docs/docs/core-components/knowledge.mdx
+++ b/docs/docs/core-components/knowledge.mdx
@@ -23,7 +23,22 @@ You can configure how documents are ingested and how the **Chat** interacts with
The **Knowledge** page lists the documents OpenRAG has ingested into your OpenSearch database, specifically in an [OpenSearch index](https://docs.opensearch.org/latest/getting-started/intro/#index) named `documents`.
To explore the raw contents of your knowledge base, click **Knowledge** to get a list of all ingested documents.
-Click a document to view the chunks produced from splitting the document during ingestion.
+
+Click a document to view the chunks produced from splitting the document during ingestion as well as technical details related to chunking.
+
+For each document, the **Knowledge** page provides metadata, including the size, type, user that uploaded the document, the number of chunks created from the document, and the embedding model and dimensions used to embed the document.
+
+The search field at the top of the **Knowledge** page allows you to search for specific documents by name or contents, or by applying a knowledge filter:
+
+* To search with a text string, enter your search string in the search field, and then press Enter.
+
+* To apply a [knowledge filter](/knowledge-filters) when browsing your knowledge base, click the filter in the **Knowledge Filters** list.
+The filter settings pane opens, and the filter appears in the search field.
+To remove the filter, close the filter settings pane or clear the filter from the search field.
+
+ If a knowledge filter contains a search query, that query is applied in addition to any string you enter in the search field.
+
+When you search, the **Avg score** column shows how relevant each document is to your search query.
### Default documents {#default-documents}
@@ -81,8 +96,10 @@ The default embedding dimension is `1536`, and the default model is the OpenAI `
If you want to use an unsupported model, you must manually set the model in your [OpenRAG `.env` file](/reference/configuration).
If you use an unsupported embedding model that doesn't have defined dimensions in `settings.py`, then OpenRAG falls back to the default dimensions (1536) and logs a warning. OpenRAG's OpenSearch instance and flows continue to work, but [similarity search](https://www.ibm.com/think/topics/vector-search) quality can be affected if the actual model dimensions aren't 1536.
-To change the embedding model after onboarding, it is recommended that you modify the embedding model setting in the OpenRAG **Settings** page or in your [OpenRAG `.env` file](/reference/configuration).
-This will automatically update all relevant [OpenRAG flows](/agents) to use the new embedding model configuration.
+To change the embedding model after onboarding, it is recommended that you modify the embedding model configuration on the OpenRAG **Settings** page or in your [OpenRAG `.env` file](/reference/configuration).
+This ensures that all relevant [OpenRAG flows](/agents) are updated to use the new embedding model configuration.
+
+If you edit these settings in the `.env` or `docker-compose` files, you must [stop and restart the OpenRAG containers](/manage-services#stop-and-start-containers) to apply the changes.
### Set Docling parameters
@@ -90,32 +107,36 @@ OpenRAG uses [Docling](https://docling-project.github.io/docling/) for document
When you [upload documents](/ingestion), Docling processes the files, splits them into chunks, and stores them as separate, structured documents in your OpenSearch knowledge base.
+#### Select a Docling implementation
+
You can use either Docling Serve or OpenRAG's built-in Docling ingestion pipeline to process documents.
* **Docling Serve ingestion**: By default, OpenRAG uses [Docling Serve](https://github.com/docling-project/docling-serve).
This means that OpenRAG starts a `docling serve` process on your local machine and runs Docling ingestion through an API service.
* **Built-in Docling ingestion**: If you want to use OpenRAG's built-in Docling ingestion pipeline instead of the separate Docling Serve service, set `DISABLE_INGEST_WITH_LANGFLOW=true` in your [OpenRAG environment variables](/reference/configuration#document-processing-settings).
+The built-in pipeline uses the Docling processor directly instead of through the Docling Serve API.
+For the underlying functionality, see [`processors.py`](https://github.com/langflow-ai/openrag/blob/main/src/models/processors.py#L58) in the OpenRAG repository.
- The built-in pipeline uses the Docling processor directly instead of through the Docling Serve API.
+#### Configure Docling ingestion settings
- For the underlying functionality, see [`processors.py`](https://github.com/langflow-ai/openrag/blob/main/src/models/processors.py#L58) in the OpenRAG repository.
-
-To modify the Docling ingestion and embedding parameters, click **Settings** in the OpenRAG user interface.
+To modify the Docling document processing and embedding parameters, click **Settings** in OpenRAG, and then find the **Knowledge Ingest** section.
:::tip
-OpenRAG warns you if `docling serve` isn't running.
+The TUI warns you if `docling serve` isn't running.
For information about starting and stopping OpenRAG native services, like Docling, see [Manage OpenRAG services](/manage-services).
:::
+You can edit the following parameters:
+
* **Embedding model**: Select the model to use to generate vector embeddings for your documents.
This is initially set during installation.
- The recommended way to change this setting is in the OpenRAG **Settings** or your [OpenRAG `.env` file](/reference/configuration).
- This will automatically update all relevant [OpenRAG flows](/agents) to use the new embedding model configuration.
+ The recommended way to change this setting is in the OpenRAG **Settings** or your [OpenRAG `.env` file](/reference/configuration).
+ This ensures that all relevant [OpenRAG flows](/agents) are updated to use the new embedding model configuration.
If you uploaded documents prior to changing the embedding model, you can [create filters](/knowledge-filters) to separate documents embedded with different models, or you can reupload all documents to regenerate embeddings with the new model.
- If you want to use multiple embeddings models, similarity search (in the **Chat**) can take longer as it searching each model's embeddings separately.
+ If you want to use multiple embeddings models, similarity search (in the **Chat**) can take longer as it searches each model's embeddings separately.
* **Chunk size**: Set the number of characters for each text chunk when breaking down a file.
Larger chunks yield more context per chunk, but can include irrelevant information. Smaller chunks yield more precise semantic search, but can lack context.
@@ -125,7 +146,7 @@ The default value is 1000 characters, which is usually a good balance between co
Use larger overlap values for documents where context is most important. Use smaller overlap values for simpler documents or when optimization is most important.
The default value is 200 characters, which represents an overlap of 20 percent if the **Chunk size** is 1000. This is suitable for general use. For faster processing, decrease the overlap to approximately 10 percent. For more complex documents where you need to preserve context across chunks, increase it to approximately 40 percent.
-* **Table Structure**: Enables Docling's [`DocumentConverter`](https://docling-project.github.io/docling/reference/document_converter/) tool for parsing tables. Instead of treating tables as plain text, tables are output as structured table data with preserved relationships and metadata. This option is enabled by default.
+* **Table structure**: Enables Docling's [`DocumentConverter`](https://docling-project.github.io/docling/reference/document_converter/) tool for parsing tables. Instead of treating tables as plain text, tables are output as structured table data with preserved relationships and metadata. This option is enabled by default.
* **OCR**: Enables Optical Character Recognition (OCR) processing when extracting text from images and ingesting scanned documents. This setting is best suited for processing text-based documents faster with Docling's [`DocumentConverter`](https://docling-project.github.io/docling/reference/document_converter/). Images are ignored and not processed.
@@ -147,7 +168,12 @@ To change this location, modify the **Documents Paths** variable in either the [
This is a destructive operation that cannot be undone.
:::
-To clear your entire knowledge base, [reset your OpenRAG containers](/manage-services#reset-containers) or [reinstall OpenRAG](/reinstall).
+To delete documents from your knowledge base, click **Knowledge**, use the checkboxes to select one or more documents, and then click **Delete**.
+If you select the checkbox at the top of the list, all documents are selected, and clicking **Delete** deletes your entire knowledge base.
+
+To delete an individual document, you can also click **More** next to that document, and then select **Delete**.
+
+To completely clear your entire knowledge base and OpenSearch index, [reset your OpenRAG containers](/manage-services#reset-containers) or [reinstall OpenRAG](/reinstall).
## See also
From 7d22747ac8d3cb1bc093f7dfb03133343300b8bd Mon Sep 17 00:00:00 2001
From: April M <36110273+aimurphy@users.noreply.github.com>
Date: Fri, 16 Jan 2026 11:12:36 -0800
Subject: [PATCH 2/7] change chat flow to agent flow
---
docs/docs/core-components/knowledge.mdx | 2 +-
docs/docs/reference/configuration.mdx | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/docs/docs/core-components/knowledge.mdx b/docs/docs/core-components/knowledge.mdx
index 7d6a527b..6686475c 100644
--- a/docs/docs/core-components/knowledge.mdx
+++ b/docs/docs/core-components/knowledge.mdx
@@ -64,7 +64,7 @@ An [OpenSearch index](https://docs.opensearch.org/latest/getting-started/intro/#
By default, all documents you upload to your OpenRAG knowledge base are stored in an index named `documents`.
It is possible to change the index name by [editing the ingestion flow](/agents#inspect-and-modify-flows).
-However, this can impact dependent processes, such as the [filters](/knowledge-filters) and [**Chat**](/chat) flow, that reference the `documents` index by default.
+However, this can impact dependent processes, such as the [filters](/knowledge-filters) and [**Chat**](/chat), that reference the `documents` index by default.
Make sure you edit other flows as needed to ensure all processes use the same index name.
If you encounter errors or unexpected behavior after changing the index name, you can [revert the flows to their original configuration](/agents#revert-a-built-in-flow-to-its-original-configuration), or [delete knowledge](/knowledge#delete-knowledge) to clear the existing documents from your knowledge base.
diff --git a/docs/docs/reference/configuration.mdx b/docs/docs/reference/configuration.mdx
index 4e011cb4..8ba77b8f 100644
--- a/docs/docs/reference/configuration.mdx
+++ b/docs/docs/reference/configuration.mdx
@@ -96,7 +96,7 @@ For better security, it is recommended to set `LANGFLOW_SUPERUSER_PASSWORD` so t
| `LANGFLOW_SUPERUSER_PASSWORD` | Not set | Langflow administrator password. If this variable isn't set, then the Langflow server starts _without_ authentication enabled. It is recommended to set `LANGFLOW_SUPERUSER_PASSWORD` so the [Langflow server starts with authentication enabled](https://docs.langflow.org/api-keys-and-authentication#start-a-langflow-server-with-authentication-enabled). |
| `LANGFLOW_URL` | `http://localhost:7860` | URL for the Langflow instance. |
| `LANGFLOW_CHAT_FLOW_ID`, `LANGFLOW_INGEST_FLOW_ID`, `NUDGES_FLOW_ID` | Built-in flow IDs | These variables are set automatically to the IDs of the chat, ingestion, and nudges [flows](/agents). The default values are found in [`.env.example`](https://github.com/langflow-ai/openrag/blob/main/.env.example). Only change these values if you want to replace a built-in flow with your own custom flow. The flow JSON must be present in your version of the OpenRAG codebase. For example, if you [deploy self-managed services](/docker), you can add the flow JSON to your local clone of the OpenRAG repository before deploying OpenRAG. |
-| `SYSTEM_PROMPT` | `You are a helpful AI assistant with access to a knowledge base. Answer questions based on the provided context.` | System prompt instructions for the agent driving the **Chat** flow. |
+| `SYSTEM_PROMPT` | `You are a helpful AI assistant with access to a knowledge base. Answer questions based on the provided context.` | System prompt instructions for the agent driving the **Agent** flow (OpenRAG **Chat**). |
## OAuth provider settings
From f080ae0e2af2685a03065b2f34bc7a2e71ce903b Mon Sep 17 00:00:00 2001
From: April M <36110273+aimurphy@users.noreply.github.com>
Date: Fri, 16 Jan 2026 13:26:14 -0800
Subject: [PATCH 3/7] more detail about nudges flow
---
docs/docs/core-components/agents.mdx | 20 +++++++++++++++-----
docs/docs/core-components/chat.mdx | 18 ++++++++++++------
2 files changed, 27 insertions(+), 11 deletions(-)
diff --git a/docs/docs/core-components/agents.mdx b/docs/docs/core-components/agents.mdx
index e902f10f..f8df6126 100644
--- a/docs/docs/core-components/agents.mdx
+++ b/docs/docs/core-components/agents.mdx
@@ -20,11 +20,18 @@ You can customize these flows and create your own flows using OpenRAG's embedded
All OpenRAG flows are designed to be modular, performant, and provider-agnostic.
-To modify a flow in OpenRAG, click **Settings**.
-From here, you can manage model provider configurations and edit commonly used parameters, such as the **Language model** and **Agent Instructions**.
-To further explore and edit a flow, click **Edit in Langflow** to launch the embedded [Langflow visual editor](https://docs.langflow.org/concepts-overview) where you can fully [customize the flow](https://docs.langflow.org/concepts-flows) to suit your use case.
+To view and modify a flow in OpenRAG, click **Settings**.
+From here, you can manage OAuth connectors, model providers, and common parameters for the **Agent** and **Knowledge Ingestion** flows.
-For example, the following steps explain how to view and edit the built-in **Agent** flow, which is the **OpenRAG OpenSearch Agent** flow used for the OpenRAG **Chat**:
+To further explore and edit flows, click **Edit in Langflow** to launch the embedded [Langflow visual editor](https://docs.langflow.org/concepts-overview) where you can fully [customize the flow](https://docs.langflow.org/concepts-flows) to suit your use case.
+
+:::tip
+After you click **Edit in Langflow**, you can access and edit all of OpenRAG's built-in flows from the Langflow editor's [**Projects** page](https://docs.langflow.org/concepts-flows#projects).
+
+If you edit any flows other than the **Agent** or **Knowledge Ingestion** flows, it is recommended that you [export the flows](https://docs.langflow.org/concepts-flows-import) before editing so you can revert them to their original state if needed.
+:::
+
+For example, the following steps explain how to edit the built-in **Agent** flow, which is the **OpenRAG OpenSearch Agent** flow used for the OpenRAG **Chat**:
1. In OpenRAG, click **Settings**, and then find the **Agent** section.
@@ -49,9 +56,12 @@ This ensures that the chat doesn't persist any context from the previous convers
### Revert a built-in flow to its original configuration {#revert-a-built-in-flow-to-its-original-configuration}
-After you edit a built-in flow, you can click **Restore flow** on the **Settings** page to revert the flow to its original state when you first installed OpenRAG.
+After you edit the **Agent** or **Knowledge Ingestion** built-in flows, you can click **Restore flow** on the **Settings** page to revert either flow to its original state when you first installed OpenRAG.
This is a destructive action that discards all customizations to the flow.
+This option isn't available for other built-in flows such as the **Nudges** flow.
+To restore these flows to their original state, you must reimport the flow from a backup (if you exported one before editing), or [reset](/manage-services#reset-containers) or [reinstall](/reinstall) OpenRAG.
+
## Build custom flows and use other Langflow functionality
In addition to OpenRAG's built-in flows, all Langflow features are available through OpenRAG, including the ability to [create your own flows](https://docs.langflow.org/concepts-flows) and popular extensibility features such as the following:
diff --git a/docs/docs/core-components/chat.mdx b/docs/docs/core-components/chat.mdx
index 06fe60f1..9101925e 100644
--- a/docs/docs/core-components/chat.mdx
+++ b/docs/docs/core-components/chat.mdx
@@ -11,7 +11,7 @@ import PartialTempKnowledge from '@site/docs/_partial-temp-knowledge.mdx';
After you [upload documents to your knowledge base](/ingestion), you can use the OpenRAG **Chat** feature to interact with your knowledge through natural language queries.
-The OpenRAG **Chat** uses an LLM-powered agent to understand your queries, retrieve relevant information from your knowledge base, and generate context-aware responses.
+The OpenRAG **Chat** uses an LLM-powered agent to understand your queries, retrieve relevant information from your knowledge base, and generate context-aware responses.
The agent can also fetch information from URLs and new documents that you provide during the chat session.
To limit the knowledge available to the agent, use [filters](/knowledge-filters).
@@ -24,7 +24,7 @@ Try chatting, uploading documents, and modifying chat settings in the [quickstar
## OpenRAG OpenSearch Agent flow {#flow}
-When you use the OpenRAG **Chat**, the **OpenRAG OpenSearch Agent** flow runs in the background to retrieve relevant information from your knowledge base and generate a response.
+When you use the OpenRAG **Chat**, the **OpenRAG OpenSearch Agent** flow runs in the background to retrieve relevant information from your knowledge base and generate a response.
If you [inspect the flow in Langflow](/agents#inspect-and-modify-flows), you'll see that it is comprised of eight components that work together to ingest chat messages, retrieve relevant information from your knowledge base, and then generate responses.
When you inspect this flow, you can edit the components to customize the agent's behavior.
@@ -32,7 +32,7 @@ When you inspect this flow, you can edit the components to customize the agent's

* [**Chat Input** component](https://docs.langflow.org/chat-input-and-output#chat-input): This component starts the flow when it receives a chat message. It is connected to the **Agent** component's **Input** port.
-When you use the OpenRAG **Chat**, your chat messages are passed to the **Chat Input** component, which then sends them to the **Agent** component for processing.
+When you use the OpenRAG **Chat**, your chat messages are passed to the **Chat Input** component, which then sends them to the **Agent** component for processing.
* [**Agent** component](https://docs.langflow.org/components-agents): This component orchestrates the entire flow by processing chat messages, searching the knowledge base, and organizing the retrieved information into a cohesive response.
The agent's general behavior is defined by the prompt in the **Agent Instructions** field and the model connected to the **Language Model** port.
@@ -73,12 +73,18 @@ If no knowledge filter is set, then the `OPENRAG-QUERY-FILTER` variable is empty
## Nudges {#nudges}
-When you use the OpenRAG **Chat**, the **OpenRAG OpenSearch Nudges** flow runs in the background to pull additional context from your knowledge base and chat history.
+When you use the OpenRAG **Chat**, the **OpenRAG OpenSearch Nudges** flow runs in the background to pull additional context from your knowledge base and chat history.
-Nudges appear as prompts in the chat.
-Click a nudge to accept it and provide the nudge's context to the OpenRAG **Chat** agent (the **OpenRAG OpenSearch Agent** flow).
+Nudges appear as prompts in the chat, and they are based on the contents of your OpenRAG OpenSearch knowledge base.
+Click a nudge to accept it and start a chat based on the nudge.
Like OpenRAG's other built-in flows, you can [inspect the flow in Langflow](/agents#inspect-and-modify-flows), and you can customize it if you want to change the nudge behavior.
+However, this flow is specifically designed to work with the OpenRAG **Chat** and knowledge base.
+Major changes to this flow might break the nudge functionality or produce irrelevant nudges.
+
+The **Nudges** flow consists of **Embedding model**, **Language model**, **OpenSearch**, **Input/Output**, and other components that browse your knowledge base, identify key themes and possible insights, and then produce prompts based on the findings.
+
+For example, if your knowledge base contains documents about cybersecurity, possible nudges might include `Explain zero trust architecture principles` or `How to identify a social engineering attack`.
## Upload documents to the chat
From 5c45395df5941684088111860e2a0c6610e300a7 Mon Sep 17 00:00:00 2001
From: April M <36110273+aimurphy@users.noreply.github.com>
Date: Fri, 16 Jan 2026 14:52:38 -0800
Subject: [PATCH 4/7] ingestion status
---
docs/docs/_partial-anonymous-user-owner.mdx | 1 +
docs/docs/_partial-gpu-mode-tip.mdx | 2 +-
docs/docs/core-components/ingestion.mdx | 51 +++++++++++--------
.../core-components/knowledge-filters.mdx | 24 ++++++---
docs/docs/core-components/knowledge.mdx | 48 +++++++++++++----
docs/docs/get-started/docker.mdx | 2 +-
6 files changed, 90 insertions(+), 38 deletions(-)
create mode 100644 docs/docs/_partial-anonymous-user-owner.mdx
diff --git a/docs/docs/_partial-anonymous-user-owner.mdx b/docs/docs/_partial-anonymous-user-owner.mdx
new file mode 100644
index 00000000..35544305
--- /dev/null
+++ b/docs/docs/_partial-anonymous-user-owner.mdx
@@ -0,0 +1 @@
+In no-auth mode, all documents are attributed to **Anonymous User** because there is no distinct document ownership or unique JWTs. For more control over document ownership and visibility, use OAuth mode. For more information, see [OpenSearch authentication and document access](/knowledge#auth).
\ No newline at end of file
diff --git a/docs/docs/_partial-gpu-mode-tip.mdx b/docs/docs/_partial-gpu-mode-tip.mdx
index d9d229fb..f6e672db 100644
--- a/docs/docs/_partial-gpu-mode-tip.mdx
+++ b/docs/docs/_partial-gpu-mode-tip.mdx
@@ -2,4 +2,4 @@ GPU acceleration isn't required for most use cases.
OpenRAG's CPU-only deployment doesn't prevent you from using GPU acceleration in external services, such as Ollama servers.
GPU acceleration is required only for specific use cases, typically involving customization of the ingestion flows or ingestion logic.
-For example, writing alternate ingest logic in OpenRAG that uses GPUs directly in the container, or customizing the ingestion flows to use Langflow's Docling component with GPU acceleration instead of OpenRAG's `docling serve` service.
\ No newline at end of file
+For example, writing alternate ingest logic in OpenRAG that uses GPUs directly in the container, or customizing the ingestion flows to use Langflow's Docling component with GPU acceleration instead of OpenRAG's Docling Serve service.
\ No newline at end of file
diff --git a/docs/docs/core-components/ingestion.mdx b/docs/docs/core-components/ingestion.mdx
index 5f456a7e..386ae382 100644
--- a/docs/docs/core-components/ingestion.mdx
+++ b/docs/docs/core-components/ingestion.mdx
@@ -171,26 +171,37 @@ The agent can call this component to fetch web content from a given URL, and the
Like all OpenRAG flows, you can [inspect the flow in Langflow](/agents#inspect-and-modify-flows), and you can customize it.
For more information about MCP in Langflow, see the Langflow documentation on [MCP clients](https://docs.langflow.org/mcp-client) and [MCP servers](https://docs.langflow.org/mcp-tutorial).
-## Monitor ingestion
+## Monitor ingestion {#monitor-ingestion}
-Document ingestion tasks run in the background.
+Depending on the amount of data to ingest, document ingestion can take a few seconds, minutes, or longer.
+For this reason, document ingestion tasks run in the background.
In the OpenRAG user interface, a badge is shown on **Tasks** when OpenRAG tasks are active.
-Click **Tasks** to inspect and cancel tasks:
+Click **Tasks** to inspect and cancel tasks.
+Tasks are separated into multiple sections:
-* **Active Tasks**: All tasks that are **Pending**, **Running**, or **Processing**.
-For each active task, depending on its state, you can find the task ID, start time, duration, number of files processed, and the total files enqueued for processing.
+* The **Active Tasks** section includes all tasks that are **Pending**, **Running**, or **Processing**:
-* **Pending**: The task is queued and waiting to start.
+ * **Pending**: The task is queued and waiting to start.
+ * **Running**: The task is actively processing files.
+ * **Processing**: The task is performing ingestion operations.
-* **Running**: The task is actively processing files.
+ To stop an active task, click **Cancel**. Canceling a task stops processing immediately and marks the ingestion as failed.
-* **Processing**: The task is performing ingestion operations.
+* The **Recent Tasks** section lists recently finished tasks.
-* **Failed**: Something went wrong during ingestion, or the task was manually canceled.
-For troubleshooting advice, see [Troubleshoot ingestion](#troubleshoot-ingestion).
+ :::warning
+ **Completed** doesn't mean success.
-To stop an active task, click **Cancel**. Canceling a task stops processing immediately and marks the task as **Failed**.
+ A completed task can report successful ingestions, failed ingestions, or both, depending on the number of files processed.
+ :::
+
+ Check the **Success** and **Failed** counts for each completed task to determine the overall success rate.
+
+ **Failed** means something went wrong during ingestion, or the task was manually canceled.
+ For more information, see [Troubleshoot ingestion](#troubleshoot-ingestion).
+
+For each task, depending on its state, you can find the task ID, start time, duration, number of files processed successfully, number of files that failed, and the number of files enqueued for processing.
### Ingestion performance expectations
@@ -247,9 +258,9 @@ The following issues can occur during document ingestion.
If an ingestion task fails, do the following:
-* Make sure you are uploading supported file types.
-* Split excessively large files into smaller files before uploading.
-* Remove unusual embedded content, such as videos or animations, before uploading. Although Docling can replace some non-text content with placeholders during ingestion, some embedded content might cause errors.
+* Make sure you uploaded only supported file types.
+* Split very large files into smaller files.
+* Remove unusual or complex embedded content, such as videos or animations. Although Docling can replace some non-text content with placeholders during ingestion, some embedded content might cause errors.
* Make sure your Podman/Docker VM has sufficient memory for the ingestion tasks.
The minimum recommendation is 8 GB of RAM.
If you regularly upload large files, more RAM is recommended.
@@ -261,17 +272,17 @@ For more information, see [Memory issue with Podman on macOS](/support/troublesh
If the OpenRAG **Chat** doesn't seem to use your documents correctly, [browse your knowledge base](/knowledge#browse-knowledge) to confirm that the documents are uploaded in full, and the chunks are correct.
If the documents are present and well-formed, check your [knowledge filters](/knowledge-filters).
-If a global filter is applied, make sure the expected documents are included in the global filter.
-If the global filter excludes any documents, the agent cannot access those documents unless you apply a chat-level filter or change the global filter.
+If you applied a filter to the chat, make sure the expected documents aren't excluded by the filter settings.
+You can test this by applying the filter when you [browse the knowledge base](/knowledge#browse-knowledge).
+If the filter excludes any documents, the agent cannot access those documents.
+Be aware that some settings create dynamic filters that don't always produce the same results, such as a **Search query** combined with a low **Response limit**.
-If text is missing or incorrectly processed, you need to reupload the documents after modifying the ingestion parameters or the documents themselves.
+If the document chunks have missing, incorrect, or unexpected text, you must [delete the documents](/knowledge#delete-knowledge) from your knowledge base, modify the [ingestion parameters](/knowledge#knowledge-ingestion-settings) or the documents themselves, and then reingest the documents.
For example:
* Break combined documents into separate files for better metadata context.
* Make sure scanned documents are legible enough for extraction, and enable the **OCR** option. Poorly scanned documents might require additional preparation or rescanning before ingestion.
-* Adjust the **Chunk Size** and **Chunk Overlap** settings to better suit your documents. Larger chunks provide more context but can include irrelevant information, while smaller chunks yield more precise semantic search but can lack context.
-
-For more information about modifying ingestion parameters and flows, see [Knowledge ingestion settings](/knowledge#knowledge-ingestion-settings).
+* Adjust the **Chunk size** and **Chunk overlap** settings to better suit your documents. Larger chunks provide more context but can include irrelevant information, while smaller chunks yield more precise semantic search but can lack context.
## See also
diff --git a/docs/docs/core-components/knowledge-filters.mdx b/docs/docs/core-components/knowledge-filters.mdx
index f2a2c2b6..351dae01 100644
--- a/docs/docs/core-components/knowledge-filters.mdx
+++ b/docs/docs/core-components/knowledge-filters.mdx
@@ -4,6 +4,7 @@ slug: /knowledge-filters
---
import Icon from "@site/src/components/icon/icon";
+import PartialAnonymousUserOwner from '@site/docs/_partial-anonymous-user-owner.mdx';
OpenRAG's knowledge filters help you organize and manage your [knowledge base](/knowledge) by creating pre-defined views of your documents.
@@ -41,16 +42,25 @@ This is purely cosmetic, but it can help you visually distinguish different sets
Use the filter settings to narrow the scope of documents that the filter captures:
* **Search Query**: Enter a natural language text string for semantic search.
- When you apply a filter that has a **Search Query**, only documents matching the search query are included.
- It is recommended that you use the **Score Threshold** setting to avoid returning irrelevant documents.
+
+ When you apply a filter that has a **Search Query**, only documents matching the search query are included.
+ It is recommended that you also use the **Score Threshold** setting to avoid returning irrelevant documents.
+
* **Data Sources**: Select specific files and folders to include in the filter.
- This is useful if you want to create a filter for a specific project or topic and you know the specific documents you want to include.
- Similarly, if you upload a folder of documents, you might want to create a filter that only includes the documents from that folder.
+
+ This is useful if you want to create a filter for a specific project or topic and you know the specific documents you want to include.
+ Similarly, if you upload a folder of documents or enable an OAuth connector, you might want to create a filter that only includes the documents from that source.
+
* **Document Types**: Filter by file type.
+
* **Owners**: Filter by the user that uploaded the documents.
- **Anonymous User** means a document was uploaded in an OpenRAG environment where OAuth isn't configured.
- * **Connectors**: Filter by [upload source](/ingestion), such as the local file system or a Google Drive OAuth connector.
+
+  <PartialAnonymousUserOwner />
+
+ * **Connectors**: Filter by [upload source](/ingestion), such as the local file system or an OAuth connector.
+
* **Response Limit**: Set the maximum number of results to return from the knowledge base. The default is `10`, which means the filter returns only the top 10 most relevant documents.
+
* **Score Threshold**: Set the minimum relevance score for similarity search. The default score is `0`. A threshold is recommended to avoid returning irrelevant documents.
6. Click **Create Filter**.
@@ -65,7 +75,7 @@ On the filter settings pane, edit the filter as desired, and then click **Update
In the OpenRAG **Chat**, click **Filter**, and then select the filter to apply.
Chat filters apply to one chat session only.
-You can also use filters when [browsing the **Knowledge** page](/knowledge#browse-knowledge).
+You can also use filters when [browsing your knowledge base](/knowledge#browse-knowledge).
This is a helpful way to test filters and manage knowledge bases that have many documents.
## Delete a filter
diff --git a/docs/docs/core-components/knowledge.mdx b/docs/docs/core-components/knowledge.mdx
index 6686475c..53900987 100644
--- a/docs/docs/core-components/knowledge.mdx
+++ b/docs/docs/core-components/knowledge.mdx
@@ -5,6 +5,7 @@ slug: /knowledge
import Icon from "@site/src/components/icon/icon";
import PartialOpenSearchAuthMode from '@site/docs/_partial-opensearch-auth-mode.mdx';
+import PartialAnonymousUserOwner from '@site/docs/_partial-anonymous-user-owner.mdx';
OpenRAG includes a built-in [OpenSearch](https://docs.opensearch.org/latest/) instance that serves as the underlying datastore for your _knowledge_ (documents).
This specialized database is used to store and retrieve your documents and the associated vector data (embeddings).
@@ -24,31 +25,60 @@ The **Knowledge** page lists the documents OpenRAG has ingested into your OpenSe
To explore the raw contents of your knowledge base, click **Knowledge** to get a list of all ingested documents.
-Click a document to view the chunks produced from splitting the document during ingestion as well as technical details related to chunking.
+### Inspect knowledge
-For each document, the **Knowledge** page provides metadata, including the size, type, user that uploaded the document, the number of chunks created from the document, and the embedding model and dimensions used to embed the document.
+For each document, the **Knowledge** page provides the following information:
-The search field at the top of the **Knowledge** page allows you to search for specific documents by name, contents, or with a knowledge filter:
+* **Source**: Name of the ingested content, such as the file name.
-* To search with a text string, enter your search string in the search field, and then press Enter.
+* **Size**
-* To apply a [knowledge filter](/knowledge-filters) when browsing your knowledge base, click the filter in the **Knowledge Filters** list.
+* **Type**
+
+* **Owner**: User that uploaded the document.
+
+  <PartialAnonymousUserOwner />
+
+* **Chunks**: Number of chunks created by splitting the document during ingestion.
+
+ Click a document to view the individual chunks and technical details related to chunking.
+ If the chunks seem incorrect or incomplete, see [Troubleshoot ingestion](/ingestion#troubleshoot-ingestion).
+
+* **Avg score**: Average similarity score across all chunks of the document.
+
+ If you [search the knowledge base](#search-knowledge), the **Avg score** column shows the similarity score for your search query or filter.
+
+* **Embedding model** and **Dimensions**: The embedding model and dimensions used to embed the chunks.
+
+* **Status**: Status of document ingestion.
+If ingestion is complete and successful, then the status is **Active**.
+For more information, see [Monitor ingestion](/ingestion#monitor-ingestion).
+
+### Search knowledge {#search-knowledge}
+
+You can use the search field on the **Knowledge** page to find documents using semantic search and knowledge filters:
+
+To search all documents, enter a search string in the search field, and then press Enter.
+
+To apply a [knowledge filter](/knowledge-filters), select the filter from the **Knowledge Filters** list.
The filter settings pane opens, and the filter appears in the search field.
To remove the filter, close the filter settings pane or clear the filter from the search field.
- If a knowledge filter contains a search query, that query is applied in addition to any additional string you enter in the search field.
+You can use the filter alone or in combination with a search string.
+If a knowledge filter has a **Search Query**, that query is applied in addition to any text string you enter in the search field.
-When you search, the **Avg score** column shows how relevant each document is to your search query.
+Only one filter can be applied at a time.
### Default documents {#default-documents}
By default, OpenRAG includes some initial documents about OpenRAG.
These documents are ingested automatically during the [application onboarding process](/install#application-onboarding).
-You can use these documents to ask OpenRAG about itself, and to test the [**Chat**](/chat) feature before uploading your own documents.
+You can use these documents to ask OpenRAG about itself, or to test the [**Chat**](/chat) feature before uploading your own documents.
-If you [delete](#delete-knowledge) these documents, you won't be able to ask OpenRAG about itself and it's own functionality.
+If you [delete these documents](#delete-knowledge), then you won't be able to ask OpenRAG about itself and its own functionality.
It is recommended that you keep these documents, and use [filters](/knowledge-filters) to separate them from your other knowledge.
+An **OpenRAG Docs** filter is created automatically for these documents.
## OpenSearch authentication and document access {#auth}
diff --git a/docs/docs/get-started/docker.mdx b/docs/docs/get-started/docker.mdx
index 1fc4e798..0c65e9c5 100644
--- a/docs/docs/get-started/docker.mdx
+++ b/docs/docs/get-started/docker.mdx
@@ -100,7 +100,7 @@ The following variables are required or recommended:
This port is required to deploy OpenRAG successfully; don't use a different port.
Additionally, this enables the [MLX framework](https://opensource.apple.com/projects/mlx/) for accelerated performance on Apple Silicon Mac machines.
-2. Confirm `docling serve` is running.
+2. Confirm `docling serve` is running:
```bash
uv run python scripts/docling_ctl.py status
From 7a2a466adfdd04331fb809e4b2a7590a133a3b79 Mon Sep 17 00:00:00 2001
From: April M <36110273+aimurphy@users.noreply.github.com>
Date: Fri, 16 Jan 2026 15:43:46 -0800
Subject: [PATCH 5/7] docling_serve_url
---
docs/docs/core-components/knowledge.mdx | 13 ++++++++-----
docs/docs/get-started/docker.mdx | 12 +++++++++---
docs/docs/reference/configuration.mdx | 3 +++
3 files changed, 20 insertions(+), 8 deletions(-)
diff --git a/docs/docs/core-components/knowledge.mdx b/docs/docs/core-components/knowledge.mdx
index 53900987..f8f2fc0c 100644
--- a/docs/docs/core-components/knowledge.mdx
+++ b/docs/docs/core-components/knowledge.mdx
@@ -126,10 +126,10 @@ The default embedding dimension is `1536`, and the default model is the OpenAI `
If you want to use an unsupported model, you must manually set the model in your [OpenRAG `.env` file](/reference/configuration).
If you use an unsupported embedding model that doesn't have defined dimensions in `settings.py`, then OpenRAG falls back to the default dimensions (1536) and logs a warning. OpenRAG's OpenSearch instance and flows continue to work, but [similarity search](https://www.ibm.com/think/topics/vector-search) quality can be affected if the actual model dimensions aren't 1536.
-To change the embedding model after onboarding, it is recommended that you modify the embedding model configuration on the OpenRAG **Settings** page or in your [OpenRAG `.env` file](/reference/configuration).
+To change the embedding model after onboarding, modify the embedding model configuration on the OpenRAG **Settings** page or in your [OpenRAG `.env` file](/reference/configuration).
This ensures that all relevant [OpenRAG flows](/agents) are updated to use the new embedding model configuration.
-If you edit these settings in the `.env` or `docker-compose` files, you must [stop and restart the OpenRAG containers](/manage-services#stop-and-start-containers) to apply the changes.
+If you edit these settings in the `.env` file, you must [stop and restart the OpenRAG containers](/manage-services#stop-and-start-containers) to apply the changes.
### Set Docling parameters
@@ -137,14 +137,17 @@ OpenRAG uses [Docling](https://docling-project.github.io/docling/) for document
When you [upload documents](/ingestion), Docling processes the files, splits them into chunks, and stores them as separate, structured documents in your OpenSearch knowledge base.
-#### Select a Docling implementation
+#### Select a Docling implementation {#select-a-docling-implementation}
You can use either Docling Serve or OpenRAG's built-in Docling ingestion pipeline to process documents.
* **Docling Serve ingestion**: By default, OpenRAG uses [Docling Serve](https://github.com/docling-project/docling-serve).
-This means that OpenRAG starts a `docling serve` process on your local machine and runs Docling ingestion through an API service.
+It starts a local `docling serve` process, and then runs Docling ingestion through the Docling Serve API.
-* **Built-in Docling ingestion**: If you want to use OpenRAG's built-in Docling ingestion pipeline instead of the separate Docling Serve service, set `DISABLE_INGEST_WITH_LANGFLOW=true` in your [OpenRAG environment variables](/reference/configuration#document-processing-settings).
+ To use a remote `docling serve` instance or your own local instance, set `DOCLING_SERVE_URL=http://**HOST_IP**:5001` in your [OpenRAG `.env` file](/reference/configuration#document-processing-settings).
+ The service must run on port 5001.
+
+* **Built-in Docling ingestion**: If you want to use OpenRAG's built-in Docling ingestion pipeline instead of the separate Docling Serve service, set `DISABLE_INGEST_WITH_LANGFLOW=true` in your [OpenRAG `.env` file](/reference/configuration#document-processing-settings).
The built-in pipeline uses the Docling processor directly instead of through the Docling Serve API.
For the underlying functionality, see [`processors.py`](https://github.com/langflow-ai/openrag/blob/main/src/models/processors.py#L58) in the OpenRAG repository.
diff --git a/docs/docs/get-started/docker.mdx b/docs/docs/get-started/docker.mdx
index 0c65e9c5..58071201 100644
--- a/docs/docs/get-started/docker.mdx
+++ b/docs/docs/get-started/docker.mdx
@@ -88,7 +88,7 @@ The following variables are required or recommended:
## Start services
-1. Start `docling serve` on port 5001 on the host machine:
+1. To use the default Docling Serve implementation, start `docling serve` on port 5001 on the host machine using the included script:
```bash
uv run python scripts/docling_ctl.py start --port 5001
@@ -97,10 +97,16 @@ The following variables are required or recommended:
Docling cannot run inside a Docker container due to system-level dependencies, so you must manage it as a separate service on the host machine.
For more information, see [Stop, start, and inspect native services](/manage-services#start-native-services).
- This port is required to deploy OpenRAG successfully; don't use a different port.
+ Port 5001 is required to deploy OpenRAG successfully; don't use a different port.
Additionally, this enables the [MLX framework](https://opensource.apple.com/projects/mlx/) for accelerated performance on Apple Silicon Mac machines.
-2. Confirm `docling serve` is running:
+ :::tip
+ If you don't want to use the default Docling Serve implementation, see [Select a Docling implementation](/knowledge#select-a-docling-implementation).
+ :::
+
+2. Confirm `docling serve` is running.
+
+ The following command checks the status of the default Docling Serve implementation:
```bash
uv run python scripts/docling_ctl.py status
diff --git a/docs/docs/reference/configuration.mdx b/docs/docs/reference/configuration.mdx
index 8ba77b8f..537def62 100644
--- a/docs/docs/reference/configuration.mdx
+++ b/docs/docs/reference/configuration.mdx
@@ -62,12 +62,15 @@ Some of these variables are immutable and can only be changed by redeploying Ope
Control how OpenRAG [processes and ingests documents](/ingestion) into your knowledge base.
+Most of these settings can be configured on the OpenRAG **Settings** page or in the `.env` file.
+
| Variable | Default | Description |
|----------|---------|-------------|
| `CHUNK_OVERLAP` | `200` | Overlap between chunks. |
| `CHUNK_SIZE` | `1000` | Text chunk size for document processing. |
| `DISABLE_INGEST_WITH_LANGFLOW` | `false` | Disable Langflow ingestion pipeline. |
| `DOCLING_OCR_ENGINE` | Set by OS | OCR engine for document processing. For macOS, `ocrmac`. For any other OS, `easyocr`. |
+| `DOCLING_SERVE_URL` | `http://**HOST_IP**:5001` | URL for the [Docling Serve instance](/knowledge#select-a-docling-implementation). By default, OpenRAG starts a local `docling serve` process and auto-detects the host. To use your own local or remote Docling Serve instance, set this variable to the full path to the target instance. The service must run on port 5001. |
| `OCR_ENABLED` | `false` | Enable OCR for image processing. |
| `OPENRAG_DOCUMENTS_PATH` | `~/.openrag/documents` | The [local documents path](/knowledge#set-the-local-documents-path) for ingestion. |
| `PICTURE_DESCRIPTIONS_ENABLED` | `false` | Enable picture descriptions. |
From e3f2b1a5eab3949bcf62d45a50934a3fa311ae37 Mon Sep 17 00:00:00 2001
From: April M <36110273+aimurphy@users.noreply.github.com>
Date: Fri, 16 Jan 2026 15:51:42 -0800
Subject: [PATCH 6/7] build error
---
docs/docs/reference/configuration.mdx | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/docs/reference/configuration.mdx b/docs/docs/reference/configuration.mdx
index 537def62..1a3f9af7 100644
--- a/docs/docs/reference/configuration.mdx
+++ b/docs/docs/reference/configuration.mdx
@@ -99,7 +99,7 @@ For better security, it is recommended to set `LANGFLOW_SUPERUSER_PASSWORD` so t
| `LANGFLOW_SUPERUSER_PASSWORD` | Not set | Langflow administrator password. If this variable isn't set, then the Langflow server starts _without_ authentication enabled. It is recommended to set `LANGFLOW_SUPERUSER_PASSWORD` so the [Langflow server starts with authentication enabled](https://docs.langflow.org/api-keys-and-authentication#start-a-langflow-server-with-authentication-enabled). |
| `LANGFLOW_URL` | `http://localhost:7860` | URL for the Langflow instance. |
| `LANGFLOW_CHAT_FLOW_ID`, `LANGFLOW_INGEST_FLOW_ID`, `NUDGES_FLOW_ID` | Built-in flow IDs | These variables are set automatically to the IDs of the chat, ingestion, and nudges [flows](/agents). The default values are found in [`.env.example`](https://github.com/langflow-ai/openrag/blob/main/.env.example). Only change these values if you want to replace a built-in flow with your own custom flow. The flow JSON must be present in your version of the OpenRAG codebase. For example, if you [deploy self-managed services](/docker), you can add the flow JSON to your local clone of the OpenRAG repository before deploying OpenRAG. |
-| `SYSTEM_PROMPT` | `You are a helpful AI assistant with access to a knowledge base. Answer questions based on the provided context.` | System prompt instructions for the agent driving the **Agent** flow (OpenRAG **Chat**). |
+| `SYSTEM_PROMPT` | `You are a helpful AI assistant with access to a knowledge base. Answer questions based on the provided context.` | System prompt instructions for the agent driving the **Agent** flow (OpenRAG **Chat**). |
## OAuth provider settings
From 6c23b6ece606b27d8121ea35610f9de0ed4abfe0 Mon Sep 17 00:00:00 2001
From: April M <36110273+aimurphy@users.noreply.github.com>
Date: Fri, 16 Jan 2026 15:57:53 -0800
Subject: [PATCH 7/7] changelog link
---
docs/sidebars.js | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/docs/sidebars.js b/docs/sidebars.js
index 0cc0be91..d226e268 100644
--- a/docs/sidebars.js
+++ b/docs/sidebars.js
@@ -82,6 +82,11 @@ const sidebars = {
},
"support/contribute",
"support/troubleshoot",
+ {
+ type: "link",
+ label: "Changelog",
+ href: "https://github.com/langflow-ai/openrag/releases",
+ },
],
};