From d9d2e7f68d7d27790e0360f38e81a36e7db20010 Mon Sep 17 00:00:00 2001
From: Mendon Kissling <59585235+mendonk@users.noreply.github.com>
Date: Thu, 25 Sep 2025 11:04:17 -0400
Subject: [PATCH 01/10] init
---
docs/docs/core-components/knowledge.mdx | 4 ++++
docs/sidebars.js | 5 +++++
2 files changed, 9 insertions(+)
create mode 100644 docs/docs/core-components/knowledge.mdx
diff --git a/docs/docs/core-components/knowledge.mdx b/docs/docs/core-components/knowledge.mdx
new file mode 100644
index 00000000..67f1ef24
--- /dev/null
+++ b/docs/docs/core-components/knowledge.mdx
@@ -0,0 +1,4 @@
+---
+title: Knowledge stored with OpenSearch
+slug: /knowledge
+---
\ No newline at end of file
diff --git a/docs/sidebars.js b/docs/sidebars.js
index f76fdcda..c4226946 100644
--- a/docs/sidebars.js
+++ b/docs/sidebars.js
@@ -56,6 +56,11 @@ const sidebars = {
id: "core-components/agents",
label: "Langflow Agents"
},
+ {
+ type: "doc",
+ id: "core-components/knowledge",
+ label: "Knowledge stored with OpenSearch"
+ }
],
},
{
From d4023418d4165f4515380c7a376e8498564ef949 Mon Sep 17 00:00:00 2001
From: Mendon Kissling <59585235+mendonk@users.noreply.github.com>
Date: Thu, 25 Sep 2025 14:27:09 -0400
Subject: [PATCH 02/10] oauth-connector-knowledge-ingest
---
docs/docs/_partial-modify-flows.mdx | 5 ++
docs/docs/core-components/agents.mdx | 11 ++-
docs/docs/core-components/knowledge.mdx | 99 ++++++++++++++++++++++++-
docs/docs/get-started/quickstart.mdx | 4 +-
4 files changed, 112 insertions(+), 7 deletions(-)
create mode 100644 docs/docs/_partial-modify-flows.mdx
diff --git a/docs/docs/_partial-modify-flows.mdx b/docs/docs/_partial-modify-flows.mdx
new file mode 100644
index 00000000..852777e5
--- /dev/null
+++ b/docs/docs/_partial-modify-flows.mdx
@@ -0,0 +1,5 @@
+import Icon from "@site/src/components/icon/icon";
+
+All flows included with OpenRAG are designed to be modular, performant, and provider-agnostic.
+To modify a flow, click **Settings**, and click **Edit in Langflow**.
+Flows are edited in the same way as in the [Langflow visual editor](https://docs.langflow.org/concepts-overview).
\ No newline at end of file
diff --git a/docs/docs/core-components/agents.mdx b/docs/docs/core-components/agents.mdx
index 121ca3d5..8388bd60 100644
--- a/docs/docs/core-components/agents.mdx
+++ b/docs/docs/core-components/agents.mdx
@@ -3,6 +3,11 @@ title: Agents powered by Langflow
slug: /agents
---
+import Icon from "@site/src/components/icon/icon";
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import PartialModifyFlows from '@site/docs/_partial-modify-flows.mdx';
+
OpenRAG leverages Langflow's Agent component to power the OpenRAG Open Search Agent flow.
This flow intelligently chats with your knowledge by embedding your query, comparing it the vector database embeddings, and generating a response with the LLM.
@@ -25,7 +30,7 @@ In an agentic context, tools are functions that the agent can run to perform tas
## Use the OpenRAG Open Search Agent flow
If you've chatted with your knowledge in OpenRAG, you've already experienced the OpenRAG Open Search Agent chat flow.
-To view the flow, click **Settings**, and then click **Edit in Langflow**.
+To view the flow, click **Settings**, and then click **Edit in Langflow**.
This flow contains seven components:
* The Agent component orchestrates the entire flow by deciding when to search the knowledge base, how to formulate search queries, and how to combine retrieved information with the user's question to generate a comprehensive response.
@@ -38,9 +43,7 @@ The Agent behaves according to the prompt in the **Agent Instructions** field.
This filter is the Knowledge filter, and filters which knowledge sources to search through.
* The Agent component's Output port is connected to the Chat Output component, which returns the final response to the user or application.
-All flows included with OpenRAG are designed to be modular, performant, and provider-agnostic.
-To modify a flow, click **Settings**, and click **Edit in Langflow**.
-Flows are edited in the same way as in the [Langflow visual editor](https://docs.langflow.org/concepts-overview).
+
For an example of changing out the agent's LLM in OpenRAG, see the [Quickstart](/quickstart#change-components).
diff --git a/docs/docs/core-components/knowledge.mdx b/docs/docs/core-components/knowledge.mdx
index 67f1ef24..5e003880 100644
--- a/docs/docs/core-components/knowledge.mdx
+++ b/docs/docs/core-components/knowledge.mdx
@@ -1,4 +1,101 @@
---
title: Knowledge stored with OpenSearch
slug: /knowledge
----
\ No newline at end of file
+---
+
+import Icon from "@site/src/components/icon/icon";
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import PartialModifyFlows from '@site/docs/_partial-modify-flows.mdx';
+
+OpenRAG uses [OpenSearch](https://docs.opensearch.org/latest/) for its vector-backed knowledge store.
+OpenSearch provides powerful hybrid search capabilities with enterprise-grade security and multi-tenancy support.
+
+## OpenRAG default configuration
+
+OpenRAG creates a specialized OpenSearch index called `documents` with the values defined at `src/config/settings.py`.
+- **Vector Dimensions**: 1536-dimensional embeddings using OpenAI's `text-embedding-3-small` model.
+- **KNN Vector Type**: Uses `knn_vector` field type with `disk_ann` method and `jvector` engine.
+- **Distance Metric**: L2 (Euclidean) distance for vector similarity.
+- **Performance Optimization**: Configured with `ef_construction: 100` and `m: 16` parameters.
+
+OpenRAG supports hybrid search, which combines semantic and keyword search.
+
+## Explore knowledge
+
+To explore your current knowledge, click **Knowledge**.
+The Knowledge page lists the documents OpenRAG has ingested into the OpenSearch vector database's `documents` index.
+
+Click on a document to display the chunks derived from splitting the default documents into the vector database.
+Documents are processed with the **Knowledge Ingest** flow, so to split your documents differently, edit the **Knowledge Ingest** flow.
+
+
+
+## Ingest knowledge
+
+OpenRAG supports knowledge ingestion through direct file uploads and OAuth connectors.
+
+### Upload files
+
+- Files uploaded directly through the web interface
+- Processed immediately using the standard pipeline
+
+### Upload files through OAuth connectors
+
+OpenRAG supports the following enterprise-grade OAuth connectors for seamless document synchronization.
+
+- **Google Drive**
+- **OneDrive**
+- **AWS**
+
+OAuth integration allows your OpenRAG server to authenticate users and applications through any OAuth 2.0 compliant service. When users or applications connect to your server, they are redirected to your chosen OAuth provider to authenticate. Upon successful authentication, they are granted access to the connector.
+
+Before configuring OAuth in OpenRAG, you must first set up an OAuth application with an external OAuth 2.0 service provider. You must register your OpenRAG server as an OAuth client, and then obtain the `client` and `secret` keys to complete the configuration in OpenRAG.
+
+To add an OAuth connector to OpenRAG, do the following.
+This example uses Google OAuth.
+If you want to use another provider, substitute that provider's client ID and secret in the steps below.
+
+
+
+ 1. If OpenRAG is running, stop it with **Status** > **Stop Services**.
+ 2. Click **Advanced Setup**.
+ 3. Add the OAuth provider's client and secret key in the [Advanced Setup](/install#advanced-setup) menu.
+ 4. Click **Save Configuration**.
+ The TUI generates a new `.env` file with your OAuth values.
+ 5. Click **Start Container Services**.
+
+
+ 1. Stop the Docker deployment.
+ 2. Add the OAuth provider's client and secret key in the `.env` file for Docker Compose.
+ ```bash
+ GOOGLE_OAUTH_CLIENT_ID='YOUR_OAUTH_CLIENT_ID'
+ GOOGLE_OAUTH_CLIENT_SECRET='YOUR_OAUTH_CLIENT_SECRET'
+ ```
+ 3. Save your `.env` file.
+ 4. Start the Docker deployment.
+
+
+
+The OpenRAG frontend at `http://localhost:3000` now redirects to an OAuth callback login page for your OAuth provider.
+A successful authentication opens OpenRAG with the required scopes for your connected storage.
+
+To add knowledge from an OAuth-connected storage provider, do the following:
+
+1. Click **Add Knowledge**, and then select the storage provider, for example, **Google Drive**.
+The **Add Cloud Knowledge** page opens.
+2. To add files or folders from the connected storage, click **Add Files**.
+Select the files or folders you want and click **Select**.
+You can select multiple files and folders.
+3. When your files are selected, click **Ingest Files**.
+The ingestion process may take some time, depending on the size of your documents.
+4. When ingestion is complete, your documents are available in the Knowledge screen.
+
+## Knowledge Filter System
+
+OpenRAG includes a knowledge filter system for organizing and managing document collections:
+
+
+
+
+
diff --git a/docs/docs/get-started/quickstart.mdx b/docs/docs/get-started/quickstart.mdx
index fe039859..0e53534f 100644
--- a/docs/docs/get-started/quickstart.mdx
+++ b/docs/docs/get-started/quickstart.mdx
@@ -35,9 +35,9 @@ Get started with OpenRAG by loading your knowledge, swapping out your language m
These events log the agent's request to the tool and the tool's response, so you have direct visibility into your agent's functionality.
If you aren't getting the results you need, you can further tune the knowledge ingestion and agent behavior in the next section.
-## Swap out the language model to modify agent behavior {change-components}
+## Swap out the language model to modify agent behavior {#change-components}
-To modify the knowledge ingestion or Agent behavior, click **Settings**.
+To modify the knowledge ingestion or Agent behavior, click **Settings**.
In this example, you'll try a different LLM to demonstrate how the Agent's response changes.
From a804035427740726e26258923b8c8aa511a4116d Mon Sep 17 00:00:00 2001
From: Mendon Kissling <59585235+mendonk@users.noreply.github.com>
Date: Thu, 25 Sep 2025 14:56:22 -0400
Subject: [PATCH 03/10] direct-file-upload
---
docs/docs/core-components/knowledge.mdx | 43 ++++++++++++++++---------
1 file changed, 27 insertions(+), 16 deletions(-)
diff --git a/docs/docs/core-components/knowledge.mdx b/docs/docs/core-components/knowledge.mdx
index 5e003880..ff8a3bd5 100644
--- a/docs/docs/core-components/knowledge.mdx
+++ b/docs/docs/core-components/knowledge.mdx
@@ -11,23 +11,14 @@ import PartialModifyFlows from '@site/docs/_partial-modify-flows.mdx';
OpenRAG uses [OpenSearch](https://docs.opensearch.org/latest/) for its vector-backed knowledge store.
OpenSearch provides powerful hybrid search capabilities with enterprise-grade security and multi-tenancy support.
-## OpenRAG default configuration
-
-OpenRAG creates a specialized OpenSearch index called `documents` with the values defined at `src/config/settings.py`.
-- **Vector Dimensions**: 1536-dimensional embeddings using OpenAI's `text-embedding-3-small` model.
-- **KNN Vector Type**: Uses `knn_vector` field type with `disk_ann` method and `jvector` engine.
-- **Distance Metric**: L2 (Euclidean) distance for vector similarity.
-- **Performance Optimization**: Configured with `ef_construction: 100` and `m: 16` parameters.
-
-OpenRAG supports hybrid search, which combines semantic and keyword search.
-
## Explore knowledge
-To explore your current knowledge, click **Knowledge**.
The Knowledge page lists the documents OpenRAG has ingested into the OpenSearch vector database's `documents` index.
+To explore your current knowledge, click **Knowledge**.
Click on a document to display the chunks derived from splitting the default documents into the vector database.
-Documents are processed with the **Knowledge Ingest** flow, so to split your documents differently, edit the **Knowledge Ingest** flow.
+
+Documents are processed with the default **Knowledge Ingest** flow, so if you want to split your documents differently, edit the **Knowledge Ingest** flow.
@@ -35,12 +26,19 @@ Documents are processed with the **Knowledge Ingest** flow, so to split your doc
OpenRAG supports knowledge ingestion through direct file uploads and OAuth connectors.
-### Upload files
+### Direct file ingestion
-- Files uploaded directly through the web interface
-- Processed immediately using the standard pipeline
+The **Knowledge Ingest** flow uses Langflow's [**File** component](https://docs.langflow.org/components-data#file) to split and embed files loaded from your local machine into the OpenSearch database.
-### Upload files through OAuth connectors
+The default path to your local folder is mounted from the `./documents` folder in your OpenRAG project directory to the `/app/documents/` directory inside the Docker container. Files added to the host or the container will be visible in both locations. To configure this location, modify the **Documents Paths** variable in either the TUI's [Advanced Setup](/install#advanced-setup) or in the `.env` used by Docker Compose. Add multiple paths in a comma-separated list with no spaces. For example, `./documents,/Users/username/Documents`.
+
+To load and process a single file from the mapped location, click **Add Knowledge**, and then click **Add File**.
+The file is loaded into your OpenSearch database, and appears in the Knowledge page.
+
+To load and process a directory from the mapped location, click **Add Knowledge**, and then click **Process Folder**.
+The files are loaded into your OpenSearch database, and appear in the Knowledge page.
+
+### Ingest files through OAuth connectors
OpenRAG supports the following enterprise-grade OAuth connectors for seamless document synchronization.
@@ -98,4 +96,17 @@ OpenRAG includes a knowledge filter system for organizing and managing document
+## OpenRAG default configuration
+
+OpenRAG creates a specialized OpenSearch index called `documents` with the values defined at `src/config/settings.py`.
+- **Vector Dimensions**: 1536-dimensional embeddings using OpenAI's `text-embedding-3-small` model.
+- **KNN Vector Type**: Uses `knn_vector` field type with `disk_ann` method and `jvector` engine.
+- **Distance Metric**: L2 (Euclidean) distance for vector similarity.
+- **Performance Optimization**: Configured with `ef_construction: 100` and `m: 16` parameters.
+
+OpenRAG supports hybrid search, which combines semantic and keyword search.
+
+
+
+
From 08404015265d297298a3d7b766a2450a46203420 Mon Sep 17 00:00:00 2001
From: Mendon Kissling <59585235+mendonk@users.noreply.github.com>
Date: Thu, 25 Sep 2025 15:15:22 -0400
Subject: [PATCH 04/10] sync-cloud-conns
---
docs/docs/core-components/knowledge.mdx | 14 ++++++++++++--
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/docs/docs/core-components/knowledge.mdx b/docs/docs/core-components/knowledge.mdx
index ff8a3bd5..92a285d3 100644
--- a/docs/docs/core-components/knowledge.mdx
+++ b/docs/docs/core-components/knowledge.mdx
@@ -89,12 +89,22 @@ You can select multiples.
The ingestion process may take some time, depending on the size of your documents.
4. When ingestion is complete, your documents are available in the Knowledge screen.
-## Knowledge Filter System
+### Sync cloud connectors
-OpenRAG includes a knowledge filter system for organizing and managing document collections:
+Your connected data sources are found in the **Settings** page.
+When you click **Sync Now** for a connected cloud service like Google Drive, OpenRAG scans your connected Google Drive account to find files that match your sync criteria. Sync criteria are controlled in **Sync Settings** on the same page. You can sync all files, or select a maximum number of files to sync.
+For each file found, OpenRAG downloads, converts, and embeds the processed content into OpenSearch.
+You can monitor the sync progress in the **Tasks** sidebar.
+
+Once processing is complete, the synced documents become available in your knowledge base and can be searched through the chat interface or Knowledge page.
+
+## Knowledge filters
+
+OpenRAG includes a knowledge filter system for organizing and managing document collections.
+Knowledge filters are saved search configurations that allow you to create custom views of your document collection. They store search queries, filter criteria, and display settings that can be reused across different parts of the application.
## OpenRAG default configuration
From 2ca06797454330bf34986b5e2a124cff9f3b886f Mon Sep 17 00:00:00 2001
From: Mendon Kissling <59585235+mendonk@users.noreply.github.com>
Date: Thu, 25 Sep 2025 16:23:44 -0400
Subject: [PATCH 05/10] filter-knowledge
---
docs/docs/core-components/knowledge.mdx | 44 ++++++++++++++++++++-----
docs/sidebars.js | 2 +-
2 files changed, 36 insertions(+), 10 deletions(-)
diff --git a/docs/docs/core-components/knowledge.mdx b/docs/docs/core-components/knowledge.mdx
index 92a285d3..7c5105b2 100644
--- a/docs/docs/core-components/knowledge.mdx
+++ b/docs/docs/core-components/knowledge.mdx
@@ -1,5 +1,5 @@
---
-title: Knowledge stored with OpenSearch
+title: OpenSearch Knowledge
slug: /knowledge
---
@@ -46,9 +46,9 @@ OpenRAG supports the following enterprise-grade OAuth connectors for seamless do
- **OneDrive**
- **AWS**
-OAuth integration allows your OpenRAG server to authenticate users and applications through any OAuth 2.0 compliant service. When users or applications connect to your server, they are redirected to your chosen OAuth provider to authenticate. Upon successful authentication, they are granted access to the connector.
+OAuth integration allows individual users to connect their personal cloud storage accounts to OpenRAG. Each user must separately authorize OpenRAG to access their own cloud storage files. When a user connects a cloud service, they are redirected to authenticate with that service provider and grant OpenRAG permission to sync documents from their personal cloud storage.
-Before configuring OAuth in OpenRAG, you must first set up an OAuth application with an external OAuth 2.0 service provider. You must register your OpenRAG server as an OAuth client, and then obtain the `client` and `secret` keys to complete the configuration in OpenRAG.
+Before users can connect their cloud storage accounts, you must configure OAuth credentials in OpenRAG. This requires registering OpenRAG as an OAuth application with a cloud provider and obtaining client ID and secret keys for each service you want to support.
To add an OAuth connector to OpenRAG, do the following.
This example uses Google OAuth.
@@ -101,18 +101,44 @@ You can monitor the sync progress in the
Once processing is complete, the synced documents become available in your knowledge base and can be searched through the chat interface or Knowledge page.
-## Knowledge filters
+## Create knowledge filters
OpenRAG includes a knowledge filter system for organizing and managing document collections.
-Knowledge filters are saved search configurations that allow you to create custom views of your document collection. They store search queries, filter criteria, and display settings that can be reused across different parts of the application.
+Knowledge filters are saved search configurations that allow you to create custom views of your document collection. They store search queries, filter criteria, and display settings that can be reused across different parts of OpenRAG.
+
+Knowledge filters help agents work more efficiently with large document collections by focusing their context within relevant document sets.
+
+To create a knowledge filter, do the following:
+
+1. Click **All Knowledge**, and then click **Create New Filter**.
+ The **Create New Knowledge Filter** pane appears.
+2. Enter a **Name** and **Description**, and then click **Create Filter**.
+A new filter is created with default settings that match everything.
+3. To modify the default filter, click **All Knowledge**, and then click your new filter to edit it in the **Knowledge Filter** pane.
+
+ The following filter options are configurable.
+
+ * **Search Query**: Enter text for semantic search, such as "financial reports from Q4".
+ * **Data Sources**: Select specific data sources or folders to include.
+ * **Document Types**: Filter by file type.
+ * **Owners**: Filter by who uploaded the documents.
+ * **Sources**: Filter by connector types, such as local upload or Google Drive.
+ * **Result Limit**: Set maximum number of results. The default is `10`.
+ * **Score Threshold**: Set minimum relevance score. The default score is `0`.
+
+4. When you're done editing the filter, click **Save Configuration**.
+
+5. To apply the filter to OpenRAG globally, click **All Knowledge**, and then select the filter to apply.
+
+ To apply the filter to a single chat session, in the **Chat** window, click **@**, and then select the filter to apply.
## OpenRAG default configuration
OpenRAG creates a specialized OpenSearch index called `documents` with the values defined at `src/config/settings.py`.
-- **Vector Dimensions**: 1536-dimensional embeddings using OpenAI's `text-embedding-3-small` model.
-- **KNN Vector Type**: Uses `knn_vector` field type with `disk_ann` method and `jvector` engine.
-- **Distance Metric**: L2 (Euclidean) distance for vector similarity.
-- **Performance Optimization**: Configured with `ef_construction: 100` and `m: 16` parameters.
+* **Vector Dimensions**: 1536-dimensional embeddings using OpenAI's `text-embedding-3-small` model.
+* **KNN Vector Type**: Uses `knn_vector` field type with `disk_ann` method and `jvector` engine.
+* **Distance Metric**: L2 (Euclidean) distance for vector similarity.
+* **Performance Optimization**: Configured with `ef_construction: 100` and `m: 16` parameters.
OpenRAG supports hybrid search, which combines semantic and keyword search.
diff --git a/docs/sidebars.js b/docs/sidebars.js
index c4226946..3048cb70 100644
--- a/docs/sidebars.js
+++ b/docs/sidebars.js
@@ -59,7 +59,7 @@ const sidebars = {
{
type: "doc",
id: "core-components/knowledge",
- label: "Knowledge stored with OpenSearch"
+ label: "OpenSearch Knowledge"
}
],
},
From 1c471d24dd747ab237631cc8474c84bf01772ffc Mon Sep 17 00:00:00 2001
From: Mendon Kissling <59585235+mendonk@users.noreply.github.com>
Date: Thu, 25 Sep 2025 17:08:19 -0400
Subject: [PATCH 06/10] oauth-ingestion-anchor
---
docs/docs/core-components/knowledge.mdx | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/docs/core-components/knowledge.mdx b/docs/docs/core-components/knowledge.mdx
index 7c5105b2..d8160f8b 100644
--- a/docs/docs/core-components/knowledge.mdx
+++ b/docs/docs/core-components/knowledge.mdx
@@ -38,7 +38,7 @@ The file is loaded into your OpenSearch database, and appears in the Knowledge p
To load and process a directory from the mapped location, click **Add Knowledge**, and then click **Process Folder**.
The files are loaded into your OpenSearch database, and appear in the Knowledge page.
-### Ingest files through OAuth connectors
+### Ingest files through OAuth connectors {#oauth-ingestion}
OpenRAG supports the following enterprise-grade OAuth connectors for seamless document synchronization.
From 8e9ad9ddc79b3510688969e7610563fd725b13c5 Mon Sep 17 00:00:00 2001
From: Mendon Kissling <59585235+mendonk@users.noreply.github.com>
Date: Fri, 26 Sep 2025 09:36:28 -0400
Subject: [PATCH 07/10] opensearch-syntax
---
docs/docs/core-components/agents.mdx | 8 ++++----
docs/docs/get-started/quickstart.mdx | 6 +++---
2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/docs/docs/core-components/agents.mdx b/docs/docs/core-components/agents.mdx
index a5bfbb31..1ecdb1cc 100644
--- a/docs/docs/core-components/agents.mdx
+++ b/docs/docs/core-components/agents.mdx
@@ -9,7 +9,7 @@ import TabItem from '@theme/TabItem';
import PartialModifyFlows from '@site/docs/_partial-modify-flows.mdx';
-OpenRAG leverages Langflow's Agent component to power the OpenRAG Open Search Agent flow.
+OpenRAG leverages Langflow's Agent component to power the OpenRAG OpenSearch Agent flow.
This flow intelligently chats with your knowledge by embedding your query, comparing it the vector database embeddings, and generating a response with the LLM.
@@ -28,9 +28,9 @@ In an agentic context, tools are functions that the agent can run to perform tas
-## Use the OpenRAG Open Search Agent flow
+## Use the OpenRAG OpenSearch Agent flow
-If you've chatted with your knowledge in OpenRAG, you've already experienced the OpenRAG Open Search Agent chat flow.
+If you've chatted with your knowledge in OpenRAG, you've already experienced the OpenRAG OpenSearch Agent chat flow.
To view the flow, click **Settings**, and then click **Edit in Langflow**.
This flow contains seven components:
@@ -39,7 +39,7 @@ The Agent behaves according to the prompt in the **Agent Instructions** field.
* The Chat Input component is connected to the Agent component's Input port. This allows to flow to be triggered by an incoming prompt from a user or application.
* The OpenSearch component is connected to the Agent component's Tools port. The agent may not use this database for every request; the agent only uses this connection if it decides the knowledge can help respond to the prompt.
* The Language Model component is connected to the Agent component's Language Model port. The agent uses the connected LLM to reason through the request sent through Chat Input.
-* The Embedding Model component is connected to the Open Search component's Embedding port. This component converts text queries into vector representations that are compared with document embeddings stored in OpenSearch for semantic similarity matching. This gives your Agent's queries context.
+* The Embedding Model component is connected to the OpenSearch component's Embedding port. This component converts text queries into vector representations that are compared with document embeddings stored in OpenSearch for semantic similarity matching. This gives your Agent's queries context.
* The Text Input component is populated with the global variable `OPENRAG-QUERY-FILTER`.
This filter is the Knowledge filter, and filters which knowledge sources to search through.
* The Agent component's Output port is connected to the Chat Output component, which returns the final response to the user or application.
diff --git a/docs/docs/get-started/quickstart.mdx b/docs/docs/get-started/quickstart.mdx
index 0e53534f..748a8078 100644
--- a/docs/docs/get-started/quickstart.mdx
+++ b/docs/docs/get-started/quickstart.mdx
@@ -16,7 +16,7 @@ Get started with OpenRAG by loading your knowledge, swapping out your language m
## Find your way around
1. In OpenRAG, click **Chat**.
- The chat is powered by the OpenRAG Open Search Agent.
+ The chat is powered by the OpenRAG OpenSearch Agent.
For more information, see [Langflow Agents](/agents).
2. Ask `What documents are available to you?`
The agent responds with a message summarizing the documents that OpenRAG loads by default, which are PDFs about evaluating data quality when using LLMs in health care.
@@ -43,9 +43,9 @@ In this example, you'll try a different LLM to demonstrate how the Agent's respo
1. To edit the Agent's behavior, click **Edit in Langflow**.
2. OpenRAG warns you that you're entering Langflow. Click **Proceed**.
-3. The OpenRAG Open Search Agent flow appears.
+3. The OpenRAG OpenSearch Agent flow appears.
-
+
4. In the **Language Model** component, under **Model Provider**, select **Anthropic**.
:::note
From e0015f35db0113a82017c9c96dd975d25153c198 Mon Sep 17 00:00:00 2001
From: Lucas Oliveira <62335616+lucaseduoli@users.noreply.github.com>
Date: Fri, 26 Sep 2025 12:04:17 -0300
Subject: [PATCH 08/10] fix: update onboarding design, make opensearch index be
initialized after onboarding, make flow reset change the models to the
provider chosen (#100)
* changed tooltip stype
* added start on label wrapper
* changed switch to checkbox on openai onboarding and changed copies
* made border be red when api key is invalid
* Added embedding configuration after onboarding
* changed openrag ingest docling to have same embedding model component as other flows
* changed flows service to get flow by id, not by path
* modify reset_langflow to also put right embedding model
* added endpoint and project id to provider config
* added replacing the model with the provider model when resetting
* Moved consts to settings.py
* raise when flow_id is not found
---
flows/openrag_ingest_docling.json | 24 +-
frontend/components/label-wrapper.tsx | 17 +-
frontend/components/ui/tooltip.tsx | 2 +-
.../components/openai-onboarding.tsx | 17 +-
src/api/settings.py | 13 +
src/config/config_manager.py | 6 +
src/config/settings.py | 25 ++
src/main.py | 24 +-
src/services/flows_service.py | 222 ++++++++++++------
src/utils/embeddings.py | 64 +++++
10 files changed, 317 insertions(+), 97 deletions(-)
create mode 100644 src/utils/embeddings.py
diff --git a/flows/openrag_ingest_docling.json b/flows/openrag_ingest_docling.json
index 889f8425..cce73398 100644
--- a/flows/openrag_ingest_docling.json
+++ b/flows/openrag_ingest_docling.json
@@ -95,7 +95,7 @@
"data": {
"sourceHandle": {
"dataType": "EmbeddingModel",
- "id": "EmbeddingModel-cxG9r",
+ "id": "EmbeddingModel-eZ6bT",
"name": "embeddings",
"output_types": [
"Embeddings"
@@ -110,10 +110,10 @@
"type": "other"
}
},
- "id": "xy-edge__EmbeddingModel-cxG9r{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-cxG9rœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}-OpenSearchHybrid-XtKoA{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchHybrid-XtKoAœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}",
+ "id": "xy-edge__EmbeddingModel-eZ6bT{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-eZ6bTœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}-OpenSearchHybrid-XtKoA{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchHybrid-XtKoAœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}",
"selected": false,
- "source": "EmbeddingModel-cxG9r",
- "sourceHandle": "{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-cxG9rœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}",
+ "source": "EmbeddingModel-eZ6bT",
+ "sourceHandle": "{œdataTypeœ:œEmbeddingModelœ,œidœ:œEmbeddingModel-eZ6bTœ,œnameœ:œembeddingsœ,œoutput_typesœ:[œEmbeddingsœ]}",
"target": "OpenSearchHybrid-XtKoA",
"targetHandle": "{œfieldNameœ:œembeddingœ,œidœ:œOpenSearchHybrid-XtKoAœ,œinputTypesœ:[œEmbeddingsœ],œtypeœ:œotherœ}"
}
@@ -1631,7 +1631,7 @@
},
{
"data": {
- "id": "EmbeddingModel-cxG9r",
+ "id": "EmbeddingModel-eZ6bT",
"node": {
"base_classes": [
"Embeddings"
@@ -1657,7 +1657,7 @@
],
"frozen": false,
"icon": "binary",
- "last_updated": "2025-09-24T16:02:07.998Z",
+ "last_updated": "2025-09-22T15:54:52.885Z",
"legacy": false,
"metadata": {
"code_hash": "93faf11517da",
@@ -1738,7 +1738,7 @@
"show": true,
"title_case": false,
"type": "str",
- "value": ""
+ "value": "OPENAI_API_KEY"
},
"chunk_size": {
"_input_type": "IntInput",
@@ -1926,16 +1926,16 @@
"type": "EmbeddingModel"
},
"dragging": false,
- "id": "EmbeddingModel-cxG9r",
+ "id": "EmbeddingModel-eZ6bT",
"measured": {
- "height": 366,
+ "height": 369,
"width": 320
},
"position": {
- "x": 1743.8608432729177,
- "y": 1808.780792406514
+ "x": 1726.6943524438122,
+ "y": 1800.5330404375484
},
- "selected": false,
+ "selected": true,
"type": "genericNode"
}
],
diff --git a/frontend/components/label-wrapper.tsx b/frontend/components/label-wrapper.tsx
index ab785c5c..691b7726 100644
--- a/frontend/components/label-wrapper.tsx
+++ b/frontend/components/label-wrapper.tsx
@@ -10,18 +10,25 @@ export function LabelWrapper({
id,
required,
flex,
+ start,
children,
}: {
label: string;
description?: string;
- helperText?: string;
+ helperText?: string | React.ReactNode;
id: string;
required?: boolean;
flex?: boolean;
+ start?: boolean;
children: React.ReactNode;
}) {
return (
-
+ Reuse the key from your environment config.
+
+ Uncheck to enter a different key.
+ >
+ }
flex
+ start
>
-
@@ -86,6 +94,7 @@ export function OpenAIOnboarding({
)}
{modelsError && (
-
+
Invalid OpenAI API key. Verify or replace the key.
)}
diff --git a/src/api/settings.py b/src/api/settings.py
index 3e242c4b..c2c7cbd0 100644
--- a/src/api/settings.py
+++ b/src/api/settings.py
@@ -556,6 +556,19 @@ async def onboarding(request, flows_service):
)
# Continue even if setting global variables fails
+ # Initialize the OpenSearch index now that we have the embedding model configured
+ try:
+ # Import here to avoid circular imports
+ from main import init_index
+
+ logger.info("Initializing OpenSearch index after onboarding configuration")
+ await init_index()
+ logger.info("OpenSearch index initialization completed successfully")
+ except Exception as e:
+ logger.error("Failed to initialize OpenSearch index after onboarding", error=str(e))
+ # Don't fail the entire onboarding process if index creation fails
+ # The application can still work, but document operations may fail
+
# Handle sample data ingestion if requested
if should_ingest_sample_data:
try:
diff --git a/src/config/config_manager.py b/src/config/config_manager.py
index 055d48a7..0b814470 100644
--- a/src/config/config_manager.py
+++ b/src/config/config_manager.py
@@ -16,6 +16,8 @@ class ProviderConfig:
model_provider: str = "openai" # openai, anthropic, etc.
api_key: str = ""
+ endpoint: str = "" # For providers like Watson/IBM that need custom endpoints
+ project_id: str = "" # For providers like Watson/IBM that need project IDs
@dataclass
@@ -129,6 +131,10 @@ class ConfigManager:
config_data["provider"]["model_provider"] = os.getenv("MODEL_PROVIDER")
if os.getenv("PROVIDER_API_KEY"):
config_data["provider"]["api_key"] = os.getenv("PROVIDER_API_KEY")
+ if os.getenv("PROVIDER_ENDPOINT"):
+ config_data["provider"]["endpoint"] = os.getenv("PROVIDER_ENDPOINT")
+ if os.getenv("PROVIDER_PROJECT_ID"):
+ config_data["provider"]["project_id"] = os.getenv("PROVIDER_PROJECT_ID")
# Backward compatibility for OpenAI
if os.getenv("OPENAI_API_KEY"):
config_data["provider"]["api_key"] = os.getenv("OPENAI_API_KEY")
diff --git a/src/config/settings.py b/src/config/settings.py
index 5f9b189d..3bf1e6cf 100644
--- a/src/config/settings.py
+++ b/src/config/settings.py
@@ -78,6 +78,31 @@ INDEX_NAME = "documents"
VECTOR_DIM = 1536
EMBED_MODEL = "text-embedding-3-small"
+OPENAI_EMBEDDING_DIMENSIONS = {
+ "text-embedding-3-small": 1536,
+ "text-embedding-3-large": 3072,
+ "text-embedding-ada-002": 1536,
+ }
+
+OLLAMA_EMBEDDING_DIMENSIONS = {
+ "nomic-embed-text": 768,
+ "all-minilm": 384,
+ "mxbai-embed-large": 1024,
+}
+
+WATSONX_EMBEDDING_DIMENSIONS = {
+    # IBM Models
+    "ibm/granite-embedding-107m-multilingual": 384,
+    "ibm/granite-embedding-278m-multilingual": 1024,
+    "ibm/slate-125m-english-rtrvr": 768,
+    "ibm/slate-125m-english-rtrvr-v2": 768,
+    "ibm/slate-30m-english-rtrvr": 384,
+    "ibm/slate-30m-english-rtrvr-v2": 384,
+    # Third Party Models
+    "intfloat/multilingual-e5-large": 1024,
+    "sentence-transformers/all-minilm-l6-v2": 384,
+}
+
INDEX_BODY = {
"settings": {
"index": {"knn": True},
diff --git a/src/main.py b/src/main.py
index 90add401..69f2ad9f 100644
--- a/src/main.py
+++ b/src/main.py
@@ -2,6 +2,7 @@
from connectors.langflow_connector_service import LangflowConnectorService
from connectors.service import ConnectorService
from services.flows_service import FlowsService
+from utils.embeddings import create_dynamic_index_body
from utils.logging_config import configure_from_env, get_logger
configure_from_env()
@@ -52,11 +53,11 @@ from auth_middleware import optional_auth, require_auth
from config.settings import (
DISABLE_INGEST_WITH_LANGFLOW,
EMBED_MODEL,
- INDEX_BODY,
INDEX_NAME,
SESSION_SECRET,
clients,
is_no_auth_mode,
+ get_openrag_config,
)
from services.auth_service import AuthService
from services.langflow_mcp_service import LangflowMCPService
@@ -81,7 +82,6 @@ logger.info(
cuda_version=torch.version.cuda,
)
-
async def wait_for_opensearch():
"""Wait for OpenSearch to be ready with retries"""
max_retries = 30
@@ -132,12 +132,19 @@ async def init_index():
"""Initialize OpenSearch index and security roles"""
await wait_for_opensearch()
+ # Get the configured embedding model from user configuration
+ config = get_openrag_config()
+ embedding_model = config.knowledge.embedding_model
+
+ # Create dynamic index body based on the configured embedding model
+ dynamic_index_body = create_dynamic_index_body(embedding_model)
+
# Create documents index
if not await clients.opensearch.indices.exists(index=INDEX_NAME):
- await clients.opensearch.indices.create(index=INDEX_NAME, body=INDEX_BODY)
- logger.info("Created OpenSearch index", index_name=INDEX_NAME)
+ await clients.opensearch.indices.create(index=INDEX_NAME, body=dynamic_index_body)
+ logger.info("Created OpenSearch index", index_name=INDEX_NAME, embedding_model=embedding_model)
else:
- logger.info("Index already exists, skipping creation", index_name=INDEX_NAME)
+ logger.info("Index already exists, skipping creation", index_name=INDEX_NAME, embedding_model=embedding_model)
# Create knowledge filters index
knowledge_filter_index_name = "knowledge_filters"
@@ -391,7 +398,12 @@ async def _ingest_default_documents_openrag(services, file_paths):
async def startup_tasks(services):
"""Startup tasks"""
logger.info("Starting startup tasks")
- await init_index()
+ # Only initialize basic OpenSearch connection, not the index
+ # Index will be created after onboarding when we know the embedding model
+ await wait_for_opensearch()
+
+ # Configure alerting security
+ await configure_alerting_security()
async def initialize_services():
diff --git a/src/services/flows_service.py b/src/services/flows_service.py
index 0d7a7bc8..7397cf6b 100644
--- a/src/services/flows_service.py
+++ b/src/services/flows_service.py
@@ -1,3 +1,4 @@
+import asyncio
from config.settings import (
NUDGES_FLOW_ID,
LANGFLOW_URL,
@@ -19,6 +20,7 @@ from config.settings import (
WATSONX_LLM_COMPONENT_ID,
OLLAMA_EMBEDDING_COMPONENT_ID,
OLLAMA_LLM_COMPONENT_ID,
+ get_openrag_config,
)
import json
import os
@@ -29,6 +31,74 @@ logger = get_logger(__name__)
class FlowsService:
+ def __init__(self):
+ # Cache for flow file mappings to avoid repeated filesystem scans
+ self._flow_file_cache = {}
+
+ def _get_flows_directory(self):
+ """Get the flows directory path"""
+ current_file_dir = os.path.dirname(os.path.abspath(__file__)) # src/services/
+ src_dir = os.path.dirname(current_file_dir) # src/
+ project_root = os.path.dirname(src_dir) # project root
+ return os.path.join(project_root, "flows")
+
+ def _find_flow_file_by_id(self, flow_id: str):
+ """
+ Scan the flows directory and find the JSON file that contains the specified flow ID.
+
+ Args:
+ flow_id: The flow ID to search for
+
+ Returns:
+ str: The path to the flow file, or None if not found
+ """
+ if not flow_id:
+ raise ValueError("flow_id is required")
+
+ # Check cache first
+ if flow_id in self._flow_file_cache:
+ cached_path = self._flow_file_cache[flow_id]
+ if os.path.exists(cached_path):
+ return cached_path
+ else:
+ # Remove stale cache entry
+ del self._flow_file_cache[flow_id]
+
+ flows_dir = self._get_flows_directory()
+
+ if not os.path.exists(flows_dir):
+ logger.warning(f"Flows directory not found: {flows_dir}")
+ return None
+
+ # Scan all JSON files in the flows directory
+ try:
+ for filename in os.listdir(flows_dir):
+ if not filename.endswith('.json'):
+ continue
+
+ file_path = os.path.join(flows_dir, filename)
+
+ try:
+ with open(file_path, 'r') as f:
+ flow_data = json.load(f)
+
+ # Check if this file contains the flow we're looking for
+ if flow_data.get('id') == flow_id:
+ # Cache the result
+ self._flow_file_cache[flow_id] = file_path
+                        logger.info(f"Found flow {flow_id} in file: {file_path}")
+ return file_path
+
+ except (json.JSONDecodeError, FileNotFoundError) as e:
+                    logger.warning(f"Error reading flow file {file_path}: {e}")
+ continue
+
+ except Exception as e:
+ logger.error(f"Error scanning flows directory: {e}")
+ return None
+
+ logger.warning(f"Flow with ID {flow_id} not found in flows directory")
+ return None
async def reset_langflow_flow(self, flow_type: str):
"""Reset a Langflow flow by uploading the corresponding JSON file
@@ -41,59 +111,35 @@ class FlowsService:
if not LANGFLOW_URL:
raise ValueError("LANGFLOW_URL environment variable is required")
- # Determine flow file and ID based on type
+ # Determine flow ID based on type
if flow_type == "nudges":
- flow_file = "flows/openrag_nudges.json"
flow_id = NUDGES_FLOW_ID
elif flow_type == "retrieval":
- flow_file = "flows/openrag_agent.json"
flow_id = LANGFLOW_CHAT_FLOW_ID
elif flow_type == "ingest":
- flow_file = "flows/ingestion_flow.json"
flow_id = LANGFLOW_INGEST_FLOW_ID
else:
raise ValueError(
"flow_type must be either 'nudges', 'retrieval', or 'ingest'"
)
+ if not flow_id:
+ raise ValueError(f"Flow ID not configured for flow_type '{flow_type}'")
+
+ # Dynamically find the flow file by ID
+ flow_path = self._find_flow_file_by_id(flow_id)
+ if not flow_path:
+ raise FileNotFoundError(f"Flow file not found for flow ID: {flow_id}")
+
# Load flow JSON file
try:
- # Get the project root directory (go up from src/services/ to project root)
- # __file__ is src/services/chat_service.py
- # os.path.dirname(__file__) is src/services/
- # os.path.dirname(os.path.dirname(__file__)) is src/
- # os.path.dirname(os.path.dirname(os.path.dirname(__file__))) is project root
- current_file_dir = os.path.dirname(
- os.path.abspath(__file__)
- ) # src/services/
- src_dir = os.path.dirname(current_file_dir) # src/
- project_root = os.path.dirname(src_dir) # project root
- flow_path = os.path.join(project_root, flow_file)
-
- if not os.path.exists(flow_path):
- # List contents of project root to help debug
- try:
- contents = os.listdir(project_root)
- logger.info(f"Project root contents: {contents}")
-
- flows_dir = os.path.join(project_root, "flows")
- if os.path.exists(flows_dir):
- flows_contents = os.listdir(flows_dir)
- logger.info(f"Flows directory contents: {flows_contents}")
- else:
- logger.info("Flows directory does not exist")
- except Exception as e:
- logger.error(f"Error listing directory contents: {e}")
-
- raise FileNotFoundError(f"Flow file not found at: {flow_path}")
-
with open(flow_path, "r") as f:
flow_data = json.load(f)
- logger.info(f"Successfully loaded flow data from {flow_file}")
+ logger.info(f"Successfully loaded flow data for {flow_type} from {os.path.basename(flow_path)}")
+ except json.JSONDecodeError as e:
+ raise ValueError(f"Invalid JSON in flow file {flow_path}: {e}")
except FileNotFoundError:
raise ValueError(f"Flow file not found: {flow_path}")
- except json.JSONDecodeError as e:
- raise ValueError(f"Invalid JSON in flow file {flow_file}: {e}")
# Make PATCH request to Langflow API to update the flow using shared client
try:
@@ -106,8 +152,54 @@ class FlowsService:
logger.info(
f"Successfully reset {flow_type} flow",
flow_id=flow_id,
- flow_file=flow_file,
+ flow_file=os.path.basename(flow_path),
)
+
+ # Now update the flow with current configuration settings
+ try:
+ config = get_openrag_config()
+
+ # Check if configuration has been edited (onboarding completed)
+ if config.edited:
+ logger.info(f"Updating {flow_type} flow with current configuration settings")
+
+ provider = config.provider.model_provider.lower()
+
+ # Step 1: Assign model provider (replace components) if not OpenAI
+ if provider != "openai":
+ logger.info(f"Assigning {provider} components to {flow_type} flow")
+ provider_result = await self.assign_model_provider(provider)
+
+ if not provider_result.get("success"):
+ logger.warning(f"Failed to assign {provider} components: {provider_result.get('error', 'Unknown error')}")
+ # Continue anyway, maybe just value updates will work
+
+ # Step 2: Update model values for the specific flow being reset
+ single_flow_config = [{
+ "name": flow_type,
+ "flow_id": flow_id,
+ }]
+
+ logger.info(f"Updating {flow_type} flow model values")
+ update_result = await self.change_langflow_model_value(
+ provider=provider,
+ embedding_model=config.knowledge.embedding_model,
+ llm_model=config.agent.llm_model,
+ endpoint=config.provider.endpoint if config.provider.endpoint else None,
+ flow_configs=single_flow_config
+ )
+
+ if update_result.get("success"):
+ logger.info(f"Successfully updated {flow_type} flow with current configuration")
+ else:
+ logger.warning(f"Failed to update {flow_type} flow with current configuration: {update_result.get('error', 'Unknown error')}")
+ else:
+ logger.info(f"Configuration not yet edited (onboarding not completed), skipping model updates for {flow_type} flow")
+
+ except Exception as e:
+ logger.error(f"Error updating {flow_type} flow with current configuration", error=str(e))
+ # Don't fail the entire reset operation if configuration update fails
+
return {
"success": True,
"message": f"Successfully reset {flow_type} flow",
@@ -155,11 +247,10 @@ class FlowsService:
logger.info(f"Assigning {provider} components")
- # Define flow configurations
+ # Define flow configurations (removed hardcoded file paths)
flow_configs = [
{
"name": "nudges",
- "file": "flows/openrag_nudges.json",
"flow_id": NUDGES_FLOW_ID,
"embedding_id": OPENAI_EMBEDDING_COMPONENT_ID,
"llm_id": OPENAI_LLM_COMPONENT_ID,
@@ -167,7 +258,6 @@ class FlowsService:
},
{
"name": "retrieval",
- "file": "flows/openrag_agent.json",
"flow_id": LANGFLOW_CHAT_FLOW_ID,
"embedding_id": OPENAI_EMBEDDING_COMPONENT_ID,
"llm_id": OPENAI_LLM_COMPONENT_ID,
@@ -175,7 +265,6 @@ class FlowsService:
},
{
"name": "ingest",
- "file": "flows/ingestion_flow.json",
"flow_id": LANGFLOW_INGEST_FLOW_ID,
"embedding_id": OPENAI_EMBEDDING_COMPONENT_ID,
"llm_id": None, # Ingestion flow might not have LLM
@@ -272,7 +361,6 @@ class FlowsService:
async def _update_flow_components(self, config, llm_template, embedding_template, llm_text_template):
"""Update components in a specific flow"""
flow_name = config["name"]
- flow_file = config["file"]
flow_id = config["flow_id"]
old_embedding_id = config["embedding_id"]
old_llm_id = config["llm_id"]
@@ -281,14 +369,11 @@ class FlowsService:
new_llm_id = llm_template["data"]["id"]
new_embedding_id = embedding_template["data"]["id"]
new_llm_text_id = llm_text_template["data"]["id"]
- # Get the project root directory
- current_file_dir = os.path.dirname(os.path.abspath(__file__))
- src_dir = os.path.dirname(current_file_dir)
- project_root = os.path.dirname(src_dir)
- flow_path = os.path.join(project_root, flow_file)
- if not os.path.exists(flow_path):
- raise FileNotFoundError(f"Flow file not found at: {flow_path}")
+ # Dynamically find the flow file by ID
+ flow_path = self._find_flow_file_by_id(flow_id)
+ if not flow_path:
+ raise FileNotFoundError(f"Flow file not found for flow ID: {flow_id}")
# Load flow JSON
with open(flow_path, "r") as f:
@@ -527,16 +612,17 @@ class FlowsService:
return False
async def change_langflow_model_value(
- self, provider: str, embedding_model: str, llm_model: str, endpoint: str = None
+ self, provider: str, embedding_model: str, llm_model: str, endpoint: str = None, flow_configs: list = None
):
"""
- Change dropdown values for provider-specific components across all flows
+ Change dropdown values for provider-specific components across flows
Args:
provider: The provider ("watsonx", "ollama", "openai")
embedding_model: The embedding model name to set
llm_model: The LLM model name to set
endpoint: The endpoint URL (required for watsonx/ibm provider)
+ flow_configs: Optional list of specific flow configs to update. If None, updates all flows.
Returns:
dict: Success/error response with details for each flow
@@ -552,24 +638,22 @@ class FlowsService:
f"Changing dropdown values for provider {provider}, embedding: {embedding_model}, llm: {llm_model}, endpoint: {endpoint}"
)
- # Define flow configurations with provider-specific component IDs
- flow_configs = [
- {
- "name": "nudges",
- "file": "flows/openrag_nudges.json",
- "flow_id": NUDGES_FLOW_ID,
- },
- {
- "name": "retrieval",
- "file": "flows/openrag_agent.json",
- "flow_id": LANGFLOW_CHAT_FLOW_ID,
- },
- {
- "name": "ingest",
- "file": "flows/ingestion_flow.json",
- "flow_id": LANGFLOW_INGEST_FLOW_ID,
- },
- ]
+ # Use provided flow_configs or default to all flows
+ if flow_configs is None:
+ flow_configs = [
+ {
+ "name": "nudges",
+ "flow_id": NUDGES_FLOW_ID,
+ },
+ {
+ "name": "retrieval",
+ "flow_id": LANGFLOW_CHAT_FLOW_ID,
+ },
+ {
+ "name": "ingest",
+ "flow_id": LANGFLOW_INGEST_FLOW_ID,
+ },
+ ]
# Determine target component IDs based on provider
target_embedding_id, target_llm_id, target_llm_text_id = self._get_provider_component_ids(
diff --git a/src/utils/embeddings.py b/src/utils/embeddings.py
new file mode 100644
index 00000000..f3c902e7
--- /dev/null
+++ b/src/utils/embeddings.py
@@ -0,0 +1,64 @@
+from config.settings import OLLAMA_EMBEDDING_DIMENSIONS, OPENAI_EMBEDDING_DIMENSIONS, VECTOR_DIM, WATSONX_EMBEDDING_DIMENSIONS
+from utils.logging_config import get_logger
+
+
+logger = get_logger(__name__)
+
+def get_embedding_dimensions(model_name: str) -> int:
+ """Get the embedding dimensions for a given model name."""
+
+ # Check all model dictionaries
+ all_models = {**OPENAI_EMBEDDING_DIMENSIONS, **OLLAMA_EMBEDDING_DIMENSIONS, **WATSONX_EMBEDDING_DIMENSIONS}
+
+ if model_name in all_models:
+ dimensions = all_models[model_name]
+ logger.info(f"Found dimensions for model '{model_name}': {dimensions}")
+ return dimensions
+
+ logger.warning(
+ f"Unknown embedding model '{model_name}', using default dimensions: {VECTOR_DIM}"
+ )
+ return VECTOR_DIM
+
+
+def create_dynamic_index_body(embedding_model: str) -> dict:
+ """Create a dynamic index body configuration based on the embedding model."""
+ dimensions = get_embedding_dimensions(embedding_model)
+
+ return {
+ "settings": {
+ "index": {"knn": True},
+ "number_of_shards": 1,
+ "number_of_replicas": 1,
+ },
+ "mappings": {
+ "properties": {
+ "document_id": {"type": "keyword"},
+ "filename": {"type": "keyword"},
+ "mimetype": {"type": "keyword"},
+ "page": {"type": "integer"},
+ "text": {"type": "text"},
+ "chunk_embedding": {
+ "type": "knn_vector",
+ "dimension": dimensions,
+ "method": {
+ "name": "disk_ann",
+ "engine": "jvector",
+ "space_type": "l2",
+ "parameters": {"ef_construction": 100, "m": 16},
+ },
+ },
+ "source_url": {"type": "keyword"},
+ "connector_type": {"type": "keyword"},
+ "owner": {"type": "keyword"},
+ "allowed_users": {"type": "keyword"},
+ "allowed_groups": {"type": "keyword"},
+ "user_permissions": {"type": "object"},
+ "group_permissions": {"type": "object"},
+ "created_time": {"type": "date"},
+ "modified_time": {"type": "date"},
+ "indexed_time": {"type": "date"},
+ "metadata": {"type": "object"},
+ }
+ },
+ }
\ No newline at end of file
From a67aaaacd601c54198a43c169cc27396317b8f2f Mon Sep 17 00:00:00 2001
From: Mendon Kissling <59585235+mendonk@users.noreply.github.com>
Date: Fri, 26 Sep 2025 12:14:42 -0400
Subject: [PATCH 09/10] remove-multiple-map
---
docs/docs/core-components/knowledge.mdx | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/docs/core-components/knowledge.mdx b/docs/docs/core-components/knowledge.mdx
index d8160f8b..b5507eef 100644
--- a/docs/docs/core-components/knowledge.mdx
+++ b/docs/docs/core-components/knowledge.mdx
@@ -30,7 +30,7 @@ OpenRAG supports knowledge ingestion through direct file uploads and OAuth conne
The **Knowledge Ingest** flow uses Langflow's [**File** component](https://docs.langflow.org/components-data#file) to split and embed files loaded from your local machine into the OpenSearch database.
-The default path to your local folder is mounted from the `./documents` folder in your OpenRAG project directory to the `/app/documents/` directory inside the Docker container. Files added to the host or the container will be visible in both locations. To configure this location, modify the **Documents Paths** variable in either the TUI's [Advanced Setup](/install#advanced-setup) or in the `.env` used by Docker Compose. Add multiple paths in a comma-separated list with no spaces. For example, `./documents,/Users/username/Documents`.
+The default path to your local folder is mounted from the `./documents` folder in your OpenRAG project directory to the `/app/documents/` directory inside the Docker container. Files added to the host or the container will be visible in both locations. To configure this location, modify the **Documents Paths** variable in either the TUI's [Advanced Setup](/install#advanced-setup) or in the `.env` used by Docker Compose.
To load and process a single file from the mapped location, click **Add Knowledge**, and then click **Add File**.
The file is loaded into your OpenSearch database, and appears in the Knowledge page.
From f424e08801b34054f82d1b3586e02d4f2872d3fe Mon Sep 17 00:00:00 2001
From: Mendon Kissling <59585235+mendonk@users.noreply.github.com>
Date: Fri, 26 Sep 2025 13:35:42 -0400
Subject: [PATCH 10/10] no-static-default-onboarding-behavior
---
docs/docs/core-components/knowledge.mdx | 13 ++++++-------
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/docs/docs/core-components/knowledge.mdx b/docs/docs/core-components/knowledge.mdx
index b5507eef..255b0b68 100644
--- a/docs/docs/core-components/knowledge.mdx
+++ b/docs/docs/core-components/knowledge.mdx
@@ -134,15 +134,14 @@ A new filter is created with default settings that match everything.
## OpenRAG default configuration
-OpenRAG creates a specialized OpenSearch index called `documents` with the values defined at `src/config/settings.py`.
-* **Vector Dimensions**: 1536-dimensional embeddings using OpenAI's `text-embedding-3-small` model.
-* **KNN Vector Type**: Uses `knn_vector` field type with `disk_ann` method and `jvector` engine.
-* **Distance Metric**: L2 (Euclidean) distance for vector similarity.
-* **Performance Optimization**: Configured with `ef_construction: 100` and `m: 16` parameters.
-
-OpenRAG supports hybrid search, which combines semantic and keyword search.
+OpenRAG automatically detects and configures the correct vector dimensions for embedding models, ensuring optimal search performance and compatibility.
+The complete list of supported models is available at [/src/services/models_service.py](https://github.com/langflow-ai/openrag/blob/main/src/services/models_service.py).
+You can use custom embedding models by specifying them in your configuration.
+If you use an unknown embedding model, OpenRAG automatically falls back to `1536` dimensions and logs a warning. The system continues to work, but search quality may be affected if the model's actual dimensions differ from `1536`.
+The default embedding dimension is `1536` and the default model is `text-embedding-3-small`.
+For models with known vector dimensions, see [/src/config/settings.py](https://github.com/langflow-ai/openrag/blob/main/src/config/settings.py).
\ No newline at end of file