diff --git a/Dockerfile.langflow b/Dockerfile.langflow
index 6d868f14..2acb4877 100644
--- a/Dockerfile.langflow
+++ b/Dockerfile.langflow
@@ -33,8 +33,8 @@ RUN uv sync --frozen --no-install-project --no-editable --extra postgresql
# Build frontend
WORKDIR /app/src/frontend
-RUN npm ci && \
- npm run build && \
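+# Raise Node's heap limit so the frontend install and build don't fail with out-of-memory errors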
+RUN NODE_OPTIONS=--max_old_space_size=4096 npm ci && \
+ NODE_OPTIONS=--max_old_space_size=4096 npm run build && \
mkdir -p /app/src/backend/base/langflow/frontend && \
cp -r build/* /app/src/backend/base/langflow/frontend/
diff --git a/README.md b/README.md
index df1d6451..a0178f28 100644
--- a/README.md
+++ b/README.md
@@ -62,7 +62,7 @@ LANGFLOW_CHAT_FLOW_ID=your_chat_flow_id
LANGFLOW_INGEST_FLOW_ID=your_ingest_flow_id
NUDGES_FLOW_ID=your_nudges_flow_id
```
-See extended configuration, including ingestion and optional variables: [docs/configure/configuration.md](docs/docs/configure/configuration.md)
+See extended configuration, including ingestion and optional variables: [docs/reference/configuration.mdx](docs/docs/reference/configuration.mdx)
### 3. Start OpenRAG
```bash
diff --git a/config/config.example.yaml b/config/config.example.yaml
deleted file mode 100644
index 410025e7..00000000
--- a/config/config.example.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-# OpenRAG Configuration File
-provider:
- model_provider: "openai" # openai, anthropic, azure, etc.
- api_key: "your-api-key" # or use OPENAI_API_KEY env var
-
-knowledge:
- embedding_model: "text-embedding-3-small"
- chunk_size: 1000
- chunk_overlap: 200
- ocr: true
- picture_descriptions: false
-
-agent:
- llm_model: "gpt-4o-mini"
- system_prompt: "You are a helpful AI assistant..."
\ No newline at end of file
diff --git a/docker-compose-cpu.yml b/docker-compose-cpu.yml
index 9b0ff88b..d0de6ce9 100644
--- a/docker-compose-cpu.yml
+++ b/docker-compose-cpu.yml
@@ -74,7 +74,6 @@ services:
- ./documents:/app/documents:Z
- ./keys:/app/keys:Z
- ./flows:/app/flows:Z
- - ./config:/app/config:z
openrag-frontend:
image: phact/openrag-frontend:${OPENRAG_VERSION:-latest}
diff --git a/docker-compose.yml b/docker-compose.yml
index 34a5947f..daa921ae 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -73,7 +73,6 @@ services:
- ./documents:/app/documents:Z
- ./keys:/app/keys:Z
- ./flows:/app/flows:z
- - ./config:/app/config:z
gpus: all
openrag-frontend:
diff --git a/docs/docs/_partial-modify-flows.mdx b/docs/docs/_partial-modify-flows.mdx
index 852777e5..02ec1502 100644
--- a/docs/docs/_partial-modify-flows.mdx
+++ b/docs/docs/_partial-modify-flows.mdx
@@ -2,4 +2,4 @@ import Icon from "@site/src/components/icon/icon";
All flows included with OpenRAG are designed to be modular, performant, and provider-agnostic.
To modify a flow, click **Settings**, and click **Edit in Langflow**.
-Flows are edited in the same way as in the [Langflow visual editor](https://docs.langflow.org/concepts-overview).
\ No newline at end of file
+OpenRAG's visual editor is based on the [Langflow visual editor](https://docs.langflow.org/concepts-overview), so you can edit your flows to match your specific use case.
\ No newline at end of file
diff --git a/docs/docs/_partial-onboarding.mdx b/docs/docs/_partial-onboarding.mdx
new file mode 100644
index 00000000..44222371
--- /dev/null
+++ b/docs/docs/_partial-onboarding.mdx
@@ -0,0 +1,49 @@
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+## Application onboarding
+
+The first time you start OpenRAG, whether using the TUI or a `.env` file, you must complete application onboarding.
+
+Most values from onboarding can be changed later in the OpenRAG **Settings** page, but there are important restrictions.
+
+The **language model provider** and **embeddings model provider** can only be selected at onboarding, and you must use the same provider for your language model and embedding model.
+To change your provider selection later, you must completely reinstall OpenRAG.
+
+The **language model** can be changed later in **Settings**, but the **embeddings model** cannot.
+
+<Tabs>
+<TabItem value="openai" label="OpenAI">
+ 1. Enable **Get API key from environment variable** to automatically enter your key from the TUI-generated `.env` file.
+ 2. Under **Advanced settings**, select your **Embedding Model** and **Language Model**.
+ 3. To load 2 sample PDFs, enable **Sample dataset**.
+ This is recommended, but not required.
+ 4. Click **Complete**.
+ 5. Continue with the [Quickstart](/quickstart).
+</TabItem>
+<TabItem value="watsonx" label="IBM watsonx.ai">
+
+ 1. Complete the fields for **watsonx.ai API Endpoint**, **IBM API key**, and **IBM Project ID**.
+ These values are found in your IBM watsonx deployment.
+ 2. Under **Advanced settings**, select your **Embedding Model** and **Language Model**.
+ 3. To load 2 sample PDFs, enable **Sample dataset**.
+ This is recommended, but not required.
+ 4. Click **Complete**.
+ 5. Continue with the [Quickstart](/quickstart).
+</TabItem>
+<TabItem value="ollama" label="Ollama">
+
+ :::tip
+ Ollama is not included with OpenRAG. To install Ollama, see the [Ollama documentation](https://docs.ollama.com/).
+ :::
+ 1. Enter your Ollama server's base URL address.
+ The default Ollama server address is `http://localhost:11434`.
+ Because OpenRAG runs in a container, it automatically transforms `localhost` (for example, to `http://host.docker.internal:11434`) so it can reach services outside of the container, and sends a test connection to your Ollama server to confirm connectivity.
+ 2. Select the **Embedding Model** and **Language Model** your Ollama server is running.
+ OpenRAG retrieves the available models from your Ollama server.
+ 3. To load 2 sample PDFs, enable **Sample dataset**.
+ This is recommended, but not required.
+ 4. Click **Complete**.
+ 5. Continue with the [Quickstart](/quickstart).
+</TabItem>
+</Tabs>
\ No newline at end of file
diff --git a/docs/docs/configure/configuration.mdx b/docs/docs/configure/configuration.mdx
deleted file mode 100644
index d8058254..00000000
--- a/docs/docs/configure/configuration.mdx
+++ /dev/null
@@ -1,110 +0,0 @@
----
-title: Configuration
-slug: /configure/configuration
----
-
-import PartialExternalPreview from '@site/docs/_partial-external-preview.mdx';
-
-
-
-OpenRAG supports multiple configuration methods with the following priority:
-
-1. **Environment Variables** (highest priority)
-2. **Configuration File** (`config.yaml`)
-3. **Langflow Flow Settings** (runtime override)
-4. **Default Values** (fallback)
-
-## Configuration File
-
-Create a `config.yaml` file in the project root to configure OpenRAG:
-
-```yaml
-# OpenRAG Configuration File
-provider:
- model_provider: "openai" # openai, anthropic, azure, etc.
- api_key: "your-api-key" # or use OPENAI_API_KEY env var
-
-knowledge:
- embedding_model: "text-embedding-3-small"
- chunk_size: 1000
- chunk_overlap: 200
- ocr: true
- picture_descriptions: false
-
-agent:
- llm_model: "gpt-4o-mini"
- system_prompt: "You are a helpful AI assistant..."
-```
-
-## Environment Variables
-
-Environment variables will override configuration file settings. You can still use `.env` files:
-
-```bash
-cp .env.example .env
-```
-
-## Required Variables
-
-| Variable | Description |
-| ----------------------------- | ------------------------------------------- |
-| `OPENAI_API_KEY` | Your OpenAI API key |
-| `OPENSEARCH_PASSWORD` | Password for OpenSearch admin user |
-| `LANGFLOW_SUPERUSER` | Langflow admin username |
-| `LANGFLOW_SUPERUSER_PASSWORD` | Langflow admin password |
-| `LANGFLOW_CHAT_FLOW_ID` | ID of your Langflow chat flow |
-| `LANGFLOW_INGEST_FLOW_ID` | ID of your Langflow ingestion flow |
-| `NUDGES_FLOW_ID` | ID of your Langflow nudges/suggestions flow |
-
-## Ingestion Configuration
-
-| Variable | Description |
-| ------------------------------ | ------------------------------------------------------ |
-| `DISABLE_INGEST_WITH_LANGFLOW` | Disable Langflow ingestion pipeline (default: `false`) |
-
-- `false` or unset: Uses Langflow pipeline (upload → ingest → delete)
-- `true`: Uses traditional OpenRAG processor for document ingestion
-
-## Optional Variables
-
-| Variable | Description |
-| ------------------------------------------------------------------------- | ------------------------------------------------------------------ |
-| `LANGFLOW_PUBLIC_URL` | Public URL for Langflow (default: `http://localhost:7860`) |
-| `GOOGLE_OAUTH_CLIENT_ID` / `GOOGLE_OAUTH_CLIENT_SECRET` | Google OAuth authentication |
-| `MICROSOFT_GRAPH_OAUTH_CLIENT_ID` / `MICROSOFT_GRAPH_OAUTH_CLIENT_SECRET` | Microsoft OAuth |
-| `WEBHOOK_BASE_URL` | Base URL for webhook endpoints |
-| `AWS_ACCESS_KEY_ID` / `AWS_SECRET_ACCESS_KEY` | AWS integrations |
-| `SESSION_SECRET` | Session management (default: auto-generated, change in production) |
-| `LANGFLOW_KEY` | Explicit Langflow API key (auto-generated if not provided) |
-| `LANGFLOW_SECRET_KEY` | Secret key for Langflow internal operations |
-
-## OpenRAG Configuration Variables
-
-These environment variables override settings in `config.yaml`:
-
-### Provider Settings
-
-| Variable | Description | Default |
-| ------------------ | ---------------------------------------- | -------- |
-| `MODEL_PROVIDER` | Model provider (openai, anthropic, etc.) | `openai` |
-| `PROVIDER_API_KEY` | API key for the model provider | |
-| `OPENAI_API_KEY` | OpenAI API key (backward compatibility) | |
-
-### Knowledge Settings
-
-| Variable | Description | Default |
-| ------------------------------ | --------------------------------------- | ------------------------ |
-| `EMBEDDING_MODEL` | Embedding model for vector search | `text-embedding-3-small` |
-| `CHUNK_SIZE` | Text chunk size for document processing | `1000` |
-| `CHUNK_OVERLAP` | Overlap between chunks | `200` |
-| `OCR_ENABLED` | Enable OCR for image processing | `true` |
-| `PICTURE_DESCRIPTIONS_ENABLED` | Enable picture descriptions | `false` |
-
-### Agent Settings
-
-| Variable | Description | Default |
-| --------------- | --------------------------------- | ------------------------ |
-| `LLM_MODEL` | Language model for the chat agent | `gpt-4o-mini` |
-| `SYSTEM_PROMPT` | System prompt for the agent | Default assistant prompt |
-
-See `.env.example` for a complete list with descriptions, and `docker-compose*.yml` for runtime usage.
diff --git a/docs/docs/core-components/agents.mdx b/docs/docs/core-components/agents.mdx
index abfedc8a..3ee4617b 100644
--- a/docs/docs/core-components/agents.mdx
+++ b/docs/docs/core-components/agents.mdx
@@ -1,5 +1,5 @@
---
-title: Agents powered by Langflow
+title: Langflow Agents
slug: /agents
---
@@ -13,9 +13,13 @@ import PartialExternalPreview from '@site/docs/_partial-external-preview.mdx';
OpenRAG leverages Langflow's Agent component to power the OpenRAG OpenSearch Agent flow.
-This flow intelligently chats with your knowledge by embedding your query, comparing it the vector database embeddings, and generating a response with the LLM.
+[Flows](https://docs.langflow.org/concepts-overview) in Langflow are functional representations of application workflows, with multiple [component](https://docs.langflow.org/concepts-components) nodes connected as single steps in a workflow.
-The Agent component shines here in its ability to make decisions on not only what query should be sent, but when a query is necessary to solve the problem at hand.
+In the OpenRAG OpenSearch Agent flow, components like the Langflow [**Agent** component](https://docs.langflow.org/agents) and [**OpenSearch** component](https://docs.langflow.org/bundles-elastic#opensearch) are connected to intelligently chat with your knowledge by embedding your query, comparing it to the vector database embeddings, and generating a response with the LLM.
+
+
+
+The Agent component shines here in its ability to decide not only what query should be sent, but also when a query is necessary to solve the problem at hand.
How do agents work?
@@ -33,22 +37,32 @@ In an agentic context, tools are functions that the agent can run to perform tas
## Use the OpenRAG OpenSearch Agent flow
If you've chatted with your knowledge in OpenRAG, you've already experienced the OpenRAG OpenSearch Agent chat flow.
-To view the flow, click **Settings**, and then click **Edit in Langflow**.
-This flow contains seven components:
+To open the OpenRAG OpenSearch Agent flow in the [Langflow visual editor](https://docs.langflow.org/concepts-overview), click **Settings**, and then click **Edit in Langflow**.
+This flow contains seven components connected to chat with your data:
-* The Agent component orchestrates the entire flow by deciding when to search the knowledge base, how to formulate search queries, and how to combine retrieved information with the user's question to generate a comprehensive response.
-The Agent behaves according to the prompt in the **Agent Instructions** field.
-* The Chat Input component is connected to the Agent component's Input port. This allows to flow to be triggered by an incoming prompt from a user or application.
-* The OpenSearch component is connected to the Agent component's Tools port. The agent may not use this database for every request; the agent only uses this connection if it decides the knowledge can help respond to the prompt.
-* The Language Model component is connected to the Agent component's Language Model port. The agent uses the connected LLM to reason through the request sent through Chat Input.
-* The Embedding Model component is connected to the OpenSearch component's Embedding port. This component converts text queries into vector representations that are compared with document embeddings stored in OpenSearch for semantic similarity matching. This gives your Agent's queries context.
-* The Text Input component is populated with the global variable `OPENRAG-QUERY-FILTER`.
-This filter is the Knowledge filter, and filters which knowledge sources to search through.
-* The Agent component's Output port is connected to the Chat Output component, which returns the final response to the user or application.
+* The [**Agent** component](https://docs.langflow.org/agents) orchestrates the entire flow by deciding when to search the knowledge base, how to formulate search queries, and how to combine retrieved information with the user's question to generate a comprehensive response.
+The **Agent** behaves according to the prompt in the **Agent Instructions** field.
+* The [**Chat Input** component](https://docs.langflow.org/components-io) is connected to the Agent component's Input port. This allows the flow to be triggered by an incoming prompt from a user or application.
+* The [**OpenSearch** component](https://docs.langflow.org/bundles-elastic#opensearch) is connected to the Agent component's Tools port. The agent may not use this database for every request; the agent only uses this connection if it decides the knowledge can help respond to the prompt.
+* The [**Language Model** component](https://docs.langflow.org/components-models) is connected to the Agent component's Language Model port. The agent uses the connected LLM to reason through the request sent through Chat Input.
+* The [**Embedding Model** component](https://docs.langflow.org/components-embedding-models) is connected to the OpenSearch component's Embedding port. This component converts text queries into vector representations that are compared with document embeddings stored in OpenSearch for semantic similarity matching. This gives your Agent's queries context.
+* The [**Text Input** component](https://docs.langflow.org/components-io) is populated with the global variable `OPENRAG-QUERY-FILTER`.
+This filter is the [Knowledge filter](/knowledge#create-knowledge-filters), and filters which knowledge sources to search through.
+* The **Agent** component's Output port is connected to the [**Chat Output** component](https://docs.langflow.org/components-io), which returns the final response to the user or application.
-For an example of changing out the agent's LLM in OpenRAG, see the [Quickstart](/quickstart#change-components).
+For an example of changing out the agent's language model in OpenRAG, see the [Quickstart](/quickstart#change-components).
To restore the flow to its initial state, in OpenRAG, click **Settings**, and then click **Restore Flow**.
-OpenRAG warns you that this discards all custom settings. Click **Restore** to restore the flow.
\ No newline at end of file
+OpenRAG warns you that this discards all custom settings. Click **Restore** to restore the flow.
+
+## Additional Langflow functionality
+
+Langflow includes features beyond Agents to help you integrate OpenRAG into your application, and all Langflow features are included in OpenRAG.
+
+* Langflow can serve your flows as an [MCP server](https://docs.langflow.org/mcp-server), or consume other MCP servers as an [MCP client](https://docs.langflow.org/mcp-client). Get started with the [MCP tutorial](https://docs.langflow.org/mcp-tutorial).
+
+* If you don't see the component you need, extend Langflow's functionality by creating [custom Python components](https://docs.langflow.org/components-custom-components).
+
+* Langflow offers component [bundles](https://docs.langflow.org/components-bundle-components) to integrate with many popular vector stores, AI/ML providers, and search APIs.
\ No newline at end of file
diff --git a/docs/docs/core-components/ingestion.mdx b/docs/docs/core-components/ingestion.mdx
index 08071158..d3ce81b0 100644
--- a/docs/docs/core-components/ingestion.mdx
+++ b/docs/docs/core-components/ingestion.mdx
@@ -46,7 +46,7 @@ If OpenRAG detects that the local machine is running on macOS, OpenRAG uses the
## Use OpenRAG default ingestion instead of Docling serve
-If you want to use OpenRAG's built-in pipeline instead of Docling serve, set `DISABLE_INGEST_WITH_LANGFLOW=true` in [Environment variables](/configure/configuration#ingestion-configuration).
+If you want to use OpenRAG's built-in pipeline instead of Docling serve, set `DISABLE_INGEST_WITH_LANGFLOW=true` in [Environment variables](/reference/configuration#document-processing).
The built-in pipeline still uses the Docling processor, but uses it directly without the Docling Serve API.
diff --git a/docs/docs/core-components/knowledge.mdx b/docs/docs/core-components/knowledge.mdx
index 6933b02f..d2a74ca4 100644
--- a/docs/docs/core-components/knowledge.mdx
+++ b/docs/docs/core-components/knowledge.mdx
@@ -12,19 +12,9 @@ import PartialExternalPreview from '@site/docs/_partial-external-preview.mdx';
OpenRAG uses [OpenSearch](https://docs.opensearch.org/latest/) for its vector-backed knowledge store.
+This is a specialized database for storing and retrieving embeddings, which helps your Agent efficiently find relevant information.
OpenSearch provides powerful hybrid search capabilities with enterprise-grade security and multi-tenancy support.
-## Explore knowledge
-
-The Knowledge page lists the documents OpenRAG has ingested into the OpenSearch vector database's `documents` index.
-
-To explore your current knowledge, click **Knowledge**.
-Click on a document to display the chunks derived from splitting the default documents into the vector database.
-
-Documents are processed with the default **Knowledge Ingest** flow, so if you want to split your documents differently, edit the **Knowledge Ingest** flow.
-
-
-
## Ingest knowledge
OpenRAG supports knowledge ingestion through direct file uploads and OAuth connectors.
@@ -33,7 +23,7 @@ OpenRAG supports knowledge ingestion through direct file uploads and OAuth conne
The **Knowledge Ingest** flow uses Langflow's [**File** component](https://docs.langflow.org/components-data#file) to split and embed files loaded from your local machine into the OpenSearch database.
-The default path to your local folder is mounted from the `./documents` folder in your OpenRAG project directory to the `/app/documents/` directory inside the Docker container. Files added to the host or the container will be visible in both locations. To configure this location, modify the **Documents Paths** variable in either the TUI's [Advanced Setup](/install#advanced-setup) or in the `.env` used by Docker Compose.
+The default path to your local folder is mounted from the `./documents` folder in your OpenRAG project directory to the `/app/documents/` directory inside the Docker container. Files added to the host or the container will be visible in both locations. To configure this location, modify the **Documents Paths** variable in either the TUI's [Advanced Setup](/install#setup) menu or in the `.env` used by Docker Compose.
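+
+For example, a `.env` entry for a custom location might look like the following, where the path is an illustrative placeholder:
+
+```bash
+OPENRAG_DOCUMENTS_PATHS=/home/user/my-documents
+```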
To load and process a single file from the mapped location, click **Add Knowledge**, and then click **Add File**.
The file is loaded into your OpenSearch database, and appears in the Knowledge page.
@@ -57,7 +47,7 @@ If you wish to use another provider, add the secrets to another provider.
1. If OpenRAG is running, stop it with **Status** > **Stop Services**.
2. Click **Advanced Setup**.
- 3. Add the OAuth provider's client and secret key in the [Advanced Setup](/install#advanced-setup) menu.
+ 3. Add the OAuth provider's client and secret key in the [Advanced Setup](/install#setup) menu.
4. Click **Save Configuration**.
The TUI generates a new `.env` file with your OAuth values.
5. Click **Start Container Services**.
@@ -100,6 +90,17 @@ You can monitor the sync progress in the
Once processing is complete, the synced documents become available in your knowledge base and can be searched through the chat interface or Knowledge page.
+## Explore knowledge
+
+The **Knowledge** page lists the documents OpenRAG has ingested into the OpenSearch vector database's `documents` index.
+
+To explore your current knowledge, click **Knowledge**.
+Click a document to display the chunks that were created when the document was split and embedded into the vector database.
+
+Documents are processed with the default **Knowledge Ingest** flow, so if you want to split your documents differently, edit the **Knowledge Ingest** flow.
+
+
+
### Knowledge ingestion settings
To configure the knowledge ingestion pipeline parameters, see [Docling Ingestion](/ingestion).
@@ -139,7 +140,7 @@ A new filter is created with default settings that match everything.
OpenRAG automatically detects and configures the correct vector dimensions for embedding models, ensuring optimal search performance and compatibility.
-The complete list of supported models is available at [/src/services/models_service.py](https://github.com/langflow-ai/openrag/blob/main/src/services/models_service.py).
+The complete list of supported models is available at [`models_service.py` in the OpenRAG repository](https://github.com/langflow-ai/openrag/blob/main/src/services/models_service.py).
You can use custom embedding models by specifying them in your configuration.
@@ -147,4 +148,4 @@ If you use an unknown embedding model, OpenRAG will automatically fall back to `
The default embedding dimension is `1536` and the default model is `text-embedding-3-small`.
-For models with known vector dimensions, see [/src/config/settings.py](https://github.com/langflow-ai/openrag/blob/main/src/config/settings.py).
\ No newline at end of file
+For models with known vector dimensions, see [`settings.py` in the OpenRAG repository](https://github.com/langflow-ai/openrag/blob/main/src/config/settings.py).
\ No newline at end of file
diff --git a/docs/docs/get-started/docker.mdx b/docs/docs/get-started/docker.mdx
index 415b39fe..f7ec730b 100644
--- a/docs/docs/get-started/docker.mdx
+++ b/docs/docs/get-started/docker.mdx
@@ -1,8 +1,9 @@
---
-title: Docker deployment
+title: Deploy with Docker
slug: /get-started/docker
---
+import PartialOnboarding from '@site/docs/_partial-onboarding.mdx';
import PartialExternalPreview from '@site/docs/_partial-external-preview.mdx';
@@ -14,7 +15,18 @@ They deploy the same applications and containers, but to different environments.
- [`docker-compose-cpu.yml`](https://github.com/langflow-ai/openrag/blob/main/docker-compose-cpu.yml) is a CPU-only version of OpenRAG for systems without GPU support. Use this Docker compose file for environments where GPU drivers aren't available.
-To install OpenRAG with Docker Compose:
+## Prerequisites
+
+- [Python Version 3.10 to 3.13](https://www.python.org/downloads/release/python-3100/)
+- [uv](https://docs.astral.sh/uv/getting-started/installation/)
+- [Podman](https://podman.io/docs/installation) (recommended) or [Docker](https://docs.docker.com/get-docker/) installed
+- [Docker Compose](https://docs.docker.com/compose/install/) installed. If you're using Podman, use [podman-compose](https://docs.podman.io/en/latest/markdown/podman-compose.1.html) or alias Docker Compose commands to Podman commands, as shown after this list.
+- Create an [OpenAI API key](https://platform.openai.com/api-keys). This key is **required** to start OpenRAG, but you can choose a different model provider during [Application Onboarding](#application-onboarding).
+- Optional: GPU support requires an NVIDIA GPU with CUDA support and compatible NVIDIA drivers installed on the OpenRAG host machine. If you don't have GPU capabilities, OpenRAG provides an alternate CPU-only deployment.
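+
+If you alias the commands, a minimal shell configuration might look like the following. These aliases are an assumption for a typical POSIX shell, not part of OpenRAG:
+
+```bash
+# Route Docker CLI calls to Podman
+alias docker=podman
+# Route Docker Compose calls to podman-compose
+alias docker-compose=podman-compose
+```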
+
+## Deploy OpenRAG with Docker Compose
+
+To install OpenRAG with Docker Compose, do the following:
1. Clone the OpenRAG repository.
```bash
@@ -22,7 +34,7 @@ To install OpenRAG with Docker Compose:
cd openrag
```
-2. Copy the example `.env` file that is included in the repository root.
+2. Copy the example `.env` file included in the repository root.
The example file includes all environment variables with comments to guide you in finding and setting their values.
```bash
cp .env.example .env
@@ -33,18 +45,18 @@ To install OpenRAG with Docker Compose:
touch .env
```
-3. Set environment variables. The Docker Compose files are populated with values from your `.env`, so the following values are **required** to be set:
+3. Set environment variables. The Docker Compose files are populated with values from your `.env`.
+The following values are **required**:
```bash
OPENSEARCH_PASSWORD=your_secure_password
OPENAI_API_KEY=your_openai_api_key
-
LANGFLOW_SUPERUSER=admin
LANGFLOW_SUPERUSER_PASSWORD=your_langflow_password
LANGFLOW_SECRET_KEY=your_secret_key
```
- For more information on configuring OpenRAG with environment variables, see [Environment variables](/configure/configuration).
- For additional configuration values, including `config.yaml`, see [Configuration](/configure/configuration).
+
+ For more information on configuring OpenRAG with environment variables, see [Environment variables](/reference/configuration).
4. Deploy OpenRAG with Docker Compose based on your deployment type.
@@ -79,14 +91,39 @@ To install OpenRAG with Docker Compose:
- **Backend API**: http://localhost:8000
- **Langflow**: http://localhost:7860
-Continue with the [Quickstart](/quickstart).
+6. Continue with [Application Onboarding](#application-onboarding).
-## Rebuild all Docker containers
+
+<PartialOnboarding />
+
-If you need to reset state and rebuild all of your containers, run the following command.
+## Container management commands
+
+Manage your OpenRAG containers with the following commands.
+These commands are also available in the TUI's [Status menu](/get-started/tui#status).
+
+### Upgrade containers
+
+Upgrade your containers to the latest version while preserving your data.
+
+```bash
+docker compose pull
+docker compose up -d --force-recreate
+```
+
+### Rebuild containers (destructive)
+
+Reset state by rebuilding all of your containers.
Your OpenSearch and Langflow databases will be lost.
Documents stored in the `./documents` directory will persist, since the directory is mounted as a volume in the OpenRAG backend container.
```bash
docker compose up --build --force-recreate --remove-orphans
```
+
+### Remove all containers and data (destructive)
+
+Completely remove your OpenRAG installation.
+This deletes all of your data, including OpenSearch data, uploaded documents, and authentication credentials.
+```bash
+docker compose down --volumes --remove-orphans --rmi local
+docker system prune -f
+```
\ No newline at end of file
diff --git a/docs/docs/get-started/install.mdx b/docs/docs/get-started/install.mdx
index ce3d1531..1759e813 100644
--- a/docs/docs/get-started/install.mdx
+++ b/docs/docs/get-started/install.mdx
@@ -5,41 +5,51 @@ slug: /install
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
+import PartialOnboarding from '@site/docs/_partial-onboarding.mdx';
import PartialExternalPreview from '@site/docs/_partial-external-preview.mdx';
-OpenRAG can be installed in multiple ways:
+[Install the OpenRAG Python wheel](#install-python-wheel), and then run the [OpenRAG Terminal User Interface (TUI)](#setup) to start your OpenRAG deployment with a guided setup process.
-* [**Python wheel**](#install-python-wheel): Install the OpenRAG Python wheel and use the [OpenRAG Terminal User Interface (TUI)](/get-started/tui) to install, run, and configure your OpenRAG deployment without running Docker commands.
-
-* [**Docker Compose**](get-started/docker): Clone the OpenRAG repository and deploy OpenRAG with Docker Compose, including all services and dependencies.
+If you prefer running Docker commands and manually editing `.env` files, see [Deploy with Docker](/get-started/docker).
## Prerequisites
- [Python Version 3.10 to 3.13](https://www.python.org/downloads/release/python-3100/)
- [uv](https://docs.astral.sh/uv/getting-started/installation/)
-- [Docker](https://docs.docker.com/get-docker/) or [Podman](https://podman.io/docs/installation) installed
+- [Podman](https://podman.io/docs/installation) (recommended) or [Docker](https://docs.docker.com/get-docker/) installed
- [Docker Compose](https://docs.docker.com/compose/install/) installed. If using Podman, use [podman-compose](https://docs.podman.io/en/latest/markdown/podman-compose.1.html) or alias Docker compose commands to Podman commands.
-- For GPU support: (TBD)
+- Create an [OpenAI API key](https://platform.openai.com/api-keys). This key is **required** to start OpenRAG, but you can choose a different model provider during [Application Onboarding](#application-onboarding).
+- Optional: GPU support requires an NVIDIA GPU with [CUDA](https://docs.nvidia.com/cuda/) support and compatible NVIDIA drivers installed on the OpenRAG host machine. If you don't have GPU capabilities, OpenRAG provides an alternate CPU-only deployment.
-## Python wheel {#install-python-wheel}
+## Install the OpenRAG Python wheel {#install-python-wheel}
-The Python wheel is currently available internally, but will be available on PyPI at launch.
-The wheel installs the OpenRAG wheel, which includes the TUI for installing, running, and managing OpenRAG.
-For more information on virtual environments, see [uv](https://docs.astral.sh/uv/pip/environments).
+:::important
+The `.whl` file is currently available as an internal download during public preview, and will be published to PyPI in a future release.
+:::
-1. Create a new project with a virtual environment using [uv](https://docs.astral.sh/uv/pip/environments).
+The OpenRAG wheel installs the Terminal User Interface (TUI) for configuring and running OpenRAG.
+
+1. Create a new project with a virtual environment using `uv init`.
```bash
uv init YOUR_PROJECT_NAME
cd YOUR_PROJECT_NAME
```
-2. Add the OpenRAG wheel to your project and install it in the virtual environment.
- Replace `PATH/TO/` and `VERSION` with your OpenRAG wheel location and version.
+
+ Your shell prompt doesn't change to show a `(venv)` indicator, but `uv` commands automatically use the project's virtual environment.
+ For more information on virtual environments, see the [uv documentation](https://docs.astral.sh/uv/pip/environments).
+
+2. Add the local OpenRAG wheel to your project's virtual environment.
+
```bash
uv add PATH/TO/openrag-VERSION-py3-none-any.whl
```
+ Replace `PATH/TO/` and `VERSION` with the path and version of your downloaded OpenRAG `.whl` file.
+
+ For example, if your `.whl` file is in the `~/Downloads` directory, the command is `uv add ~/Downloads/openrag-0.1.8-py3-none-any.whl`.
+
3. Ensure all dependencies are installed and updated in your virtual environment.
```bash
uv sync
@@ -50,95 +60,66 @@ For more information on virtual environments, see [uv](https://docs.astral.sh/uv
uv run openrag
```
- The OpenRAG TUI opens.
+5. Continue with [Set up OpenRAG with the TUI](#setup).
-5. To install OpenRAG with Basic Setup, click **Basic Setup** or press 1. Basic Setup does not set up OAuth connections for ingestion from Google Drive, OneDrive, or AWS. For OAuth setup, see [Advanced Setup](#advanced-setup).
- The TUI prompts you for the required startup values.
- Click **Generate Passwords** to autocomplete fields that contain **Auto-generated Secure Password**, or bring your own passwords.
-
- Where do I find the required startup values?
-
- | Variable | Where to Find | Description |
- |----------|---------------|-------------|
- | `OPENSEARCH_PASSWORD` | Auto-generated secure password | The password for OpenSearch database access. Must be at least 8 characters and must contain at least one uppercase letter, one lowercase letter, one digit, and one special character. |
- | `OPENAI_API_KEY` | [OpenAI Platform](https://platform.openai.com/api-keys) | API key from your OpenAI account. |
- | `LANGFLOW_SUPERUSER` | User generated | Username for Langflow admin access. For more, see [Langflow docs](https://docs.langflow.org/api-keys-and-authentication#langflow-superuser). |
- | `LANGFLOW_SUPERUSER_PASSWORD` | Auto-generated secure password | Password for Langflow admin access. For more, see the [Langflow docs](https://docs.langflow.org/api-keys-and-authentication#langflow-superuser). |
- | `LANGFLOW_SECRET_KEY` | Auto-generated secure key | Secret key for Langflow security. For more, see the [Langflow docs](https://docs.langflow.org/api-keys-and-authentication#langflow-secret-key). |
- | `LANGFLOW_AUTO_LOGIN` | Auto-generated or manual | Auto-login configuration. For more, see the [Langflow docs](https://docs.langflow.org/api-keys-and-authentication#langflow-auto-login). |
- | `LANGFLOW_NEW_USER_IS_ACTIVE` | Langflow | New user activation setting. For more, see the [Langflow docs](https://docs.langflow.org/api-keys-and-authentication#langflow-new-user-is-active). |
- | `LANGFLOW_ENABLE_SUPERUSER_CLI` | Langflow server | Superuser CLI access setting. For more, see the [Langflow docs](https://docs.langflow.org/api-keys-and-authentication#langflow-enable-superuser-cli). |
- | `DOCUMENTS_PATH` | Set your local path | Path to your document storage directory. |
-
-
-
- To complete credentials, click **Save Configuration**.
+## Set up OpenRAG with the TUI {#setup}
-6. To start OpenRAG with your credentials, click **Start Container Services**.
- Startup pulls container images and starts them, so it can take some time.
- The operation has completed when the **Close** button is available and the terminal displays:
- ```bash
- Services started successfully
- Command completed successfully
- ```
+The TUI creates a `.env` file in your OpenRAG directory root and starts OpenRAG.
-7. To open the OpenRAG application, click **Open App**, press 6, or navigate to `http://localhost:3000`.
- The application opens.
-8. Select your language model and embedding model provider, and complete the required fields.
- **Your provider can only be selected once, and you must use the same provider for your language model and embedding model.**
- The language model can be changed, but the embeddings model cannot be changed.
- To change your provider selection, you must restart OpenRAG and delete the `config.yml` file.
+**Basic Setup** generates all of the required values except the OpenAI API key.
+**Basic Setup** does not set up OAuth connections for ingestion from Google Drive, OneDrive, or AWS.
+For OAuth setup, use **Advanced Setup**.
-
-
- 9. If you already entered a value for `OPENAI_API_KEY` in the TUI in Step 5, enable **Get API key from environment variable**.
- 10. Under **Advanced settings**, select your **Embedding Model** and **Language Model**.
- 11. To load 2 sample PDFs, enable **Sample dataset**.
- This is recommended, but not required.
- 12. Click **Complete**.
-
-
-
- 9. Complete the fields for **watsonx.ai API Endpoint**, **IBM API key**, and **IBM Project ID**.
- These values are found in your IBM watsonx deployment.
- 10. Under **Advanced settings**, select your **Embedding Model** and **Language Model**.
- 11. To load 2 sample PDFs, enable **Sample dataset**.
- This is recommended, but not required.
- 12. Click **Complete**.
-
-
-
- 9. Enter your Ollama server's base URL address.
- The default Ollama server address is `http://localhost:11434`.
- Since OpenRAG is running in a container, you may need to change `localhost` to access services outside of the container. For example, change `http://localhost:11434` to `http://host.docker.internal:11434` to connect to Ollama.
- OpenRAG automatically sends a test connection to your Ollama server to confirm connectivity.
- 10. Select the **Embedding Model** and **Language Model** your Ollama server is running.
- OpenRAG automatically lists the available models from your Ollama server.
- 11. To load 2 sample PDFs, enable **Sample dataset**.
- This is recommended, but not required.
- 12. Click **Complete**.
+If the TUI detects OAuth credentials, it enforces the **Advanced Setup** path.
+If the TUI detects a `.env` file in the OpenRAG root directory, it sources variables from that file.
+
+<Tabs>
+<TabItem value="basic" label="Basic Setup">
+ 1. To install OpenRAG with **Basic Setup**, click **Basic Setup** or press 1.
+ 2. Click **Generate Passwords** to generate passwords for OpenSearch and Langflow.
+ 3. Paste your OpenAI API key in the OpenAI API key field.
+ 4. Click **Save Configuration**.
+ 5. To start OpenRAG, click **Start Container Services**.
+ Startup pulls container images and runs them, so it can take some time.
+ When startup is complete, the TUI displays the following:
+ ```bash
+ Services started successfully
+ Command completed successfully
+ ```
+ 6. To open the OpenRAG application, click **Open App**.
+ 7. Continue with [Application Onboarding](#application-onboarding).
+</TabItem>
+<TabItem value="advanced" label="Advanced Setup">
+ 1. To install OpenRAG with **Advanced Setup**, click **Advanced Setup** or press 2.
+ 2. Click **Generate Passwords** to generate passwords for OpenSearch and Langflow.
+ 3. Paste your OpenAI API key in the OpenAI API key field.
+ 4. Add your client and secret values for Google, Azure, or AWS OAuth.
+ These values can be found in your OAuth provider.
+ 5. The OpenRAG TUI presents redirect URIs for your OAuth app.
+ These are the URLs your OAuth provider redirects back to after user sign-in.
+ Register these redirect URIs with your OAuth provider exactly as they are presented in the TUI.
+ 6. Click **Save Configuration**.
+ 7. To start OpenRAG, click **Start Container Services**.
+ Startup pulls container images and runs them, so it can take some time.
+ When startup is complete, the TUI displays the following:
+ ```bash
+ Services started successfully
+ Command completed successfully
+ ```
+ 8. To open the OpenRAG application, click **Open App**, press 6, or navigate to `http://localhost:3000`.
+ Your provider's OAuth sign-in screen appears, and after sign-in you are redirected to the registered redirect URI.
+
+ Two additional variables are available for Advanced Setup:
+
+ The `LANGFLOW_PUBLIC_URL` variable controls where the Langflow web interface can be accessed. This is where users interact with their flows in a browser.
+
+ The `WEBHOOK_BASE_URL` variable controls where the `/connectors/CONNECTOR_TYPE/webhook` endpoint is available.
+ This connection enables real-time document synchronization with external services.
+ For example, for Google Drive file synchronization the webhook URL is `/connectors/google_drive/webhook`.
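+
+ For example, a hypothetical public deployment might set the following values in the `.env` file, where the domains are placeholders:
+
+ ```bash
+ LANGFLOW_PUBLIC_URL=https://langflow.example.com
+ WEBHOOK_BASE_URL=https://openrag.example.com
+ ```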
-
+ 9. Continue with [Application Onboarding](#application-onboarding).
+</TabItem>
-13. Continue with the [Quickstart](/quickstart).
-
-### Advanced Setup {#advanced-setup}
-
-**Advanced Setup** includes the required values from **Basic Setup**, with additional settings for OAuth credentials.
-If the OpenRAG TUI detects OAuth credentials, it enforces the Advanced Setup path.
-1. Add your client and secret values for Google, Azure, or AWS OAuth.
-These values can be found in your OAuth provider.
-2. The OpenRAG TUI presents redirect URIs for your OAuth app.
-These are the URLs your OAuth provider will redirect back to after user sign-in.
-Register these redirect values with your OAuth provider as they are presented in the TUI.
-3. To open the OpenRAG application, click **Open App** or press 6.
-You will be presented with your provider's OAuth sign-in screen, and be redirected to the redirect URI after sign-in.
-
-Two additional variables are available for Advanced Setup:
-
-The `LANGFLOW_PUBLIC_URL` controls where the Langflow web interface can be accessed. This is where users interact with their flows in a browser.
-
-The `WEBHOOK_BASE_URL` controls where the endpoint for `/connectors/CONNECTOR_TYPE/webhook` will be available.
-This connection enables real-time document synchronization with external services.
-For example, for Google Drive file synchronization the webhook URL is `/connectors/google_drive/webhook`.
\ No newline at end of file
+</Tabs>
+
+<PartialOnboarding />
\ No newline at end of file
diff --git a/docs/docs/get-started/quickstart.mdx b/docs/docs/get-started/quickstart.mdx
index 5d09f67d..838ad006 100644
--- a/docs/docs/get-started/quickstart.mdx
+++ b/docs/docs/get-started/quickstart.mdx
@@ -15,40 +15,6 @@ Get started with OpenRAG by loading your knowledge, swapping out your language m
## Prerequisites
- [Install and start OpenRAG](/install)
-- Create a [Langflow API key](https://docs.langflow.org/api-keys-and-authentication)
-
- Create a Langflow API key
-
- A Langflow API key is a user-specific token you can use with Langflow.
- It is **only** used for sending requests to the Langflow server.
- It does **not** access to OpenRAG.
-
- To create a Langflow API key, do the following:
-
- 1. In Langflow, click your user icon, and then select **Settings**.
- 2. Click **Langflow API Keys**, and then click **Add New**.
- 3. Name your key, and then click **Create API Key**.
- 4. Copy the API key and store it securely.
- 5. To use your Langflow API key in a request, set a `LANGFLOW_API_KEY` environment variable in your terminal, and then include an `x-api-key` header or query parameter with your request.
- For example:
-
- ```bash
- # Set variable
- export LANGFLOW_API_KEY="sk..."
-
- # Send request
- curl --request POST \
- --url "http://LANGFLOW_SERVER_ADDRESS/api/v1/run/FLOW_ID" \
- --header "Content-Type: application/json" \
- --header "x-api-key: $LANGFLOW_API_KEY" \
- --data '{
- "output_type": "chat",
- "input_type": "chat",
- "input_value": "Hello"
- }'
- ```
-
-
## Find your way around
@@ -99,12 +65,44 @@ You can more quickly access the **Language Model** and **Agent Instructions** fi
## Integrate OpenRAG into your application
To integrate OpenRAG into your application, use the [Langflow API](https://docs.langflow.org/api-reference-api-examples).
-Make requests with Python, TypeScript, or any HTTP client to run one of OpenRAG's default flows and get a response, and then modify the flow further to improve results.
+Make requests with Python, TypeScript, or any HTTP client to run one of OpenRAG's default flows and get a response, and then modify the flow further to improve results. Langflow provides code snippets to help you get started.
-Langflow provides code snippets to help you get started with the Langflow API.
-
-1. To navigate to the OpenRAG OpenSearch Agent flow, click **Settings**, and then click **Edit in Langflow** in the OpenRAG OpenSearch Agent flow.
-2. Click **Share**, and then click **API access**.
+1. Create a [Langflow API key](https://docs.langflow.org/api-keys-and-authentication).
+
+ <details>
+ <summary>Create a Langflow API key</summary>
+
+ A Langflow API key is a user-specific token you can use with Langflow.
+ It is **only** used for sending requests to the Langflow server.
+ It does **not** grant access to OpenRAG.
+
+ To create a Langflow API key, do the following:
+
+ 1. In Langflow, click your user icon, and then select **Settings**.
+ 2. Click **Langflow API Keys**, and then click **Add New**.
+ 3. Name your key, and then click **Create API Key**.
+ 4. Copy the API key and store it securely.
+ 5. To use your Langflow API key in a request, set a `LANGFLOW_API_KEY` environment variable in your terminal, and then include an `x-api-key` header or query parameter with your request.
+ For example:
+
+ ```bash
+ # Set variable
+ export LANGFLOW_API_KEY="sk..."
+
+ # Send request
+ curl --request POST \
+ --url "http://LANGFLOW_SERVER_ADDRESS/api/v1/run/FLOW_ID" \
+ --header "Content-Type: application/json" \
+ --header "x-api-key: $LANGFLOW_API_KEY" \
+ --data '{
+ "output_type": "chat",
+ "input_type": "chat",
+ "input_value": "Hello"
+ }'
+ ```
+ </details>
+
+2. To navigate to the OpenRAG OpenSearch Agent flow, click **Settings**, and then click **Edit in Langflow** in the OpenRAG OpenSearch Agent flow.
+3. Click **Share**, and then click **API access**.
The default code in the API access pane constructs a request with the Langflow server `url`, `headers`, and a `payload` of request data. The code snippets automatically include the `LANGFLOW_SERVER_ADDRESS` and `FLOW_ID` values for the flow. Replace these values if you're using the code for a different server or flow. The default Langflow server address is http://localhost:7860.
@@ -189,7 +187,7 @@ Langflow provides code snippets to help you get started with the Langflow API.
-3. Copy the snippet, paste it in a script file, and then run the script to send the request. If you are using the curl snippet, you can run the command directly in your terminal.
+4. Copy the snippet, paste it in a script file, and then run the script to send the request. If you are using the curl snippet, you can run the command directly in your terminal.
If the request is successful, the response includes many details about the flow run, including the session ID, inputs, outputs, components, durations, and more.
The following is an example of a response from running the **Simple Agent** template flow:
diff --git a/docs/docs/get-started/tui.mdx b/docs/docs/get-started/tui.mdx
index 49c676f7..f3cfe51e 100644
--- a/docs/docs/get-started/tui.mdx
+++ b/docs/docs/get-started/tui.mdx
@@ -7,11 +7,10 @@ import PartialExternalPreview from '@site/docs/_partial-external-preview.mdx';
-The OpenRAG Terminal User Interface (TUI) provides a streamlined way to set up, configure, and monitor your OpenRAG deployment directly from the terminal, on any operating system.
+The OpenRAG Terminal User Interface (TUI) allows you to set up, configure, and monitor your OpenRAG deployment directly from the terminal, on any operating system.

-The TUI offers an easier way to use OpenRAG without sacrificing control.
Instead of starting OpenRAG using Docker commands and manually editing values in the `.env` file, the TUI walks you through the setup. It prompts for variables where required, creates a `.env` file for you, and then starts OpenRAG.
Once OpenRAG is running, use the TUI to monitor your application, control your containers, and retrieve logs.
@@ -19,7 +18,6 @@ Once OpenRAG is running, use the TUI to monitor your application, control your c
## Start the TUI
To start the TUI, run the following commands from the directory where you installed OpenRAG.
-For more information, see [Install OpenRAG](/install).
```bash
uv sync
diff --git a/docs/docs/get-started/what-is-openrag.mdx b/docs/docs/get-started/what-is-openrag.mdx
index 5540d92d..18c01482 100644
--- a/docs/docs/get-started/what-is-openrag.mdx
+++ b/docs/docs/get-started/what-is-openrag.mdx
@@ -18,6 +18,8 @@ OpenRAG connects and amplifies three popular, proven open-source projects into o
* [Docling](https://docling-project.github.io/docling/) - Docling simplifies document processing, parsing diverse formats — including advanced PDF understanding — and providing seamless integrations with the gen AI ecosystem.
-OpenRAG builds on Langflow's familiar interface while adding OpenSearch for vector storage and Docling for simplified document parsing, with opinionated flows that serve as ready-to-use recipes for ingestion, retrieval, and generation from popular sources like OneDrive, Google Drive, and AWS. And don't fear: every part of the stack is swappable. Write your own custom components in Python, try different language models, and customize your flows to build an agentic RAG system that solves problems.
+OpenRAG builds on Langflow's familiar interface while adding OpenSearch for vector storage and Docling for simplified document parsing, with opinionated flows that serve as ready-to-use recipes for ingestion, retrieval, and generation from popular sources like OneDrive, Google Drive, and AWS.
-Ready to get started? Install OpenRAG and then run the Quickstart to create a powerful RAG pipeline.
\ No newline at end of file
+What's more, every part of the stack is swappable. Write your own custom components in Python, try different language models, and customize your flows to build an agentic RAG system.
+
+Ready to get started? [Install OpenRAG](/install) and then run the [Quickstart](/quickstart) to create a powerful RAG pipeline.
\ No newline at end of file
diff --git a/docs/docs/reference/configuration.mdx b/docs/docs/reference/configuration.mdx
new file mode 100644
index 00000000..c8ca7cfa
--- /dev/null
+++ b/docs/docs/reference/configuration.mdx
@@ -0,0 +1,162 @@
+---
+title: Environment variables
+slug: /reference/configuration
+---
+
+import Icon from "@site/src/components/icon/icon";
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+OpenRAG recognizes [supported environment variables](#supported-environment-variables) from the following sources:
+
+* [Environment variables](#supported-environment-variables) - Values set in the `.env` file.
+* [Langflow runtime overrides](#langflow-runtime-overrides) - Tweaks applied to Langflow components can override configured values at runtime.
+* [Default or fallback values](#default-values-and-fallbacks) - Values OpenRAG uses when no other source provides one.
+
+## Configure environment variables
+
+Environment variables are set in a `.env` file in the root of your OpenRAG project directory.
+
+For an example `.env` file, see [`.env.example` in the OpenRAG repository](https://github.com/langflow-ai/openrag/blob/main/.env.example).
+
+The Docker Compose files are populated with values from your `.env`, so you don't need to edit the Docker Compose files manually.
+
+Environment variables always take precedence over the other configuration sources.
+
+### Set environment variables
+
+To set environment variables, do the following:
+
+1. Stop OpenRAG.
+2. Set the values in the `.env` file:
+ ```bash
+ LOG_LEVEL=DEBUG
+ LOG_FORMAT=json
+ SERVICE_NAME=openrag-dev
+ ```
+3. Start OpenRAG.
+
+Changes to provider API keys or provider endpoints in the `.env` file don't take effect after [Application onboarding](/install#application-onboarding). To change these values, do the following:
+
+1. Stop OpenRAG.
+2. Remove the containers:
+ ```bash
+ docker compose down
+ ```
+3. Update the values in your `.env` file.
+4. Start OpenRAG containers.
+ ```bash
+ docker compose up -d
+ ```
+5. Complete [Application onboarding](/install#application-onboarding) again.
+
+## Supported environment variables
+
+All OpenRAG configuration can be controlled through environment variables.
+
+### AI provider settings
+
+Configure which AI models and providers OpenRAG uses for language processing and embeddings.
+For more information, see [Application onboarding](/install#application-onboarding).
+
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `EMBEDDING_MODEL` | `text-embedding-3-small` | Embedding model for vector search. |
+| `LLM_MODEL` | `gpt-4o-mini` | Language model for the chat agent. |
+| `MODEL_PROVIDER` | `openai` | Model provider, such as OpenAI or IBM watsonx.ai. |
+| `OPENAI_API_KEY` | - | Your OpenAI API key. Required. |
+| `PROVIDER_API_KEY` | - | API key for the model provider. |
+| `PROVIDER_ENDPOINT` | - | Custom provider endpoint. Only used for IBM or Ollama providers. |
+| `PROVIDER_PROJECT_ID` | - | Project ID for providers. Only required for the IBM watsonx.ai provider. |
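+
+For example, a minimal `.env` using OpenAI with the default models might contain the following, where the key is a placeholder:
+
+```bash
+MODEL_PROVIDER=openai
+OPENAI_API_KEY=your_openai_api_key
+LLM_MODEL=gpt-4o-mini
+EMBEDDING_MODEL=text-embedding-3-small
+```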
+
+### Document processing
+
+Control how OpenRAG processes and ingests documents into your knowledge base.
+For more information, see [Ingestion](/ingestion).
+
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `CHUNK_OVERLAP` | `200` | Overlap between chunks. |
+| `CHUNK_SIZE` | `1000` | Text chunk size for document processing. |
+| `DISABLE_INGEST_WITH_LANGFLOW` | `false` | Disable Langflow ingestion pipeline. |
+| `DOCLING_OCR_ENGINE` | - | OCR engine for document processing. |
+| `OCR_ENABLED` | `false` | Enable OCR for image processing. |
+| `OPENRAG_DOCUMENTS_PATHS` | `./documents` | Document paths for ingestion. |
+| `PICTURE_DESCRIPTIONS_ENABLED` | `false` | Enable picture descriptions. |
+
+### Langflow settings
+
+Configure Langflow authentication.
+
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `LANGFLOW_AUTO_LOGIN` | `False` | Enable auto-login for Langflow. |
+| `LANGFLOW_CHAT_FLOW_ID` | pre-filled | This value is pre-filled. The default value is found in [.env.example](https://github.com/langflow-ai/openrag/blob/main/.env.example). |
+| `LANGFLOW_ENABLE_SUPERUSER_CLI` | `False` | Enable superuser CLI. |
+| `LANGFLOW_INGEST_FLOW_ID` | pre-filled | This value is pre-filled. The default value is found in [.env.example](https://github.com/langflow-ai/openrag/blob/main/.env.example). |
+| `LANGFLOW_KEY` | auto-generated | Explicit Langflow API key. |
+| `LANGFLOW_NEW_USER_IS_ACTIVE` | `False` | New users are active by default. |
+| `LANGFLOW_PUBLIC_URL` | `http://localhost:7860` | Public URL for Langflow. |
+| `LANGFLOW_SECRET_KEY` | - | Secret key for Langflow internal operations. |
+| `LANGFLOW_SUPERUSER` | - | Langflow admin username. Required. |
+| `LANGFLOW_SUPERUSER_PASSWORD` | - | Langflow admin password. Required. |
+| `LANGFLOW_URL` | `http://localhost:7860` | Langflow URL. |
+| `NUDGES_FLOW_ID` | pre-filled | This value is pre-filled. The default value is found in [.env.example](https://github.com/langflow-ai/openrag/blob/main/.env.example). |
+| `SYSTEM_PROMPT` | "You are a helpful AI assistant with access to a knowledge base. Answer questions based on the provided context." | System prompt for the Langflow agent. |
+
+### OAuth provider settings
+
+Configure OAuth providers and external service integrations.
+
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `AWS_ACCESS_KEY_ID` / `AWS_SECRET_ACCESS_KEY` | - | AWS integrations. |
+| `GOOGLE_OAUTH_CLIENT_ID` / `GOOGLE_OAUTH_CLIENT_SECRET` | - | Google OAuth authentication. |
+| `MICROSOFT_GRAPH_OAUTH_CLIENT_ID` / `MICROSOFT_GRAPH_OAUTH_CLIENT_SECRET` | - | Microsoft OAuth. |
+| `WEBHOOK_BASE_URL` | - | Base URL for webhook endpoints. |
+
+### OpenSearch settings
+
+Configure OpenSearch database authentication.
+
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `OPENSEARCH_HOST` | `localhost` | OpenSearch host. |
+| `OPENSEARCH_PASSWORD` | - | Password for OpenSearch admin user. Required. |
+| `OPENSEARCH_PORT` | `9200` | OpenSearch port. |
+| `OPENSEARCH_USERNAME` | `admin` | OpenSearch username. |
+
+### System settings
+
+Configure general system components, session management, and logging.
+
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `LANGFLOW_KEY_RETRIES` | `15` | Number of retries for Langflow key generation. |
+| `LANGFLOW_KEY_RETRY_DELAY` | `2.0` | Delay between retries in seconds. |
+| `LOG_FORMAT` | - | Log format (set to "json" for JSON output). |
+| `LOG_LEVEL` | `INFO` | Logging level (DEBUG, INFO, WARNING, ERROR). |
+| `MAX_WORKERS` | - | Maximum number of workers for document processing. |
+| `SERVICE_NAME` | `openrag` | Service name for logging. |
+| `SESSION_SECRET` | auto-generated | Session management. |
+
+## Langflow runtime overrides
+
+Langflow runtime overrides allow you to modify component settings at runtime without changing the base configuration.
+
+Runtime overrides are implemented through **tweaks** - parameter modifications that are passed to specific Langflow components during flow execution.
+
+For more information on tweaks, see [Input schema (tweaks)](https://docs.langflow.org/concepts-publish#input-schema).
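+
+For example, a run request can pass a tweak to override a component parameter for a single execution. This sketch follows the Langflow run API pattern shown in the Quickstart; the component ID and field name are illustrative and depend on your flow:
+
+```bash
+curl --request POST \
+  --url "http://localhost:7860/api/v1/run/FLOW_ID" \
+  --header "Content-Type: application/json" \
+  --header "x-api-key: $LANGFLOW_API_KEY" \
+  --data '{
+    "output_type": "chat",
+    "input_type": "chat",
+    "input_value": "Hello",
+    "tweaks": {
+      "LanguageModel-Abc12": { "temperature": 0.2 }
+    }
+  }'
+```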
+
+## Default values and fallbacks
+
+When no environment variable provides a value, OpenRAG uses default values.
+These values can be found in the code base at the following locations.
+
+### OpenRAG configuration defaults
+
+These values are defined in [`config_manager.py` in the OpenRAG repository](https://github.com/langflow-ai/openrag/blob/main/src/config/config_manager.py).
+
+### System configuration defaults
+
+These fallback values are defined in [`settings.py` in the OpenRAG repository](https://github.com/langflow-ai/openrag/blob/main/src/config/settings.py).
\ No newline at end of file
diff --git a/docs/docs/support/troubleshoot.mdx b/docs/docs/support/troubleshoot.mdx
index fca6935e..9946db38 100644
--- a/docs/docs/support/troubleshoot.mdx
+++ b/docs/docs/support/troubleshoot.mdx
@@ -13,12 +13,12 @@ This page provides troubleshooting advice for issues you might encounter when us
## OpenSearch fails to start
-Check that `OPENSEARCH_PASSWORD` is set and meets requirements.
+Check that the `OPENSEARCH_PASSWORD` set in [Environment variables](/reference/configuration) meets the requirements.
The password must be strong: at least 8 characters, including at least one uppercase letter, one lowercase letter, one digit, and one special character.
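+
+For example, a value like the following satisfies all of the requirements (illustration only; choose your own secret):
+
+```bash
+OPENSEARCH_PASSWORD='Op3nRAG!2024'
+```
+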
## Langflow connection issues
-Verify the `LANGFLOW_SUPERUSER` credentials are correct.
+Verify that the `LANGFLOW_SUPERUSER` credentials set in [Environment variables](/reference/configuration) are correct.
## Memory errors
@@ -51,60 +51,61 @@ To reset your local containers and pull new images, do the following:
1. Stop your containers and completely remove them.
-
- <Tabs>
- <TabItem value="docker" label="Docker">
- ```bash
- # Stop all running containers
- docker stop $(docker ps -q)
+
+ <Tabs>
+ <TabItem value="podman" label="Podman">
- # Remove all containers (including stopped ones)
- docker rm --force $(docker ps -aq)
+ ```bash
+ # Stop all running containers
+ podman stop --all
+
+ # Remove all containers (including stopped ones)
+ podman rm --all --force
+
+ # Remove all images
+ podman rmi --all --force
+
+ # Remove all volumes
+ podman volume prune --force
+
+ # Remove all networks (except default)
+ podman network prune --force
+
+ # Clean up any leftover data
+ podman system prune --all --force --volumes
+ ```
- # Remove all images
- docker rmi --force $(docker images -q)
+ </TabItem>
+
+ <TabItem value="docker" label="Docker">
- # Remove all volumes
- docker volume prune --force
+ ```bash
+ # Stop all running containers
+ docker stop $(docker ps -q)
+
+ # Remove all containers (including stopped ones)
+ docker rm --force $(docker ps -aq)
+
+ # Remove all images
+ docker rmi --force $(docker images -q)
+
+ # Remove all volumes
+ docker volume prune --force
+
+ # Remove all networks (except default)
+ docker network prune --force
+
+ # Clean up any leftover data
+ docker system prune --all --force --volumes
+ ```
- # Remove all networks (except default)
- docker network prune --force
-
- # Clean up any leftover data
- docker system prune --all --force --volumes
- ```
-
- </TabItem>
-
- <TabItem value="podman" label="Podman">
- ```bash
- # Stop all running containers
- podman stop --all
-
- # Remove all containers (including stopped ones)
- podman rm --all --force
-
- # Remove all images
- podman rmi --all --force
-
- # Remove all volumes
- podman volume prune --force
-
- # Remove all networks (except default)
- podman network prune --force
-
- # Clean up any leftover data
- podman system prune --all --force --volumes
- ```
-
- </TabItem>
- </Tabs>
+
+ </TabItem>
+ </Tabs>
2. Restart OpenRAG and upgrade to get the latest images for your containers.
```bash
+ uv sync
uv run openrag
```
3. In the OpenRAG TUI, click **Status**, and then click **Upgrade**.
When the **Close** button is active, the upgrade is complete.
-Close the window and open the OpenRAG appplication.
+Close the window and open the OpenRAG application.
\ No newline at end of file
diff --git a/docs/sidebars.js b/docs/sidebars.js
index 0040c9ed..9d0c49c8 100644
--- a/docs/sidebars.js
+++ b/docs/sidebars.js
@@ -28,22 +28,22 @@ const sidebars = {
{
type: "doc",
id: "get-started/install",
- label: "Installation"
+ label: "Install OpenRAG"
+ },
+ {
+ type: "doc",
+ id: "get-started/docker",
+ label: "Deploy with Docker"
},
{
type: "doc",
id: "get-started/quickstart",
label: "Quickstart"
},
- {
- type: "doc",
- id: "get-started/docker",
- label: "Docker Deployment"
- },
{
type: "doc",
id: "get-started/tui",
- label: "Terminal Interface (TUI)"
+ label: "Terminal User Interface (TUI)"
},
],
},
@@ -70,12 +70,12 @@ const sidebars = {
},
{
type: "category",
- label: "Configuration",
+ label: "Reference",
items: [
{
type: "doc",
- id: "configure/configuration",
- label: "Environment Variables"
+ id: "reference/configuration",
+ label: "Environment variables"
},
],
},
@@ -93,4 +93,4 @@ const sidebars = {
],
};
-export default sidebars;
+export default sidebars;
\ No newline at end of file
diff --git a/docs/src/css/custom.css b/docs/src/css/custom.css
index 0e56814d..b2b0ee5c 100644
--- a/docs/src/css/custom.css
+++ b/docs/src/css/custom.css
@@ -29,6 +29,34 @@
--docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.3);
}
+/* Tabs Styling */
+.tabs-container {
+ border: 1px solid var(--ifm-color-emphasis-300);
+ border-radius: var(--ifm-global-radius);
+ padding: 1rem;
+ margin-bottom: 1rem;
+}
+
+.tabs {
+ margin-bottom: 1rem;
+}
+
+.tabs__item {
+ border: none;
+ border-bottom: 1px solid var(--ifm-color-emphasis-200);
+ margin-right: 0rem;
+ padding-bottom: 0.5rem;
+ border-radius: 0;
+}
+
+.tabs__item:hover {
+ background-color: var(--ifm-hover-overlay);
+}
+
+.tabs__item--active {
+ border-bottom-color: var(--ifm-tabs-color-active);
+}
+
/* GitHub Icon Button */
.header-github-link:hover {
opacity: 0.6;
diff --git a/frontend/components/ui/button.tsx b/frontend/components/ui/button.tsx
index 381ce3f7..c901d8ad 100644
--- a/frontend/components/ui/button.tsx
+++ b/frontend/components/ui/button.tsx
@@ -11,7 +11,7 @@ const buttonVariants = cva(
destructive:
"bg-destructive text-destructive-foreground hover:bg-destructive/90",
outline:
- "border border-input hover:bg-muted hover:text-accent-foreground disabled:bg-muted disabled:!border-none",
+ "border border-border hover:bg-muted hover:text-accent-foreground disabled:bg-muted disabled:!border-none",
primary:
"border bg-background text-secondary-foreground hover:bg-muted hover:shadow-sm",
warning: "bg-warning text-warning-foreground hover:bg-warning/90",
diff --git a/frontend/components/ui/card.tsx b/frontend/components/ui/card.tsx
index d1ec83e7..92c8a189 100644
--- a/frontend/components/ui/card.tsx
+++ b/frontend/components/ui/card.tsx
@@ -9,7 +9,7 @@ const Card = React.forwardRef<
ref={ref}
className={cn(
"rounded-xl border border-border bg-card text-card-foreground shadow-sm",
- className,
+ className
)}
{...props}
/>
@@ -33,8 +33,8 @@ const CardTitle = React.forwardRef<
diff --git a/frontend/components/ui/popover.tsx b/frontend/components/ui/popover.tsx
index ec42c030..ce79b6ae 100644
--- a/frontend/components/ui/popover.tsx
+++ b/frontend/components/ui/popover.tsx
@@ -1,31 +1,33 @@
-"use client"
+"use client";
-import * as React from "react"
-import * as PopoverPrimitive from "@radix-ui/react-popover"
+import * as PopoverPrimitive from "@radix-ui/react-popover";
+import * as React from "react";
-import { cn } from "@/lib/utils"
+import { cn } from "@/lib/utils";
-const Popover = PopoverPrimitive.Root
+const Popover = PopoverPrimitive.Root;
-const PopoverTrigger = PopoverPrimitive.Trigger
+const PopoverTrigger = PopoverPrimitive.Trigger;
+
+const PopoverAnchor = PopoverPrimitive.Anchor;
const PopoverContent = React.forwardRef<
- React.ElementRef<typeof PopoverPrimitive.Content>,
- React.ComponentPropsWithoutRef<typeof PopoverPrimitive.Content>
+ React.ElementRef<typeof PopoverPrimitive.Content>,
+ React.ComponentPropsWithoutRef<typeof PopoverPrimitive.Content>
>(({ className, align = "center", sideOffset = 4, ...props }, ref) => (
-
-
-
-))
-PopoverContent.displayName = PopoverPrimitive.Content.displayName
+
+
+
+));
+PopoverContent.displayName = PopoverPrimitive.Content.displayName;
-export { Popover, PopoverTrigger, PopoverContent }
\ No newline at end of file
+export { Popover, PopoverTrigger, PopoverAnchor, PopoverContent };
diff --git a/frontend/components/ui/select.tsx b/frontend/components/ui/select.tsx
index 66665060..63874553 100644
--- a/frontend/components/ui/select.tsx
+++ b/frontend/components/ui/select.tsx
@@ -26,7 +26,7 @@ const SelectTrigger = React.forwardRef<
<SelectPrimitive.Trigger
ref={ref}
className={cn(
- "flex h-10 w-full items-center justify-between rounded-md border border-input px-3 py-2 text-sm ring-offset-background placeholder:text-muted-foreground focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2 disabled:cursor-not-allowed disabled:opacity-50 [&>span]:line-clamp-1",
+ "flex h-10 w-full items-center justify-between rounded-md border border-input px-3 py-2 text-sm ring-offset-background placeholder:text-muted-foreground focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2 disabled:cursor-not-allowed disabled:bg-muted [&>span]:line-clamp-1 disabled:border-none",
className
)}
{...props}
@@ -34,7 +34,7 @@ const SelectTrigger = React.forwardRef<
{children}
{props.disabled ? (
-
+
) : (
)}
diff --git a/frontend/package-lock.json b/frontend/package-lock.json
index 58d78031..33300bd4 100644
--- a/frontend/package-lock.json
+++ b/frontend/package-lock.json
@@ -44,6 +44,7 @@
"react-icons": "^5.5.0",
"react-markdown": "^10.1.0",
"react-syntax-highlighter": "^15.6.1",
+ "react-textarea-autosize": "^8.5.9",
"rehype-mathjax": "^7.1.0",
"rehype-raw": "^7.0.0",
"remark-gfm": "^4.0.1",
@@ -8473,6 +8474,23 @@
"react": ">= 0.14.0"
}
},
+ "node_modules/react-textarea-autosize": {
+ "version": "8.5.9",
+ "resolved": "https://registry.npmjs.org/react-textarea-autosize/-/react-textarea-autosize-8.5.9.tgz",
+ "integrity": "sha512-U1DGlIQN5AwgjTyOEnI1oCcMuEr1pv1qOtklB2l4nyMGbHzWrI0eFsYK0zos2YWqAolJyG0IWJaqWmWj5ETh0A==",
+ "license": "MIT",
+ "dependencies": {
+ "@babel/runtime": "^7.20.13",
+ "use-composed-ref": "^1.3.0",
+ "use-latest": "^1.2.1"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "peerDependencies": {
+ "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0"
+ }
+ },
"node_modules/read-cache": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz",
@@ -10126,6 +10144,51 @@
}
}
},
+ "node_modules/use-composed-ref": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/use-composed-ref/-/use-composed-ref-1.4.0.tgz",
+ "integrity": "sha512-djviaxuOOh7wkj0paeO1Q/4wMZ8Zrnag5H6yBvzN7AKKe8beOaED9SF5/ByLqsku8NP4zQqsvM2u3ew/tJK8/w==",
+ "license": "MIT",
+ "peerDependencies": {
+ "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/use-isomorphic-layout-effect": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/use-isomorphic-layout-effect/-/use-isomorphic-layout-effect-1.2.1.tgz",
+ "integrity": "sha512-tpZZ+EX0gaghDAiFR37hj5MgY6ZN55kLiPkJsKxBMZ6GZdOSPJXiOzPM984oPYZ5AnehYx5WQp1+ME8I/P/pRA==",
+ "license": "MIT",
+ "peerDependencies": {
+ "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/use-latest": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/use-latest/-/use-latest-1.3.0.tgz",
+ "integrity": "sha512-mhg3xdm9NaM8q+gLT8KryJPnRFOz1/5XPBhmDEVZK1webPzDjrPk7f/mbpeLqTgB9msytYWANxgALOCJKnLvcQ==",
+ "license": "MIT",
+ "dependencies": {
+ "use-isomorphic-layout-effect": "^1.1.1"
+ },
+ "peerDependencies": {
+ "react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0"
+ },
+ "peerDependenciesMeta": {
+ "@types/react": {
+ "optional": true
+ }
+ }
+ },
"node_modules/use-sidecar": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/use-sidecar/-/use-sidecar-1.1.3.tgz",
diff --git a/frontend/package.json b/frontend/package.json
index bc9eb72c..fd996c33 100644
--- a/frontend/package.json
+++ b/frontend/package.json
@@ -45,6 +45,7 @@
"react-icons": "^5.5.0",
"react-markdown": "^10.1.0",
"react-syntax-highlighter": "^15.6.1",
+ "react-textarea-autosize": "^8.5.9",
"rehype-mathjax": "^7.1.0",
"rehype-raw": "^7.0.0",
"remark-gfm": "^4.0.1",
diff --git a/frontend/src/app/chat/page.tsx b/frontend/src/app/chat/page.tsx
index 240bb4d2..01ee43c7 100644
--- a/frontend/src/app/chat/page.tsx
+++ b/frontend/src/app/chat/page.tsx
@@ -3,8 +3,10 @@
import {
AtSign,
Bot,
+ Check,
ChevronDown,
ChevronRight,
+ Funnel,
GitBranch,
Loader2,
Plus,
@@ -15,10 +17,17 @@ import {
Zap,
} from "lucide-react";
import { useEffect, useRef, useState } from "react";
+import TextareaAutosize from "react-textarea-autosize";
+import { filterAccentClasses } from "@/components/knowledge-filter-panel";
import { MarkdownRenderer } from "@/components/markdown-renderer";
import { ProtectedRoute } from "@/components/protected-route";
import { Avatar, AvatarFallback, AvatarImage } from "@/components/ui/avatar";
import { Button } from "@/components/ui/button";
+import {
+ Popover,
+ PopoverAnchor,
+ PopoverContent,
+} from "@/components/ui/popover";
import { useAuth } from "@/contexts/auth-context";
import { type EndpointType, useChat } from "@/contexts/chat-context";
import { useKnowledgeFilter } from "@/contexts/knowledge-filter-context";
@@ -26,7 +35,6 @@ import { useTask } from "@/contexts/task-context";
import { useLoadingStore } from "@/stores/loadingStore";
import { useGetNudgesQuery } from "../api/queries/useGetNudgesQuery";
import Nudges from "./nudges";
-import { filterAccentClasses } from "@/components/knowledge-filter-panel";
interface Message {
role: "user" | "assistant";
@@ -125,16 +133,20 @@ function ChatPage() {
const [availableFilters, setAvailableFilters] = useState<
KnowledgeFilterData[]
>([]);
+ const [textareaHeight, setTextareaHeight] = useState(40);
const [filterSearchTerm, setFilterSearchTerm] = useState("");
const [selectedFilterIndex, setSelectedFilterIndex] = useState(0);
const [isFilterHighlighted, setIsFilterHighlighted] = useState(false);
const [dropdownDismissed, setDropdownDismissed] = useState(false);
const [isUserInteracting, setIsUserInteracting] = useState(false);
const [isForkingInProgress, setIsForkingInProgress] = useState(false);
+ const [anchorPosition, setAnchorPosition] = useState<{
+ x: number;
+ y: number;
+ } | null>(null);
const messagesEndRef = useRef(null);
const inputRef = useRef(null);
const fileInputRef = useRef(null);
- const dropdownRef = useRef(null);
const streamAbortRef = useRef(null);
const streamIdRef = useRef(0);
const lastLoadedConversationRef = useRef(null);
@@ -146,6 +158,59 @@ function ChatPage() {
messagesEndRef.current?.scrollIntoView({ behavior: "smooth" });
};
+ const getCursorPosition = (textarea: HTMLTextAreaElement) => {
+ // Create a hidden div with the same styles as the textarea
+ const div = document.createElement("div");
+ const computedStyle = getComputedStyle(textarea);
+
+ // Copy all computed styles to the hidden div
+ for (const style of computedStyle) {
+ // setProperty is required because computed style names are hyphenated
+ div.style.setProperty(style, computedStyle.getPropertyValue(style));
+ }
+
+ // Set the div to be hidden but not un-rendered
+ div.style.position = "absolute";
+ div.style.visibility = "hidden";
+ div.style.whiteSpace = "pre-wrap";
+ div.style.wordWrap = "break-word";
+ div.style.overflow = "hidden";
+ div.style.height = "auto";
+ div.style.width = `${textarea.getBoundingClientRect().width}px`;
+
+ // Get the text up to the cursor position
+ const cursorPos = textarea.selectionStart || 0;
+ const textBeforeCursor = textarea.value.substring(0, cursorPos);
+
+ // Add the text before cursor
+ div.textContent = textBeforeCursor;
+
+ // Create a span to mark the end position
+ const span = document.createElement("span");
+ span.textContent = "|"; // Cursor marker
+ div.appendChild(span);
+
+ // Add the text after cursor to handle word wrapping
+ const textAfterCursor = textarea.value.substring(cursorPos);
+ div.appendChild(document.createTextNode(textAfterCursor));
+
+ // Add the div to the document temporarily
+ document.body.appendChild(div);
+
+ // Get positions
+ const inputRect = textarea.getBoundingClientRect();
+ const divRect = div.getBoundingClientRect();
+ const spanRect = span.getBoundingClientRect();
+
+ // Calculate the cursor position relative to the input
+ const x = inputRect.left + (spanRect.left - divRect.left);
+ const y = inputRect.top + (spanRect.top - divRect.top);
+
+ // Clean up
+ document.body.removeChild(div);
+
+ return { x, y };
+ };
+
const handleEndpointChange = (newEndpoint: EndpointType) => {
setEndpoint(newEndpoint);
// Clear the conversation when switching endpoints to avoid response ID conflicts
@@ -193,7 +258,7 @@ function ChatPage() {
"Upload failed with status:",
response.status,
"Response:",
- errorText
+ errorText,
);
throw new Error("Failed to process document");
}
@@ -314,13 +379,6 @@ function ChatPage() {
}
};
- const handleFilterDropdownToggle = () => {
- if (!isFilterDropdownOpen) {
- loadAvailableFilters();
- }
- setIsFilterDropdownOpen(!isFilterDropdownOpen);
- };
-
const handleFilterSelect = (filter: KnowledgeFilterData | null) => {
setSelectedFilter(filter);
setIsFilterDropdownOpen(false);
@@ -410,7 +468,7 @@ function ChatPage() {
console.log(
"Loading conversation with",
conversationData.messages.length,
- "messages"
+ "messages",
);
// Convert backend message format to frontend Message interface
const convertedMessages: Message[] = conversationData.messages.map(
@@ -538,7 +596,7 @@ function ChatPage() {
) === "string"
? toolCall.function?.arguments || toolCall.arguments
: JSON.stringify(
- toolCall.function?.arguments || toolCall.arguments
+ toolCall.function?.arguments || toolCall.arguments,
),
result: toolCall.result,
status: "completed",
@@ -557,7 +615,7 @@ function ChatPage() {
}
return message;
- }
+ },
);
setMessages(convertedMessages);
@@ -646,7 +704,7 @@ function ChatPage() {
console.log(
"Chat page received file upload error event:",
filename,
- error
+ error,
);
// Replace the last message with error message
@@ -660,64 +718,43 @@ function ChatPage() {
window.addEventListener(
"fileUploadStart",
- handleFileUploadStart as EventListener
+ handleFileUploadStart as EventListener,
);
window.addEventListener(
"fileUploaded",
- handleFileUploaded as EventListener
+ handleFileUploaded as EventListener,
);
window.addEventListener(
"fileUploadComplete",
- handleFileUploadComplete as EventListener
+ handleFileUploadComplete as EventListener,
);
window.addEventListener(
"fileUploadError",
- handleFileUploadError as EventListener
+ handleFileUploadError as EventListener,
);
return () => {
window.removeEventListener(
"fileUploadStart",
- handleFileUploadStart as EventListener
+ handleFileUploadStart as EventListener,
);
window.removeEventListener(
"fileUploaded",
- handleFileUploaded as EventListener
+ handleFileUploaded as EventListener,
);
window.removeEventListener(
"fileUploadComplete",
- handleFileUploadComplete as EventListener
+ handleFileUploadComplete as EventListener,
);
window.removeEventListener(
"fileUploadError",
- handleFileUploadError as EventListener
+ handleFileUploadError as EventListener,
);
};
}, [endpoint, setPreviousResponseIds]);
- // Handle click outside to close dropdown
- useEffect(() => {
- const handleClickOutside = (event: MouseEvent) => {
- if (
- isFilterDropdownOpen &&
- dropdownRef.current &&
- !dropdownRef.current.contains(event.target as Node) &&
- !inputRef.current?.contains(event.target as Node)
- ) {
- setIsFilterDropdownOpen(false);
- setFilterSearchTerm("");
- setSelectedFilterIndex(0);
- }
- };
-
- document.addEventListener("mousedown", handleClickOutside);
- return () => {
- document.removeEventListener("mousedown", handleClickOutside);
- };
- }, [isFilterDropdownOpen]);
-
const { data: nudges = [], cancel: cancelNudges } = useGetNudgesQuery(
- previousResponseIds[endpoint]
+ previousResponseIds[endpoint],
);
const handleSSEStream = async (userMessage: Message) => {
@@ -822,7 +859,7 @@ function ChatPage() {
console.log(
"Received chunk:",
chunk.type || chunk.object,
- chunk
+ chunk,
);
// Extract response ID if present
@@ -838,14 +875,14 @@ function ChatPage() {
if (chunk.delta.function_call) {
console.log(
"Function call in delta:",
- chunk.delta.function_call
+ chunk.delta.function_call,
);
// Check if this is a new function call
if (chunk.delta.function_call.name) {
console.log(
"New function call:",
- chunk.delta.function_call.name
+ chunk.delta.function_call.name,
);
const functionCall: FunctionCall = {
name: chunk.delta.function_call.name,
@@ -861,7 +898,7 @@ function ChatPage() {
else if (chunk.delta.function_call.arguments) {
console.log(
"Function call arguments delta:",
- chunk.delta.function_call.arguments
+ chunk.delta.function_call.arguments,
);
const lastFunctionCall =
currentFunctionCalls[currentFunctionCalls.length - 1];
@@ -873,14 +910,14 @@ function ChatPage() {
chunk.delta.function_call.arguments;
console.log(
"Accumulated arguments:",
- lastFunctionCall.argumentsString
+ lastFunctionCall.argumentsString,
);
// Try to parse arguments if they look complete
if (lastFunctionCall.argumentsString.includes("}")) {
try {
const parsed = JSON.parse(
- lastFunctionCall.argumentsString
+ lastFunctionCall.argumentsString,
);
lastFunctionCall.arguments = parsed;
lastFunctionCall.status = "completed";
@@ -888,7 +925,7 @@ function ChatPage() {
} catch (e) {
console.log(
"Arguments not yet complete or invalid JSON:",
- e
+ e,
);
}
}
@@ -921,7 +958,7 @@ function ChatPage() {
else if (toolCall.function.arguments) {
console.log(
"Tool call arguments delta:",
- toolCall.function.arguments
+ toolCall.function.arguments,
);
const lastFunctionCall =
currentFunctionCalls[
@@ -935,7 +972,7 @@ function ChatPage() {
toolCall.function.arguments;
console.log(
"Accumulated tool arguments:",
- lastFunctionCall.argumentsString
+ lastFunctionCall.argumentsString,
);
// Try to parse arguments if they look complete
@@ -944,7 +981,7 @@ function ChatPage() {
) {
try {
const parsed = JSON.parse(
- lastFunctionCall.argumentsString
+ lastFunctionCall.argumentsString,
);
lastFunctionCall.arguments = parsed;
lastFunctionCall.status = "completed";
@@ -952,7 +989,7 @@ function ChatPage() {
} catch (e) {
console.log(
"Tool arguments not yet complete or invalid JSON:",
- e
+ e,
);
}
}
@@ -984,7 +1021,7 @@ function ChatPage() {
console.log(
"Error parsing function call on finish:",
fc,
- e
+ e,
);
}
}
@@ -1000,12 +1037,12 @@ function ChatPage() {
console.log(
"🟢 CREATING function call (added):",
chunk.item.id,
- chunk.item.tool_name || chunk.item.name
+ chunk.item.tool_name || chunk.item.name,
);
// Try to find an existing pending call to update (created by earlier deltas)
let existing = currentFunctionCalls.find(
- (fc) => fc.id === chunk.item.id
+ (fc) => fc.id === chunk.item.id,
);
if (!existing) {
existing = [...currentFunctionCalls]
@@ -1014,7 +1051,7 @@ function ChatPage() {
(fc) =>
fc.status === "pending" &&
!fc.id &&
- fc.name === (chunk.item.tool_name || chunk.item.name)
+ fc.name === (chunk.item.tool_name || chunk.item.name),
);
}
@@ -1027,7 +1064,7 @@ function ChatPage() {
chunk.item.inputs || existing.arguments;
console.log(
"🟢 UPDATED existing pending function call with id:",
- existing.id
+ existing.id,
);
} else {
const functionCall: FunctionCall = {
@@ -1045,7 +1082,7 @@ function ChatPage() {
currentFunctionCalls.map((fc) => ({
id: fc.id,
name: fc.name,
- }))
+ })),
);
}
}
@@ -1056,7 +1093,7 @@ function ChatPage() {
) {
console.log(
"Function args delta (Realtime API):",
- chunk.delta
+ chunk.delta,
);
const lastFunctionCall =
currentFunctionCalls[currentFunctionCalls.length - 1];
@@ -1067,7 +1104,7 @@ function ChatPage() {
lastFunctionCall.argumentsString += chunk.delta || "";
console.log(
"Accumulated arguments (Realtime API):",
- lastFunctionCall.argumentsString
+ lastFunctionCall.argumentsString,
);
}
}
@@ -1078,26 +1115,26 @@ function ChatPage() {
) {
console.log(
"Function args done (Realtime API):",
- chunk.arguments
+ chunk.arguments,
);
const lastFunctionCall =
currentFunctionCalls[currentFunctionCalls.length - 1];
if (lastFunctionCall) {
try {
lastFunctionCall.arguments = JSON.parse(
- chunk.arguments || "{}"
+ chunk.arguments || "{}",
);
lastFunctionCall.status = "completed";
console.log(
"Parsed function arguments (Realtime API):",
- lastFunctionCall.arguments
+ lastFunctionCall.arguments,
);
} catch (e) {
lastFunctionCall.arguments = { raw: chunk.arguments };
lastFunctionCall.status = "error";
console.log(
"Error parsing function arguments (Realtime API):",
- e
+ e,
);
}
}
@@ -1111,14 +1148,14 @@ function ChatPage() {
console.log(
"🔵 UPDATING function call (done):",
chunk.item.id,
- chunk.item.tool_name || chunk.item.name
+ chunk.item.tool_name || chunk.item.name,
);
console.log(
"🔵 Looking for existing function calls:",
currentFunctionCalls.map((fc) => ({
id: fc.id,
name: fc.name,
- }))
+ })),
);
// Find existing function call by ID or name
@@ -1126,14 +1163,14 @@ function ChatPage() {
(fc) =>
fc.id === chunk.item.id ||
fc.name === chunk.item.tool_name ||
- fc.name === chunk.item.name
+ fc.name === chunk.item.name,
);
if (functionCall) {
console.log(
"🔵 FOUND existing function call, updating:",
functionCall.id,
- functionCall.name
+ functionCall.name,
);
// Update existing function call with completion data
functionCall.status =
@@ -1156,7 +1193,7 @@ function ChatPage() {
"🔴 WARNING: Could not find existing function call to update:",
chunk.item.id,
chunk.item.tool_name,
- chunk.item.name
+ chunk.item.name,
);
}
}
@@ -1177,7 +1214,7 @@ function ChatPage() {
fc.name === chunk.item.name ||
fc.name === chunk.item.type ||
fc.name.includes(chunk.item.type.replace("_call", "")) ||
- chunk.item.type.includes(fc.name)
+ chunk.item.type.includes(fc.name),
);
if (functionCall) {
@@ -1221,12 +1258,12 @@ function ChatPage() {
"🟡 CREATING tool call (added):",
chunk.item.id,
chunk.item.tool_name || chunk.item.name,
- chunk.item.type
+ chunk.item.type,
);
// Dedupe by id or pending with same name
let existing = currentFunctionCalls.find(
- (fc) => fc.id === chunk.item.id
+ (fc) => fc.id === chunk.item.id,
);
if (!existing) {
existing = [...currentFunctionCalls]
@@ -1238,7 +1275,7 @@ function ChatPage() {
fc.name ===
(chunk.item.tool_name ||
chunk.item.name ||
- chunk.item.type)
+ chunk.item.type),
);
}
@@ -1254,7 +1291,7 @@ function ChatPage() {
chunk.item.inputs || existing.arguments;
console.log(
"🟡 UPDATED existing pending tool call with id:",
- existing.id
+ existing.id,
);
} else {
const functionCall = {
@@ -1275,7 +1312,7 @@ function ChatPage() {
id: fc.id,
name: fc.name,
type: fc.type,
- }))
+ })),
);
}
}
@@ -1553,7 +1590,7 @@ function ChatPage() {
const handleForkConversation = (
messageIndex: number,
- event?: React.MouseEvent
+ event?: React.MouseEvent,
) => {
// Prevent any default behavior and stop event propagation
if (event) {
@@ -1618,7 +1655,7 @@ function ChatPage() {
const renderFunctionCalls = (
functionCalls: FunctionCall[],
- messageIndex?: number
+ messageIndex?: number,
) => {
if (!functionCalls || functionCalls.length === 0) return null;
@@ -1851,6 +1888,162 @@ function ChatPage() {
handleSendMessage(suggestion);
};
+ const handleKeyDown = (e: React.KeyboardEvent<HTMLTextAreaElement>) => {
+ // Handle backspace for filter clearing
+ if (e.key === "Backspace" && selectedFilter && input.trim() === "") {
+ e.preventDefault();
+
+ if (isFilterHighlighted) {
+ // Second backspace - remove the filter
+ setSelectedFilter(null);
+ setIsFilterHighlighted(false);
+ } else {
+ // First backspace - highlight the filter
+ setIsFilterHighlighted(true);
+ }
+ return;
+ }
+
+ if (isFilterDropdownOpen) {
+ const filteredFilters = availableFilters.filter((filter) =>
+ filter.name.toLowerCase().includes(filterSearchTerm.toLowerCase()),
+ );
+
+ if (e.key === "Escape") {
+ e.preventDefault();
+ setIsFilterDropdownOpen(false);
+ setFilterSearchTerm("");
+ setSelectedFilterIndex(0);
+ setDropdownDismissed(true);
+
+ // Keep focus on the textarea so user can continue typing normally
+ inputRef.current?.focus();
+ return;
+ }
+
+ if (e.key === "ArrowDown") {
+ e.preventDefault();
+ setSelectedFilterIndex((prev) =>
+ prev < filteredFilters.length - 1 ? prev + 1 : 0,
+ );
+ return;
+ }
+
+ if (e.key === "ArrowUp") {
+ e.preventDefault();
+ setSelectedFilterIndex((prev) =>
+ prev > 0 ? prev - 1 : filteredFilters.length - 1,
+ );
+ return;
+ }
+
+ if (e.key === "Enter") {
+ // Check if we're at the end of an @ mention (space before cursor or end of input)
+ const cursorPos = e.currentTarget.selectionStart || 0;
+ const textBeforeCursor = input.slice(0, cursorPos);
+ const words = textBeforeCursor.split(" ");
+ const lastWord = words[words.length - 1];
+
+ if (lastWord.startsWith("@") && filteredFilters[selectedFilterIndex]) {
+ e.preventDefault();
+ handleFilterSelect(filteredFilters[selectedFilterIndex]);
+ return;
+ }
+ }
+
+ if (e.key === " ") {
+ // Select filter on space if we're typing an @ mention
+ const cursorPos = e.currentTarget.selectionStart || 0;
+ const textBeforeCursor = input.slice(0, cursorPos);
+ const words = textBeforeCursor.split(" ");
+ const lastWord = words[words.length - 1];
+
+ if (lastWord.startsWith("@") && filteredFilters[selectedFilterIndex]) {
+ e.preventDefault();
+ handleFilterSelect(filteredFilters[selectedFilterIndex]);
+ return;
+ }
+ }
+ }
+
+ if (e.key === "Enter" && !e.shiftKey && !isFilterDropdownOpen) {
+ e.preventDefault();
+ if (input.trim() && !loading) {
+ // Trigger form submission by finding the form and calling submit
+ const form = e.currentTarget.closest("form");
+ if (form) {
+ form.requestSubmit();
+ }
+ }
+ }
+ };
+
+ const onChange = (e: React.ChangeEvent<HTMLTextAreaElement>) => {
+ const newValue = e.target.value;
+ setInput(newValue);
+
+ // Clear filter highlight when user starts typing
+ if (isFilterHighlighted) {
+ setIsFilterHighlighted(false);
+ }
+
+ // Find if there's an @ at the start of the last word
+ const words = newValue.split(" ");
+ const lastWord = words[words.length - 1];
+
+ if (lastWord.startsWith("@") && !dropdownDismissed) {
+ const searchTerm = lastWord.slice(1); // Remove the @
+ console.log("Setting search term:", searchTerm);
+ setFilterSearchTerm(searchTerm);
+ setSelectedFilterIndex(0);
+
+ // Only set anchor position when @ is first detected (search term is empty)
+ if (searchTerm === "") {
+ const pos = getCursorPosition(e.target);
+ setAnchorPosition(pos);
+ }
+
+ if (!isFilterDropdownOpen) {
+ loadAvailableFilters();
+ setIsFilterDropdownOpen(true);
+ }
+ } else if (isFilterDropdownOpen) {
+ // Close dropdown if @ is no longer present
+ console.log("Closing dropdown - no @ found");
+ setIsFilterDropdownOpen(false);
+ setFilterSearchTerm("");
+ }
+
+ // Reset dismissed flag when user moves to a different word
+ if (dropdownDismissed && !lastWord.startsWith("@")) {
+ setDropdownDismissed(false);
+ }
+ };
+
+ const onAtClick = () => {
+ if (!isFilterDropdownOpen) {
+ loadAvailableFilters();
+ setIsFilterDropdownOpen(true);
+ setFilterSearchTerm("");
+ setSelectedFilterIndex(0);
+
+ // Get button position for popover anchoring
+ const button = document.querySelector(
+ "[data-filter-button]",
+ ) as HTMLElement;
+ if (button) {
+ const rect = button.getBoundingClientRect();
+ setAnchorPosition({
+ x: rect.left + rect.width / 2,
+ y: rect.top + rect.height / 2 - 12,
+ });
+ }
+ } else {
+ setIsFilterDropdownOpen(false);
+ setAnchorPosition(null);
+ }
+ };
+
return (