Merge branch 'main' into docling-presets

Mike Fortman 2025-09-23 15:14:05 -05:00 committed by GitHub
commit ee16809c14
50 changed files with 30361 additions and 1007 deletions

266
.github/workflows/deploy-docs-draft.yml vendored Normal file

@ -0,0 +1,266 @@
name: Pull Request Docs Draft
on:
pull_request:
branches:
- '**'
paths:
- 'docs/**'
- '.github/workflows/deploy-docs-draft.yml'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
jobs:
build-and-deploy:
runs-on: ubuntu-latest
if: "! github.event.pull_request.head.repo.fork"
steps:
- name: Checkout Repository
uses: actions/checkout@v4
- name: Setup Node.js
uses: actions/setup-node@v4
with:
node-version: 20
cache: yarn
cache-dependency-path: ./docs/yarn.lock
- name: Validate Branch Names
run: |
# Check if branch names contain invalid characters. Only alphanumeric, _, -, ., and / are allowed.
validate_branch_name() {
local branch_name="$1"
if [[ ! "$branch_name" =~ ^[a-zA-Z0-9/_\.-]+$ ]]; then
echo "Error: Branch name contains invalid characters. Only alphanumeric, _, -, ., and / are allowed."
exit 1
fi
}
validate_branch_name "${{ github.event.pull_request.head.ref }}"
- name: Extract Branch Names
id: extract_branch
run: |
# Extract and transform branch names
extract_branch() {
local input_branch="$1"
# Check if input_branch starts with "refs/heads/"
if [[ "$input_branch" == refs/heads/* ]]; then
# Remove "refs/heads/" prefix safely using parameter expansion
branch_name="${input_branch#refs/heads/}"
echo "$branch_name"
else
echo "$input_branch"
fi
}
# Transform branch names in form of `refs/heads/main` to `main`
draft_branch=$(extract_branch "${{ github.event.pull_request.head.ref }}")
# Replace / with - in the draft branch name to use as a directory name
draft_directory=$(echo "$draft_branch" | tr / -)
# Safe echo to $GITHUB_OUTPUT
{
echo "draft_branch=$draft_branch"
echo "draft_directory=$draft_directory"
} >> "$GITHUB_OUTPUT"
- name: Set Draft URL
id: draft_url
if: success()
run: |
echo "url=${{ vars.DOCS_DRAFT_BASE_URL }}/langflow-drafts/${{ steps.extract_branch.outputs.draft_directory }}/index.html" >> $GITHUB_OUTPUT
- name: Install dependencies
run: cd docs && yarn install
- name: Build website
if: success()
run: |
set -o pipefail
cd docs
yarn build |& tee $GITHUB_WORKSPACE/build.log
env:
BASE_URL: /langflow-drafts/${{ steps.extract_branch.outputs.draft_directory }}
FORCE_COLOR: 0 # Disable color output
# SEGMENT_PUBLIC_WRITE_KEY: ${{ vars.DOCS_DRAFT_SEGMENT_PUBLIC_WRITE_KEY }}
- name: Check Build Result
id: buildLogFail
if: failure()
run: |
MULTILINE_LOG=$(cat $GITHUB_WORKSPACE/build.log)
echo "BUILD_FAILURE<<EOF" >> $GITHUB_ENV
echo "$MULTILINE_LOG" >> $GITHUB_ENV
echo "EOF" >> $GITHUB_ENV
- name: Hide Previous Build Comments
if: ${{ github.event.pull_request.number && (success() || failure()) }}
run: |
set -e
# Get all comments on the PR that match our build comments
comments=$(gh api repos/${{ github.repository }}/issues/${{ github.event.pull_request.number }}/comments \
--jq '.[] | select(.body | test("Build failure! :x:|Build successful! :white_check_mark:")) | .node_id')
# Minimize each matching comment using GraphQL API
if [[ -n "$comments" ]]; then
echo "Found previous build comments to hide"
while IFS= read -r comment_id; do
if [[ -n "$comment_id" ]]; then
echo "Minimizing comment: $comment_id"
gh api graphql \
--field id="$comment_id" \
--field classifier="OUTDATED" \
--raw-field query='
mutation($id: ID!, $classifier: ReportedContentClassifiers!) {
minimizeComment(input: { subjectId: $id, classifier: $classifier }) {
minimizedComment {
isMinimized
}
}
}' || echo "Failed to minimize comment $comment_id, continuing..."
echo
fi
done <<< "$comments"
else
echo "No previous build comments found to hide"
fi
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Assemble Build Success Comment
if: success()
run: |
build_success_comment="Build successful! :white_check_mark:"
build_success_comment+="\nDeploying docs draft."
echo "BUILD_SUCCESS_COMMENT<<EOF" >> $GITHUB_ENV
echo -e "$build_success_comment" >> $GITHUB_ENV
echo "EOF" >> $GITHUB_ENV
- name: Create Build Success Comment
if: success()
uses: peter-evans/create-or-update-comment@v4
with:
issue-number: ${{ github.event.pull_request.number }}
body: "${{ env.BUILD_SUCCESS_COMMENT }}"
reactions: rocket
- name: Create Build Failure Comment
if: failure()
uses: peter-evans/create-or-update-comment@v4
with:
issue-number: ${{ github.event.pull_request.number }}
body: |
Build failure! :x:
> ${{ env.BUILD_FAILURE }}
reactions: confused
- name: Find Comment
id: fc
if: success()
uses: peter-evans/find-comment@v3
with:
issue-number: ${{ github.event.pull_request.number }}
body-includes: Build successful!
direction: last
- name: Configure AWS CLI
if: success()
run: |
aws configure set aws_access_key_id ${{ secrets.DOCS_AWS_ACCESS_KEY_ID }}
aws configure set aws_secret_access_key ${{ secrets.DOCS_AWS_SECRET_ACCESS_KEY }}
aws configure set region us-west-2
- name: Check for New Assets
run: |
set -o pipefail
echo "Checking for new assets." |& tee -a $GITHUB_WORKSPACE/deploy.log
echo "aws s3 sync docs/build/assets/ s3://${{ vars.DOCS_DRAFT_S3_BUCKET_NAME }}/langflow-drafts/${{ steps.extract_branch.outputs.draft_directory }}/assets/ --size-only --dryrun --no-progress" | tee -a $GITHUB_WORKSPACE/deploy.log
aws s3 sync docs/build/assets/ "s3://${{ vars.DOCS_DRAFT_S3_BUCKET_NAME }}/langflow-drafts/${{ steps.extract_branch.outputs.draft_directory }}/assets/" --size-only --dryrun --no-progress | tee $GITHUB_WORKSPACE/assets.log
- name: Determine Standard or Full Publish
id: check_full_publish
run: |
# Determine if a full publish is required because of new assets.
if grep -qE '(upload:|delete:)' "$GITHUB_WORKSPACE/assets.log"; then
echo "New assets. Perform full publish: true" | tee -a "$GITHUB_WORKSPACE/deploy.log"
echo "perform_full_publish=true" >> "$GITHUB_OUTPUT"
else
echo "No new assets. Perform full publish: false" | tee -a "$GITHUB_WORKSPACE/deploy.log"
echo "perform_full_publish=false" >> "$GITHUB_OUTPUT"
fi
- name: Deploy to S3
if: success()
run: |
set -o pipefail
cd docs
mkdir langflow-drafts
mv build langflow-drafts/${{ steps.extract_branch.outputs.draft_directory }}
cd langflow-drafts
# Records the repository that originally triggered the build so we can post back
# comments upon clean up of a stale draft if it still has an open pull request.
echo "${{ github.event.repository.full_name }}" > ${{ steps.extract_branch.outputs.draft_directory }}/.github_source_repository
s3_params=(
# Hide upload progress for a cleaner sync log
--no-progress
--delete
--exclude "*"
--include "${{ steps.extract_branch.outputs.draft_directory }}/*"
)
if [[ "${{ steps.check_full_publish.outputs.perform_full_publish }}" == "false" ]]; then
s3_params+=(--size-only)
fi
echo "Deploying draft to S3." |& tee -a $GITHUB_WORKSPACE/deploy.log
echo "aws s3 sync . s3://${{ vars.DOCS_DRAFT_S3_BUCKET_NAME }}/langflow-drafts ${s3_params[@]}" |& tee -a $GITHUB_WORKSPACE/deploy.log
aws s3 sync . "s3://${{ vars.DOCS_DRAFT_S3_BUCKET_NAME }}/langflow-drafts" "${s3_params[@]}" |& tee -a $GITHUB_WORKSPACE/deploy.log
# Update .github_source_repository file metadata to mark last modified time of the draft.
# This will allow us to later determine if a draft is stale and needs to be cleaned up.
echo "Marking last modified time of the draft." |& tee -a $GITHUB_WORKSPACE/deploy.log
echo "aws s3 cp --metadata '{\"touched\": \"now\"}' \
s3://${{ vars.DOCS_DRAFT_S3_BUCKET_NAME }}/langflow-drafts/${{ steps.extract_branch.outputs.draft_directory }}/.github_source_repository \
s3://${{ vars.DOCS_DRAFT_S3_BUCKET_NAME }}/langflow-drafts/${{ steps.extract_branch.outputs.draft_directory }}/.github_source_repository" \
|& tee -a $GITHUB_WORKSPACE/deploy.log
aws s3 cp --metadata '{ "touched": "now" }' \
s3://${{ vars.DOCS_DRAFT_S3_BUCKET_NAME }}/langflow-drafts/${{ steps.extract_branch.outputs.draft_directory }}/.github_source_repository \
s3://${{ vars.DOCS_DRAFT_S3_BUCKET_NAME }}/langflow-drafts/${{ steps.extract_branch.outputs.draft_directory }}/.github_source_repository \
|& tee -a $GITHUB_WORKSPACE/deploy.log
- name: Invalidate CloudFront Cache
if: success()
run: |
invalidation_batch="{ \"Paths\": { \"Quantity\": 1, \"Items\": [\"/langflow-drafts/${{ steps.extract_branch.outputs.draft_directory }}/*\"] }, \"CallerReference\": \"langflow-docs-draft-files-$(date +%s)\" }"
echo $invalidation_batch | jq . |& tee -a "$GITHUB_WORKSPACE/deploy.log"
echo "Creating invalidation." |& tee -a "$GITHUB_WORKSPACE/deploy.log"
invalidation_id=$(aws cloudfront create-invalidation --distribution-id "${{ vars.DOCS_DRAFT_CLOUD_FRONT_DISTRIBUTION_ID }}" --invalidation-batch "$invalidation_batch" --query 'Invalidation.Id' --output text |& tee -a "$GITHUB_WORKSPACE/deploy.log")
echo "Awaiting invalidation." |& tee -a "$GITHUB_WORKSPACE/deploy.log"
aws cloudfront wait invalidation-completed --distribution-id "${{ vars.DOCS_DRAFT_CLOUD_FRONT_DISTRIBUTION_ID }}" --id "$invalidation_id" |& tee -a "$GITHUB_WORKSPACE/deploy.log"
echo "Invalidation complete." |& tee -a "$GITHUB_WORKSPACE/deploy.log"
- name: Update Comment
if: ${{ steps.fc.outputs.comment-id != '' }}
uses: peter-evans/create-or-update-comment@v4
with:
comment-id: ${{ steps.fc.outputs.comment-id }}
body: |
Deploy successful! [View draft](${{ steps.draft_url.outputs.url }})
reactions: hooray
- name: Upload Deploy Log
uses: actions/upload-artifact@v4
if: always()
with:
name: deploy.log
path: ${{ github.workspace }}/deploy.log

43
.github/workflows/deploy-gh-pages.yml vendored Normal file

@ -0,0 +1,43 @@
name: Deploy to GitHub Pages
on:
push:
branches:
- main
paths:
- 'docs/**'
# Review gh actions docs if you want to further define triggers, paths, etc
# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#on
jobs:
deploy:
name: Deploy to GitHub Pages
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: 20
cache: yarn
cache-dependency-path: ./docs/yarn.lock
- name: Install dependencies
run: cd docs && yarn install
- name: Build website
run: cd docs && yarn build
# env:
# SEGMENT_PUBLIC_WRITE_KEY: ${{ vars.DOCS_PROD_SEGMENT_PUBLIC_WRITE_KEY }}
# Popular action to deploy to GitHub Pages:
# Docs: https://github.com/peaceiris/actions-gh-pages#%EF%B8%8F-docusaurus
- name: Deploy to GitHub Pages
uses: peaceiris/actions-gh-pages@v4
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
# Build output to publish to the `gh-pages` branch:
publish_dir: ./docs/build
# The following lines assign commit authorship to the official
# GH-Actions bot for deploys to `gh-pages` branch:
# https://github.com/actions/checkout/issues/13#issuecomment-724415212
# The GH Actions bot is used by default if you don't specify the two fields.
# You can swap them out with your own user credentials.

View file

@ -62,7 +62,7 @@ LANGFLOW_CHAT_FLOW_ID=your_chat_flow_id
LANGFLOW_INGEST_FLOW_ID=your_ingest_flow_id
NUDGES_FLOW_ID=your_nudges_flow_id
```
See extended configuration, including ingestion and optional variables: [docs/configuration.md](docs/configuration.md)
See extended configuration, including ingestion and optional variables: [docs/configure/configuration.md](docs/docs/configure/configuration.md)
### 3. Start OpenRAG
```bash
@ -98,7 +98,7 @@ uv run openrag
### TUI Features
See the full TUI guide for features, navigation, and benefits: [docs/tui.md](docs/tui.md)
See the full TUI guide for features, navigation, and benefits: [docs/get-started/tui.mdx](docs/docs/get-started/tui.mdx)
@ -121,7 +121,7 @@ For environments without GPU support:
docker compose -f docker-compose-cpu.yml up -d
```
More deployment commands and tips: [docs/docker.md](docs/docker.md)
More deployment commands and tips: [docs/get-started/docker.mdx](docs/docs/get-started/docker.mdx)
## 🔧 Troubleshooting
@ -138,7 +138,7 @@ podman machine start
### Common Issues
See common issues and fixes: [docs/troubleshooting.md](docs/troubleshooting.md)
See common issues and fixes: [docs/reference/troubleshooting.mdx](docs/docs/reference/troubleshooting.mdx)

25
docs/.gitignore vendored Normal file

@ -0,0 +1,25 @@
# Dependencies
/node_modules
# Production
/build
# Generated files
.docusaurus
.cache-loader
# Misc
.DS_Store
.env.local
.env.development.local
.env.test.local
.env.production.local
npm-debug.log*
yarn-debug.log*
yarn-error.log*
# Override parent .gitignore to allow package management files
!package.json
!package-lock.json
!yarn.lock

41
docs/README.md Normal file

@ -0,0 +1,41 @@
# Website
This website is built using [Docusaurus](https://docusaurus.io/), a modern static website generator.
## Installation
```bash
yarn
```
## Local Development
```bash
yarn start
```
This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server.
## Build
```bash
yarn build
```
This command generates static content into the `build` directory, which can be served by any static content hosting service.
## Deployment
Using SSH:
```bash
USE_SSH=true yarn deploy
```
Not using SSH:
```bash
GIT_USER=<Your GitHub username> yarn deploy
```
If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch.

View file

@ -1,3 +1,8 @@
---
title: Configuration
slug: /configure/configuration
---
# Configuration
OpenRAG supports multiple configuration methods with the following priority:

View file

@ -1,3 +1,8 @@
---
title: Docker Deployment
slug: /get-started/docker
---
# Docker Deployment
## Standard Deployment

View file

@ -0,0 +1,48 @@
---
title: What is OpenRAG?
slug: /
---
# OpenRAG Introduction
Let's discover **Docusaurus in less than 5 minutes**.
## Getting Started
Get started by **creating a new site**.
Or **try Docusaurus immediately** with **[docusaurus.new](https://docusaurus.new)**.
### What you'll need
- [Node.js](https://nodejs.org/en/download/) version 18.0 or above:
- When installing Node.js, you are recommended to check all checkboxes related to dependencies.
## Generate a new site
Generate a new Docusaurus site using the **classic template**.
The classic template will automatically be added to your project after you run the command:
```bash
npm init docusaurus@latest my-website classic
```
You can type this command into Command Prompt, PowerShell, Terminal, or any other integrated terminal of your code editor.
The command also installs all necessary dependencies you need to run Docusaurus.
## Start your site
Run the development server:
```bash
cd my-website
npm run start
```
The `cd` command changes the directory you're working with. In order to work with your newly created Docusaurus site, you'll need to navigate the terminal there.
The `npm run start` command builds your website locally and serves it through a development server, ready for you to view at http://localhost:3000/.
Open `docs/intro.md` (this page) and edit some lines: the site **reloads automatically** and displays your changes.

View file

@ -1,8 +1,13 @@
---
title: Terminal Interface (TUI)
slug: /get-started/tui
---
# OpenRAG TUI Guide
The OpenRAG Terminal User Interface (TUI) provides a streamlined way to set up, configure, and monitor your OpenRAG deployment directly from the terminal.
![OpenRAG TUI Interface](../assets/OpenRAG_TUI_2025-09-10T13_04_11_757637.svg)
![OpenRAG TUI Interface](@site/static/img/OpenRAG_TUI_2025-09-10T13_04_11_757637.svg)
## Launch

View file

@ -1,3 +1,8 @@
---
title: Troubleshooting
slug: /reference/troubleshooting
---
# Troubleshooting
## Podman on macOS

119
docs/docusaurus.config.js Normal file

@ -0,0 +1,119 @@
// @ts-check
// `@type` JSDoc annotations allow editor autocompletion and type checking
// (when paired with `@ts-check`).
// There are various equivalent ways to declare your Docusaurus config.
// See: https://docusaurus.io/docs/api/docusaurus-config
import {themes as prismThemes} from 'prism-react-renderer';
// This runs in Node.js - Don't use client-side code here (browser APIs, JSX...)
/** @type {import('@docusaurus/types').Config} */
const config = {
title: 'OpenRAG',
tagline: 'Open Source RAG Platform',
favicon: 'img/favicon.ico',
// Future flags, see https://docusaurus.io/docs/api/docusaurus-config#future
future: {
v4: true, // Improve compatibility with the upcoming Docusaurus v4
},
// Set the production url of your site here
url: 'https://langflow-ai.github.io',
// Set the /<baseUrl>/ pathname under which your site is served
// For GitHub pages deployment, it is often '/<projectName>/'
baseUrl: process.env.BASE_URL ? process.env.BASE_URL : '/openrag/',
// GitHub pages deployment config.
// If you aren't using GitHub pages, you don't need these.
organizationName: 'langflow-ai', // Usually your GitHub org/user name.
projectName: 'openrag', // Usually your repo name.
onBrokenLinks: 'throw',
onBrokenMarkdownLinks: 'warn',
// Even if you don't use internationalization, you can use this field to set
// useful metadata like html lang. For example, if your site is Chinese, you
// may want to replace "en" with "zh-Hans".
i18n: {
defaultLocale: 'en',
locales: ['en'],
},
presets: [
[
'classic',
/** @type {import('@docusaurus/preset-classic').Options} */
({
docs: {
sidebarPath: './sidebars.js',
// Please change this to your repo.
// Remove this to remove the "edit this page" links.
editUrl:
'https://github.com/openrag/openrag/tree/main/docs/',
routeBasePath: '/',
},
theme: {
customCss: './src/css/custom.css',
},
}),
],
],
themeConfig:
/** @type {import('@docusaurus/preset-classic').ThemeConfig} */
({
// Replace with your project's social card
image: 'img/docusaurus-social-card.jpg',
navbar: {
title: 'OpenRAG',
logo: {
alt: 'OpenRAG Logo',
src: 'img/logo.svg',
href: '/',
},
items: [
{
href: 'https://github.com/openrag/openrag',
label: 'GitHub',
position: 'right',
},
],
},
footer: {
style: 'dark',
links: [
{
title: 'Documentation',
items: [
{
label: 'Getting Started',
to: '/',
},
],
},
{
title: 'Community',
items: [
{
label: 'GitHub',
href: 'https://github.com/openrag/openrag',
},
{
label: 'Discord',
href: 'https://discord.gg/openrag',
},
],
},
],
copyright: `Copyright © ${new Date().getFullYear()} OpenRAG. Built with Docusaurus.`,
},
prism: {
theme: prismThemes.github,
darkTheme: prismThemes.dracula,
},
}),
};
export default config;

17575
docs/package-lock.json generated Normal file

File diff suppressed because it is too large

48
docs/package.json Normal file

@ -0,0 +1,48 @@
{
"name": "openrag-docs",
"version": "0.0.0",
"private": true,
"scripts": {
"docusaurus": "docusaurus",
"start": "docusaurus start",
"build": "docusaurus build",
"swizzle": "docusaurus swizzle",
"deploy": "docusaurus deploy",
"clear": "docusaurus clear",
"serve": "docusaurus serve",
"write-translations": "docusaurus write-translations",
"write-heading-ids": "docusaurus write-heading-ids",
"typecheck": "tsc"
},
"dependencies": {
"@docusaurus/core": "3.8.1",
"@docusaurus/preset-classic": "3.8.1",
"@mdx-js/react": "^3.0.0",
"clsx": "^2.0.0",
"lucide-react": "^0.544.0",
"prism-react-renderer": "^2.3.0",
"react": "^19.0.0",
"react-dom": "^19.0.0"
},
"devDependencies": {
"@docusaurus/module-type-aliases": "3.8.1",
"@docusaurus/tsconfig": "3.8.1",
"@docusaurus/types": "3.8.1",
"typescript": "~5.6.2"
},
"browserslist": {
"production": [
">0.5%",
"not dead",
"not op_mini all"
],
"development": [
"last 3 chrome version",
"last 3 firefox version",
"last 5 safari version"
]
},
"engines": {
"node": ">=18.0"
}
}

65
docs/sidebars.js Normal file

@ -0,0 +1,65 @@
// @ts-check
// This runs in Node.js - Don't use client-side code here (browser APIs, JSX...)
/**
* Creating a sidebar enables you to:
- create an ordered group of docs
- render a sidebar for each doc of that group
- provide next/previous navigation
The sidebars can be generated from the filesystem, or explicitly defined here.
Create as many sidebars as you want.
@type {import('@docusaurus/plugin-content-docs').SidebarsConfig}
*/
const sidebars = {
tutorialSidebar: [
{
type: "category",
label: "Get Started",
items: [
{
type: "doc",
id: "get-started/intro",
label: "Introduction"
},
{
type: "doc",
id: "get-started/docker",
label: "Docker Deployment"
},
{
type: "doc",
id: "get-started/tui",
label: "Terminal Interface (TUI)"
},
],
},
{
type: "category",
label: "Configuration",
items: [
{
type: "doc",
id: "configure/configuration",
label: "Environment Variables"
},
],
},
{
type: "category",
label: "Reference",
items: [
{
type: "doc",
id: "reference/troubleshooting",
label: "Troubleshooting"
},
],
},
],
};
export default sidebars;

View file

@ -0,0 +1,19 @@
import React from "react";
import * as LucideIcons from "lucide-react";
/*
How to use this component:
import Icon from "@site/src/components/icon";
<Icon name="AlertCircle" size={24} color="red" />
*/
type IconProps = {
name: string;
};
export default function Icon({ name, ...props }: IconProps) {
const Icon = LucideIcons[name];
return Icon ? <Icon {...props} /> : null;
}
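For reference, a minimal sketch of how this wrapper might be used from another React page in the docs site, following the import path given in the usage comment above (the surrounding markup and text are illustrative only):

```tsx
// Sketch only: renders the Icon wrapper defined above.
import React from "react";
import Icon from "@site/src/components/icon";

export default function ConfigTip() {
  return (
    <div style={{ display: "flex", alignItems: "center", gap: 8 }}>
      {/* `name` must match a lucide-react export, e.g. "AlertCircle". */}
      <Icon name="AlertCircle" />
      <span>Check your environment variables before deploying.</span>
      {/* Extra props such as size/color are spread through at runtime,
          but IconProps above would need to declare them for strict TS. */}
    </div>
  );
}
```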

30
docs/src/css/custom.css Normal file

@ -0,0 +1,30 @@
/**
* Any CSS included here will be global. The classic template
* bundles Infima by default. Infima is a CSS framework designed to
* work well for content-centric websites.
*/
/* You can override the default Infima variables here. */
:root {
--ifm-color-primary: #2e8555;
--ifm-color-primary-dark: #29784c;
--ifm-color-primary-darker: #277148;
--ifm-color-primary-darkest: #205d3b;
--ifm-color-primary-light: #33925d;
--ifm-color-primary-lighter: #359962;
--ifm-color-primary-lightest: #3cad6e;
--ifm-code-font-size: 95%;
--docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.1);
}
/* For readability concerns, you should choose a lighter palette in dark mode. */
[data-theme='dark'] {
--ifm-color-primary: #25c2a0;
--ifm-color-primary-dark: #21af90;
--ifm-color-primary-darker: #1fa588;
--ifm-color-primary-darkest: #1a8870;
--ifm-color-primary-light: #29d5b0;
--ifm-color-primary-lighter: #32d8b4;
--ifm-color-primary-lightest: #4fddbf;
--docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.3);
}

0
docs/static/.nojekyll vendored Normal file

File diff suppressed because one or more lines are too long


BIN
docs/static/img/favicon.ico vendored Normal file

Binary file not shown.


1
docs/static/img/logo.svg vendored Normal file

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="22" viewBox="0 0 24 22" fill="currentColor" class="h-6 w-6"><path d="M13.0486 0.462158H9.75399C9.44371 0.462158 9.14614 0.586082 8.92674 0.806667L4.03751 5.72232C3.81811 5.9429 3.52054 6.06682 3.21026 6.06682H1.16992C0.511975 6.06682 -0.0165756 6.61212 0.000397655 7.2734L0.0515933 9.26798C0.0679586 9.90556 0.586745 10.4139 1.22111 10.4139H3.59097C3.90124 10.4139 4.19881 10.2899 4.41821 10.0694L9.34823 5.11269C9.56763 4.89211 9.8652 4.76818 10.1755 4.76818H13.0486C13.6947 4.76818 14.2185 4.24157 14.2185 3.59195V1.63839C14.2185 0.988773 13.6947 0.462158 13.0486 0.462158Z"></path><path d="M19.5355 11.5862H22.8301C23.4762 11.5862 24 12.1128 24 12.7624V14.716C24 15.3656 23.4762 15.8922 22.8301 15.8922H19.957C19.6467 15.8922 19.3491 16.0161 19.1297 16.2367L14.1997 21.1934C13.9803 21.414 13.6827 21.5379 13.3725 21.5379H11.0026C10.3682 21.5379 9.84945 21.0296 9.83309 20.392L9.78189 18.3974C9.76492 17.7361 10.2935 17.1908 10.9514 17.1908H12.9918C13.302 17.1908 13.5996 17.0669 13.819 16.8463L18.7082 11.9307C18.9276 11.7101 19.2252 11.5862 19.5355 11.5862Z"></path><path d="M19.5355 2.9796L22.8301 2.9796C23.4762 2.9796 24 3.50622 24 4.15583V6.1094C24 6.75901 23.4762 7.28563 22.8301 7.28563H19.957C19.6467 7.28563 19.3491 7.40955 19.1297 7.63014L14.1997 12.5868C13.9803 12.8074 13.6827 12.9313 13.3725 12.9313H10.493C10.1913 12.9313 9.90126 13.0485 9.68346 13.2583L4.14867 18.5917C3.93087 18.8016 3.64085 18.9187 3.33917 18.9187H1.32174C0.675616 18.9187 0.151832 18.3921 0.151832 17.7425V15.7343C0.151832 15.0846 0.675616 14.558 1.32174 14.558H3.32468C3.63496 14.558 3.93253 14.4341 4.15193 14.2135L9.40827 8.92878C9.62767 8.70819 9.92524 8.58427 10.2355 8.58427H12.9918C13.302 8.58427 13.5996 8.46034 13.819 8.23976L18.7082 3.32411C18.9276 3.10353 19.2252 2.9796 19.5355 2.9796Z"></path></svg>


8821
docs/yarn.lock Normal file

File diff suppressed because it is too large

View file

@ -2,7 +2,9 @@
"data": {
"id": "OllamaEmbeddings-4ah5Q",
"node": {
"base_classes": ["Embeddings"],
"base_classes": [
"Embeddings"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
@ -10,10 +12,13 @@
"display_name": "Ollama Embeddings",
"documentation": "https://python.langchain.com/docs/integrations/text_embedding/ollama",
"edited": false,
"field_order": ["model_name", "base_url"],
"field_order": [
"model_name",
"base_url"
],
"frozen": false,
"icon": "Ollama",
"last_updated": "2025-09-17T20:01:59.954Z",
"last_updated": "2025-09-22T20:18:27.128Z",
"legacy": false,
"metadata": {
"code_hash": "0db0f99e91e9",
@ -29,12 +34,17 @@
},
{
"name": "langflow",
"version": "1.5.0.post2"
"version": null
}
],
"total_dependencies": 3
},
"keywords": ["model", "llm", "language model", "large language model"],
"keywords": [
"model",
"llm",
"language model",
"large language model"
],
"module": "langflow.components.ollama.ollama_embeddings.OllamaEmbeddingsComponent"
},
"minimized": false,
@ -51,7 +61,9 @@
"required_inputs": null,
"selected": "Embeddings",
"tool_mode": true,
"types": ["Embeddings"],
"types": [
"Embeddings"
],
"value": "__UNDEFINED__"
}
],
@ -64,7 +76,9 @@
"display_name": "Ollama Base URL",
"dynamic": false,
"info": "",
"input_types": ["Message"],
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": true,
@ -106,7 +120,9 @@
"dynamic": false,
"info": "",
"name": "model_name",
"options": [],
"options": [
"all-minilm:latest"
],
"options_metadata": [],
"placeholder": "",
"real_time_refresh": true,
@ -118,7 +134,7 @@
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": ""
"value": "all-minilm:latest"
}
},
"tool_mode": false
@ -133,9 +149,9 @@
"width": 320
},
"position": {
"x": 964,
"y": 248
"x": 282.29416840859585,
"y": 279.4218065717267
},
"selected": false,
"type": "genericNode"
}
}

View file

@ -2,7 +2,10 @@
"data": {
"id": "OllamaModel-eCsJx",
"node": {
"base_classes": ["LanguageModel", "Message"],
"base_classes": [
"LanguageModel",
"Message"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
@ -40,7 +43,7 @@
],
"frozen": false,
"icon": "Ollama",
"last_updated": "2025-09-17T20:01:59.191Z",
"last_updated": "2025-09-22T20:14:45.057Z",
"legacy": false,
"metadata": {
"code_hash": "af399d429d23",
@ -56,12 +59,17 @@
},
{
"name": "langflow",
"version": "1.5.0.post2"
"version": null
}
],
"total_dependencies": 3
},
"keywords": ["model", "llm", "language model", "large language model"],
"keywords": [
"model",
"llm",
"language model",
"large language model"
],
"module": "langflow.components.ollama.ollama.ChatOllamaComponent"
},
"minimized": false,
@ -77,7 +85,9 @@
"options": null,
"required_inputs": null,
"tool_mode": true,
"types": ["Message"],
"types": [
"Message"
],
"value": "__UNDEFINED__"
},
{
@ -91,7 +101,9 @@
"required_inputs": null,
"selected": "LanguageModel",
"tool_mode": true,
"types": ["LanguageModel"],
"types": [
"LanguageModel"
],
"value": "__UNDEFINED__"
}
],
@ -104,7 +116,9 @@
"display_name": "Base URL",
"dynamic": false,
"info": "Endpoint of the Ollama API.",
"input_types": ["Message"],
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": true,
@ -144,7 +158,9 @@
"display_name": "Format",
"dynamic": false,
"info": "Specify the format of the output (e.g., json).",
"input_types": ["Message"],
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
@ -165,7 +181,9 @@
"display_name": "Input",
"dynamic": false,
"info": "",
"input_types": ["Message"],
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
@ -207,7 +225,11 @@
"dynamic": false,
"info": "Enable/disable Mirostat sampling for controlling perplexity.",
"name": "mirostat",
"options": ["Disabled", "Mirostat", "Mirostat 2.0"],
"options": [
"Disabled",
"Mirostat",
"Mirostat 2.0"
],
"options_metadata": [],
"placeholder": "",
"real_time_refresh": true,
@ -265,7 +287,9 @@
"dynamic": false,
"info": "Refer to https://ollama.com/library for more models.",
"name": "model_name",
"options": [],
"options": [
"qwen3:4b"
],
"options_metadata": [],
"placeholder": "",
"real_time_refresh": true,
@ -277,7 +301,7 @@
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": ""
"value": "qwen3:4b"
},
"num_ctx": {
"_input_type": "IntInput",
@ -375,7 +399,9 @@
"display_name": "Stop Tokens",
"dynamic": false,
"info": "Comma-separated list of tokens to signal the model to stop generating text.",
"input_types": ["Message"],
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
@ -414,7 +440,9 @@
"display_name": "System",
"dynamic": false,
"info": "System to use for generating text.",
"input_types": ["Message"],
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
@ -436,7 +464,9 @@
"display_name": "System Message",
"dynamic": false,
"info": "System message to pass to the model.",
"input_types": ["Message"],
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
@ -458,7 +488,9 @@
"display_name": "Tags",
"dynamic": false,
"info": "Comma-separated list of tags to add to the run trace.",
"input_types": ["Message"],
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
@ -507,7 +539,9 @@
"display_name": "Template",
"dynamic": false,
"info": "Template to use for generating text.",
"input_types": ["Message"],
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
@ -638,15 +672,16 @@
"showNode": true,
"type": "OllamaModel"
},
"dragging": false,
"id": "OllamaModel-eCsJx",
"measured": {
"height": 494,
"width": 320
},
"position": {
"x": 554,
"y": 225
"x": 248.08287272472313,
"y": 216.98088326271431
},
"selected": false,
"type": "genericNode"
}
}

File diff suppressed because one or more lines are too long

View file

@ -2,7 +2,9 @@
"data": {
"id": "WatsonxEmbeddingsComponent-pJfXI",
"node": {
"base_classes": ["Embeddings"],
"base_classes": [
"Embeddings"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
@ -20,6 +22,7 @@
],
"frozen": false,
"icon": "WatsonxAI",
"last_updated": "2025-09-22T20:11:38.181Z",
"legacy": false,
"metadata": {
"code_hash": "b6c6d50cc7ed",
@ -43,7 +46,7 @@
},
{
"name": "langflow",
"version": "1.5.0.post2"
"version": null
}
],
"total_dependencies": 5
@ -60,9 +63,13 @@
"group_outputs": false,
"method": "build_embeddings",
"name": "embeddings",
"options": null,
"required_inputs": null,
"selected": "Embeddings",
"tool_mode": true,
"types": ["Embeddings"],
"types": [
"Embeddings"
],
"value": "__UNDEFINED__"
}
],
@ -131,7 +138,16 @@
"dynamic": true,
"info": "",
"name": "model_name",
"options": [],
"options": [
"ibm/granite-embedding-107m-multilingual",
"ibm/granite-embedding-278m-multilingual",
"ibm/slate-125m-english-rtrvr",
"ibm/slate-125m-english-rtrvr-v2",
"ibm/slate-30m-english-rtrvr",
"ibm/slate-30m-english-rtrvr-v2",
"intfloat/multilingual-e5-large",
"sentence-transformers/all-minilm-l6-v2"
],
"options_metadata": [],
"placeholder": "",
"required": true,
@ -140,7 +156,8 @@
"toggle": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str"
"type": "str",
"value": "ibm/granite-embedding-107m-multilingual"
},
"project_id": {
"_input_type": "StrInput",
@ -205,7 +222,8 @@
"toggle": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str"
"type": "str",
"value": "https://us-south.ml.cloud.ibm.com"
}
},
"tool_mode": false
@ -213,15 +231,16 @@
"showNode": true,
"type": "WatsonxEmbeddingsComponent"
},
"dragging": false,
"id": "WatsonxEmbeddingsComponent-pJfXI",
"measured": {
"height": 467,
"width": 320
},
"position": {
"x": 999.129592360849,
"y": 753.2332292351236
"x": 364.4406919374723,
"y": 282.29319267029086
},
"selected": false,
"type": "genericNode"
}
}

View file

@ -2,7 +2,10 @@
"data": {
"id": "IBMwatsonxModel-jA4Nw",
"node": {
"base_classes": ["LanguageModel", "Message"],
"base_classes": [
"LanguageModel",
"Message"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
@ -31,6 +34,7 @@
],
"frozen": false,
"icon": "WatsonxAI",
"last_updated": "2025-09-22T20:03:31.248Z",
"legacy": false,
"metadata": {
"code_hash": "7767fd69a954",
@ -50,12 +54,17 @@
},
{
"name": "langflow",
"version": "1.5.0.post2"
"version": null
}
],
"total_dependencies": 4
},
"keywords": ["model", "llm", "language model", "large language model"],
"keywords": [
"model",
"llm",
"language model",
"large language model"
],
"module": "langflow.components.ibm.watsonx.WatsonxAIComponent"
},
"minimized": false,
@ -68,8 +77,12 @@
"group_outputs": false,
"method": "text_response",
"name": "text_output",
"options": null,
"required_inputs": null,
"tool_mode": true,
"types": ["Message"],
"types": [
"Message"
],
"value": "__UNDEFINED__"
},
{
@ -79,9 +92,13 @@
"group_outputs": false,
"method": "build_model",
"name": "model_output",
"options": null,
"required_inputs": null,
"selected": "LanguageModel",
"tool_mode": true,
"types": ["LanguageModel"],
"types": [
"LanguageModel"
],
"value": "__UNDEFINED__"
}
],
@ -157,7 +174,9 @@
"display_name": "Input",
"dynamic": false,
"info": "",
"input_types": ["Message"],
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
@ -242,7 +261,26 @@
"dynamic": true,
"info": "",
"name": "model_name",
"options": [],
"options": [
"ibm/granite-3-2-8b-instruct",
"ibm/granite-3-2b-instruct",
"ibm/granite-3-3-8b-instruct",
"ibm/granite-3-8b-instruct",
"ibm/granite-guardian-3-2b",
"ibm/granite-guardian-3-8b",
"ibm/granite-vision-3-2-2b",
"meta-llama/llama-3-2-11b-vision-instruct",
"meta-llama/llama-3-2-90b-vision-instruct",
"meta-llama/llama-3-3-70b-instruct",
"meta-llama/llama-3-405b-instruct",
"meta-llama/llama-4-maverick-17b-128e-instruct-fp8",
"meta-llama/llama-guard-3-11b-vision",
"mistralai/mistral-large",
"mistralai/mistral-medium-2505",
"mistralai/mistral-small-3-1-24b-instruct-2503",
"mistralai/pixtral-12b",
"openai/gpt-oss-120b"
],
"options_metadata": [],
"placeholder": "",
"required": true,
@ -251,7 +289,8 @@
"toggle": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str"
"type": "str",
"value": "ibm/granite-3-2-8b-instruct"
},
"presence_penalty": {
"_input_type": "SliderInput",
@ -362,7 +401,9 @@
"display_name": "System Message",
"dynamic": false,
"info": "System message to pass to the model.",
"input_types": ["Message"],
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": false,
@ -484,7 +525,8 @@
"toggle": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str"
"type": "str",
"value": "https://us-south.ml.cloud.ibm.com"
}
},
"tool_mode": false
@ -493,15 +535,16 @@
"showNode": true,
"type": "IBMwatsonxModel"
},
"dragging": false,
"id": "IBMwatsonxModel-jA4Nw",
"measured": {
"height": 632,
"width": 320
},
"position": {
"x": 562.2658900512183,
"y": 895.3455179382565
"x": 371.93566807042805,
"y": 197.47711431325635
},
"selected": false,
"type": "genericNode"
}
}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View file

@ -12,6 +12,7 @@ import { Button } from "./ui/button";
import { DeleteConfirmationDialog } from "./confirmation-dialog";
import { useDeleteDocument } from "@/app/api/mutations/useDeleteDocument";
import { toast } from "sonner";
import { useRouter } from "next/navigation";
interface KnowledgeActionsDropdownProps {
filename: string;
@ -22,6 +23,7 @@ export const KnowledgeActionsDropdown = ({
}: KnowledgeActionsDropdownProps) => {
const [showDeleteDialog, setShowDeleteDialog] = useState(false);
const deleteDocumentMutation = useDeleteDocument();
const router = useRouter();
const handleDelete = async () => {
try {
@ -43,7 +45,17 @@ export const KnowledgeActionsDropdown = ({
<EllipsisVertical className="h-4 w-4" />
</Button>
</DropdownMenuTrigger>
<DropdownMenuContent side="right" sideOffset={-10}>
<DropdownMenuContent side="right" align="start" sideOffset={-10}>
<DropdownMenuItem
className="text-primary focus:text-primary"
onClick={() => {
router.push(
`/knowledge/chunks?filename=${encodeURIComponent(filename)}`
);
}}
>
View chunks
</DropdownMenuItem>
{/* //TODO: Implement rename and sync */}
{/* <DropdownMenuItem
className="text-primary focus:text-primary"

View file

@ -0,0 +1,271 @@
"use client";
import { useState } from "react";
import { Button } from "@/components/ui/button";
import { Input } from "@/components/ui/input";
import { Label } from "@/components/ui/label";
import { Textarea } from "@/components/ui/textarea";
import { Filter, Loader2, Plus, Save, X } from "lucide-react";
import { cn } from "@/lib/utils";
import {
useGetFiltersSearchQuery,
type KnowledgeFilter,
} from "@/src/app/api/queries/useGetFiltersSearchQuery";
import { useCreateFilter } from "@/src/app/api/mutations/useCreateFilter";
import {
Dialog,
DialogContent,
DialogDescription,
DialogHeader,
DialogTitle,
} from "@/components/ui/dialog";
interface ParsedQueryData {
query: string;
filters: {
data_sources: string[];
document_types: string[];
owners: string[];
};
limit: number;
scoreThreshold: number;
}
interface KnowledgeFilterListProps {
selectedFilter: KnowledgeFilter | null;
onFilterSelect: (filter: KnowledgeFilter | null) => void;
}
export function KnowledgeFilterList({
selectedFilter,
onFilterSelect,
}: KnowledgeFilterListProps) {
const [searchQuery] = useState("");
const [showCreateModal, setShowCreateModal] = useState(false);
const [createName, setCreateName] = useState("");
const [createDescription, setCreateDescription] = useState("");
const [creating, setCreating] = useState(false);
const { data, isFetching: loading } = useGetFiltersSearchQuery(
searchQuery,
20
);
const filters = data || [];
const createFilterMutation = useCreateFilter();
const handleFilterSelect = (filter: KnowledgeFilter) => {
onFilterSelect(filter);
};
const handleCreateNew = () => {
setShowCreateModal(true);
};
const handleCreateFilter = async () => {
if (!createName.trim()) return;
setCreating(true);
try {
// Create a basic filter with wildcards (match everything by default)
const defaultFilterData = {
query: "",
filters: {
data_sources: ["*"],
document_types: ["*"],
owners: ["*"],
},
limit: 10,
scoreThreshold: 0,
};
const result = await createFilterMutation.mutateAsync({
name: createName.trim(),
description: createDescription.trim(),
queryData: JSON.stringify(defaultFilterData),
});
// Select the new filter from API response
onFilterSelect(result.filter);
// Close modal and reset form
setShowCreateModal(false);
setCreateName("");
setCreateDescription("");
} catch (error) {
console.error("Error creating knowledge filter:", error);
} finally {
setCreating(false);
}
};
const handleCancelCreate = () => {
setShowCreateModal(false);
setCreateName("");
setCreateDescription("");
};
const parseQueryData = (queryData: string): ParsedQueryData => {
return JSON.parse(queryData) as ParsedQueryData;
};
return (
<>
<div className="flex flex-col items-center gap-1 px-3 !mb-12 mt-0 h-full overflow-y-auto">
<div className="flex items-center w-full justify-between pl-3">
<div className="text-sm font-medium text-muted-foreground">
Knowledge Filters
</div>
<Button
variant="ghost"
size="sm"
onClick={handleCreateNew}
title="Create New Filter"
className="h-8 px-3 text-muted-foreground"
>
<Plus className="h-3 w-3" />
</Button>
</div>
{loading ? (
<div className="flex items-center justify-center p-4">
<Loader2 className="h-4 w-4 animate-spin" />
<span className="ml-2 text-sm text-muted-foreground">
Loading...
</span>
</div>
) : filters.length === 0 ? (
<div className="p-4 text-center text-sm text-muted-foreground">
{searchQuery ? "No filters found" : "No saved filters"}
</div>
) : (
filters.map((filter) => (
<div
key={filter.id}
onClick={() => handleFilterSelect(filter)}
className={cn(
"flex items-center gap-3 px-3 py-2 w-full rounded-lg hover:bg-accent hover:text-accent-foreground cursor-pointer group transition-colors",
selectedFilter?.id === filter.id &&
"bg-accent text-accent-foreground"
)}
>
<div className="flex flex-col gap-1 flex-1 min-w-0">
<div className="flex items-center gap-2">
<div className="flex items-center justify-center bg-blue-500/20 w-5 h-5 rounded">
<Filter className="h-3 w-3 text-blue-400" />
</div>
<div className="text-sm font-medium truncate group-hover:text-accent-foreground">
{filter.name}
</div>
</div>
{filter.description && (
<div className="text-xs text-muted-foreground group-hover:text-accent-foreground/70 line-clamp-2">
{filter.description}
</div>
)}
<div className="flex items-center gap-2">
<div className="text-xs text-muted-foreground group-hover:text-accent-foreground/70">
{new Date(filter.created_at).toLocaleDateString(undefined, {
month: "short",
day: "numeric",
year: "numeric",
})}
</div>
<span className="text-xs bg-muted text-muted-foreground px-1 py-0.5 rounded-sm">
{(() => {
const dataSources = parseQueryData(filter.query_data)
.filters.data_sources;
if (dataSources[0] === "*") return "All sources";
const count = dataSources.length;
return `${count} ${count === 1 ? "source" : "sources"}`;
})()}
</span>
</div>
</div>
{selectedFilter?.id === filter.id && (
<Button
variant="ghost"
size="sm"
className="px-0"
onClick={(e) => {
e.stopPropagation();
onFilterSelect(null);
}}
>
<X className="h-4 w-4 flex-shrink-0 opacity-0 group-hover:opacity-100 text-muted-foreground" />
</Button>
)}
</div>
))
)}
</div>
{/* Create Filter Dialog */}
<Dialog open={showCreateModal} onOpenChange={setShowCreateModal}>
<DialogContent>
<DialogHeader>
<DialogTitle>Create a new knowledge filter</DialogTitle>
<DialogDescription>
Save a reusable filter to quickly scope searches across your
knowledge base.
</DialogDescription>
</DialogHeader>
<div className="flex flex-col gap-2 space-y-2">
<div>
<Label htmlFor="filter-name" className="font-medium mb-2 gap-1">
Name<span className="text-red-400">*</span>
</Label>
<Input
id="filter-name"
type="text"
placeholder="Enter filter name"
value={createName}
onChange={(e) => setCreateName(e.target.value)}
className="mt-1"
/>
</div>
<div>
<Label htmlFor="filter-description" className="font-medium mb-2">
Description (optional)
</Label>
<Textarea
id="filter-description"
placeholder="Brief description of this filter"
value={createDescription}
onChange={(e) => setCreateDescription(e.target.value)}
className="mt-1"
rows={3}
/>
</div>
</div>
<div className="flex justify-end gap-2">
<Button
variant="outline"
onClick={handleCancelCreate}
disabled={creating}
>
Cancel
</Button>
<Button
onClick={handleCreateFilter}
disabled={!createName.trim() || creating}
className="flex items-center gap-2"
>
{creating ? (
<>
<Loader2 className="h-4 w-4 animate-spin" />
Creating...
</>
) : (
<>
<Save className="h-4 w-4" />
Create Filter
</>
)}
</Button>
</div>
</DialogContent>
</Dialog>
</>
);
}
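A minimal sketch of how a parent view could mount this list, assuming it only tracks the selection in local state (the parent component name and the component's import path are assumptions; the `KnowledgeFilter` type path matches the import used above):

```tsx
// Sketch only: parent component and its import path are assumed, not from this diff.
"use client";

import { useState } from "react";
import { KnowledgeFilterList } from "@/components/knowledge-filter-list";
import type { KnowledgeFilter } from "@/src/app/api/queries/useGetFiltersSearchQuery";

export default function KnowledgeSidebar() {
  const [selected, setSelected] = useState<KnowledgeFilter | null>(null);

  return (
    <KnowledgeFilterList
      selectedFilter={selected}
      onFilterSelect={setSelected} // called with a filter, or null when cleared
    />
  );
}
```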

View file

@ -1,133 +1,115 @@
"use client"
import { useState, useEffect } from 'react'
import { X, Edit3, Save, Settings, RefreshCw } from 'lucide-react'
import { Button } from '@/components/ui/button'
import { Card, CardContent, CardDescription, CardHeader, CardTitle } from '@/components/ui/card'
import { Input } from '@/components/ui/input'
import { Label } from '@/components/ui/label'
import { Textarea } from '@/components/ui/textarea'
import { MultiSelect } from '@/components/ui/multi-select'
import { Slider } from '@/components/ui/slider'
import { useKnowledgeFilter } from '@/contexts/knowledge-filter-context'
"use client";
import { useState, useEffect } from "react";
import { X, Edit3, Save, RefreshCw } from "lucide-react";
import { Button } from "@/components/ui/button";
import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
import { Input } from "@/components/ui/input";
import { Label } from "@/components/ui/label";
import { Textarea } from "@/components/ui/textarea";
import { MultiSelect } from "@/components/ui/multi-select";
import { Slider } from "@/components/ui/slider";
import { useKnowledgeFilter } from "@/contexts/knowledge-filter-context";
import { useDeleteFilter } from "@/app/api/mutations/useDeleteFilter";
import { useUpdateFilter } from "@/app/api/mutations/useUpdateFilter";
import { useGetSearchAggregations } from "@/src/app/api/queries/useGetSearchAggregations";
interface FacetBucket {
key: string
count: number
key: string;
count: number;
}
interface AvailableFacets {
data_sources: FacetBucket[]
document_types: FacetBucket[]
owners: FacetBucket[]
connector_types: FacetBucket[]
data_sources: FacetBucket[];
document_types: FacetBucket[];
owners: FacetBucket[];
connector_types: FacetBucket[];
}
export function KnowledgeFilterPanel() {
const { selectedFilter, parsedFilterData, setSelectedFilter, isPanelOpen, closePanelOnly } = useKnowledgeFilter()
const {
selectedFilter,
parsedFilterData,
setSelectedFilter,
isPanelOpen,
closePanelOnly,
} = useKnowledgeFilter();
const deleteFilterMutation = useDeleteFilter();
const updateFilterMutation = useUpdateFilter();
// Edit mode states
const [isEditingMeta, setIsEditingMeta] = useState(false)
const [editingName, setEditingName] = useState('')
const [editingDescription, setEditingDescription] = useState('')
const [isSaving, setIsSaving] = useState(false)
const [isEditingMeta, setIsEditingMeta] = useState(false);
const [editingName, setEditingName] = useState("");
const [editingDescription, setEditingDescription] = useState("");
const [isSaving, setIsSaving] = useState(false);
// Filter configuration states (mirror search page exactly)
const [query, setQuery] = useState('')
const [query, setQuery] = useState("");
const [selectedFilters, setSelectedFilters] = useState({
data_sources: ["*"] as string[], // Default to wildcard
document_types: ["*"] as string[], // Default to wildcard
owners: ["*"] as string[], // Default to wildcard
connector_types: ["*"] as string[] // Default to wildcard
})
const [resultLimit, setResultLimit] = useState(10)
const [scoreThreshold, setScoreThreshold] = useState(0)
connector_types: ["*"] as string[], // Default to wildcard
});
const [resultLimit, setResultLimit] = useState(10);
const [scoreThreshold, setScoreThreshold] = useState(0);
// Available facets (loaded from API)
const [availableFacets, setAvailableFacets] = useState<AvailableFacets>({
data_sources: [],
document_types: [],
owners: [],
connector_types: []
})
connector_types: [],
});
// Load current filter data into controls
useEffect(() => {
if (selectedFilter && parsedFilterData) {
setQuery(parsedFilterData.query || '')
setQuery(parsedFilterData.query || "");
// Set the actual filter selections from the saved knowledge filter
const filters = parsedFilterData.filters
const filters = parsedFilterData.filters;
// Use the exact selections from the saved filter
// Empty arrays mean "none selected" not "all selected"
const processedFilters = {
data_sources: filters.data_sources,
document_types: filters.document_types,
owners: filters.owners,
connector_types: filters.connector_types || ["*"]
}
console.log("[DEBUG] Loading filter selections:", processedFilters)
setSelectedFilters(processedFilters)
setResultLimit(parsedFilterData.limit || 10)
setScoreThreshold(parsedFilterData.scoreThreshold || 0)
setEditingName(selectedFilter.name)
setEditingDescription(selectedFilter.description || '')
}
}, [selectedFilter, parsedFilterData])
connector_types: filters.connector_types || ["*"],
};
console.log("[DEBUG] Loading filter selections:", processedFilters);
setSelectedFilters(processedFilters);
setResultLimit(parsedFilterData.limit || 10);
setScoreThreshold(parsedFilterData.scoreThreshold || 0);
setEditingName(selectedFilter.name);
setEditingDescription(selectedFilter.description || "");
}
}, [selectedFilter, parsedFilterData]);
// Load available facets using search aggregations hook
const { data: aggregations } = useGetSearchAggregations("*", 1, 0, {
enabled: isPanelOpen,
placeholderData: (prev) => prev,
staleTime: 60_000,
gcTime: 5 * 60_000,
});
// Load available facets from API
useEffect(() => {
if (isPanelOpen) {
loadAvailableFacets()
}
}, [isPanelOpen])
const loadAvailableFacets = async () => {
console.log("[DEBUG] Loading available facets...")
try {
// Do a search to get facets (similar to search page)
const response = await fetch("/api/search", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
query: "*", // Use wildcard like search page to get all documents/facets
limit: 1,
scoreThreshold: 0
// Omit filters entirely to get all available facets
}),
})
const result = await response.json()
console.log("[DEBUG] Search API response:", result)
if (response.ok && result.aggregations) {
const facets = {
data_sources: result.aggregations.data_sources?.buckets || [],
document_types: result.aggregations.document_types?.buckets || [],
owners: result.aggregations.owners?.buckets || [],
connector_types: result.aggregations.connector_types?.buckets || []
}
console.log("[DEBUG] Setting facets:", facets)
setAvailableFacets(facets)
} else {
console.log("[DEBUG] No aggregations in response or response not ok")
}
} catch (error) {
console.error("Failed to load available facets:", error)
}
}
if (!aggregations) return;
const facets = {
data_sources: aggregations.data_sources?.buckets || [],
document_types: aggregations.document_types?.buckets || [],
owners: aggregations.owners?.buckets || [],
connector_types: aggregations.connector_types?.buckets || [],
};
setAvailableFacets(facets);
}, [aggregations]);
// Don't render if panel is closed or no filter selected
if (!isPanelOpen || !selectedFilter || !parsedFilterData) return null
if (!isPanelOpen || !selectedFilter || !parsedFilterData) return null;
const selectAllFilters = () => {
// Use wildcards instead of listing all specific items
@ -135,116 +117,105 @@ export function KnowledgeFilterPanel() {
data_sources: ["*"],
document_types: ["*"],
owners: ["*"],
connector_types: ["*"]
})
}
connector_types: ["*"],
});
};
const clearAllFilters = () => {
setSelectedFilters({
data_sources: [],
document_types: [],
owners: [],
connector_types: []
})
}
connector_types: [],
});
};
const handleEditMeta = () => {
setIsEditingMeta(true)
}
setIsEditingMeta(true);
};
const handleCancelEdit = () => {
setIsEditingMeta(false)
setEditingName(selectedFilter.name)
setEditingDescription(selectedFilter.description || '')
}
setIsEditingMeta(false);
setEditingName(selectedFilter.name);
setEditingDescription(selectedFilter.description || "");
};
const handleSaveMeta = async () => {
if (!editingName.trim()) return
if (!editingName.trim()) return;
setIsSaving(true)
setIsSaving(true);
try {
const response = await fetch(`/api/knowledge-filter/${selectedFilter.id}`, {
method: 'PUT',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
name: editingName.trim(),
description: editingDescription.trim(),
}),
})
const result = await updateFilterMutation.mutateAsync({
id: selectedFilter.id,
name: editingName.trim(),
description: editingDescription.trim(),
});
const result = await response.json()
if (response.ok && result.success) {
const updatedFilter = {
...selectedFilter,
name: editingName.trim(),
description: editingDescription.trim(),
updated_at: new Date().toISOString(),
}
setSelectedFilter(updatedFilter)
setIsEditingMeta(false)
if (result.success && result.filter) {
setSelectedFilter(result.filter);
setIsEditingMeta(false);
}
} catch (error) {
console.error('Error updating filter:', error)
console.error("Error updating filter:", error);
} finally {
setIsSaving(false)
setIsSaving(false);
}
}
};
const handleSaveConfiguration = async () => {
const filterData = {
query,
filters: selectedFilters,
limit: resultLimit,
scoreThreshold
}
scoreThreshold,
};
setIsSaving(true)
setIsSaving(true);
try {
const response = await fetch(`/api/knowledge-filter/${selectedFilter.id}`, {
method: 'PUT',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
queryData: JSON.stringify(filterData)
}),
})
const result = await updateFilterMutation.mutateAsync({
id: selectedFilter.id,
queryData: JSON.stringify(filterData),
});
const result = await response.json()
if (response.ok && result.success) {
// Update the filter in context
const updatedFilter = {
...selectedFilter,
query_data: JSON.stringify(filterData),
updated_at: new Date().toISOString(),
}
setSelectedFilter(updatedFilter)
if (result.success && result.filter) {
setSelectedFilter(result.filter);
}
} catch (error) {
console.error('Error updating filter configuration:', error)
console.error("Error updating filter configuration:", error);
} finally {
setIsSaving(false)
setIsSaving(false);
}
}
};
const formatDate = (dateString: string) => {
return new Date(dateString).toLocaleDateString('en-US', {
year: 'numeric',
month: 'short',
day: 'numeric',
hour: '2-digit',
minute: '2-digit'
})
}
return new Date(dateString).toLocaleDateString("en-US", {
year: "numeric",
month: "short",
day: "numeric",
hour: "2-digit",
minute: "2-digit",
});
};
const handleFilterChange = (facetType: keyof typeof selectedFilters, newValues: string[]) => {
setSelectedFilters(prev => ({
const handleFilterChange = (
facetType: keyof typeof selectedFilters,
newValues: string[]
) => {
setSelectedFilters((prev) => ({
...prev,
[facetType]: newValues
}))
}
[facetType]: newValues,
}));
};
const handleDeleteFilter = async () => {
const result = await deleteFilterMutation.mutateAsync({
id: selectedFilter.id,
});
if (result.success) {
setSelectedFilter(null);
closePanelOnly();
}
};
return (
<div className="fixed right-0 top-14 bottom-0 w-80 bg-background border-l border-border/40 z-40 overflow-y-auto">
@ -252,7 +223,6 @@ export function KnowledgeFilterPanel() {
<CardHeader className="pb-3">
<div className="flex items-center justify-between">
<CardTitle className="text-lg flex items-center gap-2">
<Settings className="h-5 w-5" />
Knowledge Filter
</CardTitle>
<Button
@ -264,9 +234,6 @@ export function KnowledgeFilterPanel() {
<X className="h-4 w-4" />
</Button>
</div>
<CardDescription>
Configure your knowledge filter settings
</CardDescription>
</CardHeader>
<CardContent className="space-y-6">
@ -301,7 +268,7 @@ export function KnowledgeFilterPanel() {
className="flex-1"
>
<Save className="h-3 w-3 mr-1" />
{isSaving ? 'Saving...' : 'Save'}
{isSaving ? "Saving..." : "Save"}
</Button>
<Button
onClick={handleCancelEdit}
@ -315,9 +282,11 @@ export function KnowledgeFilterPanel() {
</div>
) : (
<div className="space-y-3">
<div className="flex items-start justify-between">
<div className="flex items-center justify-between">
<div className="flex-1">
<h3 className="font-semibold text-lg">{selectedFilter.name}</h3>
<h3 className="font-semibold text-lg">
{selectedFilter.name}
</h3>
{selectedFilter.description && (
<p className="text-sm text-muted-foreground mt-1">
{selectedFilter.description}
@ -336,7 +305,10 @@ export function KnowledgeFilterPanel() {
<div className="text-xs text-muted-foreground">
Created {formatDate(selectedFilter.created_at)}
{selectedFilter.updated_at !== selectedFilter.created_at && (
<span> Updated {formatDate(selectedFilter.updated_at)}</span>
<span>
{" "}
Updated {formatDate(selectedFilter.updated_at)}
</span>
)}
</div>
</div>
@ -345,14 +317,15 @@ export function KnowledgeFilterPanel() {
{/* Search Query */}
<div className="space-y-2">
<Label htmlFor="search-query" className="text-sm font-medium">Search Query</Label>
<Input
<Label htmlFor="search-query" className="text-sm font-medium">
Search Query
</Label>
<Textarea
id="search-query"
type="text"
placeholder="e.g., 'financial reports from Q4'"
value={query}
onChange={(e) => setQuery(e.target.value)}
className="bg-background/50 border-border/50"
rows={3}
/>
</div>
@ -361,13 +334,15 @@ export function KnowledgeFilterPanel() {
<div className="space-y-2">
<Label className="text-sm font-medium">Data Sources</Label>
<MultiSelect
options={(availableFacets.data_sources || []).map(bucket => ({
options={(availableFacets.data_sources || []).map((bucket) => ({
value: bucket.key,
label: bucket.key,
count: bucket.count
count: bucket.count,
}))}
value={selectedFilters.data_sources}
onValueChange={(values) => handleFilterChange('data_sources', values)}
onValueChange={(values) =>
handleFilterChange("data_sources", values)
}
placeholder="Select data sources..."
allOptionLabel="All Data Sources"
/>
@ -376,13 +351,17 @@ export function KnowledgeFilterPanel() {
<div className="space-y-2">
<Label className="text-sm font-medium">Document Types</Label>
<MultiSelect
options={(availableFacets.document_types || []).map(bucket => ({
value: bucket.key,
label: bucket.key,
count: bucket.count
}))}
options={(availableFacets.document_types || []).map(
(bucket) => ({
value: bucket.key,
label: bucket.key,
count: bucket.count,
})
)}
value={selectedFilters.document_types}
onValueChange={(values) => handleFilterChange('document_types', values)}
onValueChange={(values) =>
handleFilterChange("document_types", values)
}
placeholder="Select document types..."
allOptionLabel="All Document Types"
/>
@ -391,13 +370,13 @@ export function KnowledgeFilterPanel() {
<div className="space-y-2">
<Label className="text-sm font-medium">Owners</Label>
<MultiSelect
options={(availableFacets.owners || []).map(bucket => ({
options={(availableFacets.owners || []).map((bucket) => ({
value: bucket.key,
label: bucket.key,
count: bucket.count
count: bucket.count,
}))}
value={selectedFilters.owners}
onValueChange={(values) => handleFilterChange('owners', values)}
onValueChange={(values) => handleFilterChange("owners", values)}
placeholder="Select owners..."
allOptionLabel="All Owners"
/>
@ -406,13 +385,17 @@ export function KnowledgeFilterPanel() {
<div className="space-y-2">
<Label className="text-sm font-medium">Sources</Label>
<MultiSelect
options={(availableFacets.connector_types || []).map(bucket => ({
value: bucket.key,
label: bucket.key,
count: bucket.count
}))}
options={(availableFacets.connector_types || []).map(
(bucket) => ({
value: bucket.key,
label: bucket.key,
count: bucket.count,
})
)}
value={selectedFilters.connector_types}
onValueChange={(values) => handleFilterChange('connector_types', values)}
onValueChange={(values) =>
handleFilterChange("connector_types", values)
}
placeholder="Select sources..."
allOptionLabel="All Sources"
/>
@ -420,18 +403,18 @@ export function KnowledgeFilterPanel() {
{/* All/None buttons */}
<div className="flex gap-2">
<Button
variant="outline"
size="sm"
onClick={selectAllFilters}
<Button
variant="outline"
size="sm"
onClick={selectAllFilters}
className="h-auto px-3 py-1.5 text-xs text-muted-foreground hover:text-foreground hover:bg-muted/50 border-border/50"
>
All
</Button>
<Button
variant="outline"
size="sm"
onClick={clearAllFilters}
<Button
variant="outline"
size="sm"
onClick={clearAllFilters}
className="h-auto px-3 py-1.5 text-xs text-muted-foreground hover:text-foreground hover:bg-muted/50 border-border/50"
>
None
@ -442,18 +425,23 @@ export function KnowledgeFilterPanel() {
<div className="space-y-4 pt-4 border-t border-border/50">
<div className="space-y-2">
<div className="flex items-center justify-between">
<Label className="text-sm font-medium">Limit</Label>
<Label className="text-sm font-medium text-nowrap">
Response limit
</Label>
<Input
type="number"
min="1"
max="1000"
value={resultLimit}
onChange={(e) => {
const newLimit = Math.max(1, Math.min(1000, parseInt(e.target.value) || 1))
setResultLimit(newLimit)
const newLimit = Math.max(
1,
Math.min(1000, parseInt(e.target.value) || 1)
);
setResultLimit(newLimit);
}}
className="h-6 text-xs text-right px-2 bg-muted/30 !border-0 rounded ml-auto focus:ring-0 focus:outline-none"
style={{ width: '70px' }}
style={{ width: "70px" }}
/>
</div>
<Slider
@ -469,16 +457,20 @@ export function KnowledgeFilterPanel() {
{/* Score Threshold Control - exactly like search page */}
<div className="space-y-2">
<div className="flex items-center justify-between">
<Label className="text-sm font-medium">Score Threshold</Label>
<Label className="text-sm font-medium text-nowrap">
Score threshold
</Label>
<Input
type="number"
min="0"
max="5"
step="0.1"
value={scoreThreshold}
onChange={(e) => setScoreThreshold(parseFloat(e.target.value) || 0)}
onChange={(e) =>
setScoreThreshold(parseFloat(e.target.value) || 0)
}
className="h-6 text-xs text-right px-2 bg-muted/30 !border-0 rounded ml-auto focus:ring-0 focus:outline-none"
style={{ width: '70px' }}
style={{ width: "70px" }}
/>
</div>
<Slider
@ -493,7 +485,7 @@ export function KnowledgeFilterPanel() {
</div>
{/* Save Configuration Button */}
<div className="pt-4 border-t border-border/50">
<div className="flex flex-col gap-3 pt-4 border-t border-border/50">
<Button
onClick={handleSaveConfiguration}
disabled={isSaving}
@ -512,10 +504,17 @@ export function KnowledgeFilterPanel() {
</>
)}
</Button>
<Button
variant="destructive"
className="w-full"
onClick={handleDeleteFilter}
>
Delete Filter
</Button>
</div>
</div>
</CardContent>
</Card>
</div>
)
}
);
}

View file

@ -15,6 +15,8 @@ import { useCallback, useEffect, useRef, useState } from "react";
import { EndpointType } from "@/contexts/chat-context";
import { useLoadingStore } from "@/stores/loadingStore";
import { KnowledgeFilterList } from "./knowledge-filter-list";
import { useKnowledgeFilter } from "@/contexts/knowledge-filter-context";
interface RawConversation {
response_id: string;
@ -74,6 +76,8 @@ export function Navigation() {
const [previousConversationCount, setPreviousConversationCount] = useState(0);
const fileInputRef = useRef<HTMLInputElement>(null);
const { selectedFilter, setSelectedFilter } = useKnowledgeFilter();
const handleNewConversation = () => {
setLoadingNewConversation(true);
refreshConversations();
@ -194,6 +198,7 @@ export function Navigation() {
];
const isOnChatPage = pathname === "/" || pathname === "/chat";
const isOnKnowledgePage = pathname.startsWith("/knowledge");
const createDefaultPlaceholder = useCallback(() => {
return {
@ -310,7 +315,7 @@ export function Navigation() {
]);
return (
<div className="space-y-4 py-4 flex flex-col h-full bg-background">
<div className="flex flex-col h-full bg-background">
<div className="px-3 py-2 flex-shrink-0">
<div className="space-y-1">
{routes.map((route) => (
@ -344,6 +349,13 @@ export function Navigation() {
</div>
</div>
{isOnKnowledgePage && (
<KnowledgeFilterList
selectedFilter={selectedFilter}
onFilterSelect={setSelectedFilter}
/>
)}
{/* Chat Page Specific Sections */}
{isOnChatPage && (
<div className="flex-1 min-h-0 flex flex-col">

View file

@ -1,18 +1,18 @@
import * as React from "react"
import * as React from "react";
import { cn } from "@/lib/utils"
import { cn } from "@/lib/utils";
function Textarea({ className, ...props }: React.ComponentProps<"textarea">) {
return (
<textarea
data-slot="textarea"
className={cn(
"border-input placeholder:text-muted-foreground focus-visible:border-ring focus-visible:ring-ring/50 aria-invalid:ring-destructive/20 dark:aria-invalid:ring-destructive/40 aria-invalid:border-destructive flex field-sizing-content min-h-16 w-full rounded-md border bg-background px-3 py-2 text-base shadow-xs transition-[color,box-shadow] outline-none focus-visible:ring-[3px] disabled:cursor-not-allowed disabled:opacity-50 md:text-sm",
"primary-input placeholder:font-mono placeholder:text-placeholder-foreground min-h-fit",
className
)}
{...props}
/>
)
);
}
export { Textarea }
export { Textarea };

View file

@ -28,6 +28,7 @@
"@radix-ui/react-tabs": "^1.1.13",
"@radix-ui/react-tooltip": "^1.2.8",
"@tailwindcss/forms": "^0.5.10",
"@tailwindcss/line-clamp": "^0.4.4",
"@tailwindcss/typography": "^0.5.16",
"@tanstack/react-query": "^5.86.0",
"ag-grid-community": "^34.2.0",
@ -2317,6 +2318,14 @@
"tailwindcss": ">=3.0.0 || >= 3.0.0-alpha.1 || >= 4.0.0-alpha.20 || >= 4.0.0-beta.1"
}
},
"node_modules/@tailwindcss/line-clamp": {
"version": "0.4.4",
"resolved": "https://registry.npmjs.org/@tailwindcss/line-clamp/-/line-clamp-0.4.4.tgz",
"integrity": "sha512-5U6SY5z8N42VtrCrKlsTAA35gy2VSyYtHWCsg1H87NU1SXnEfekTVlrga9fzUDrrHcGi2Lb5KenUWb4lRQT5/g==",
"peerDependencies": {
"tailwindcss": ">=2.0.0 || >=3.0.0 || >=3.0.0-alpha.1"
}
},
"node_modules/@tailwindcss/typography": {
"version": "0.5.16",
"resolved": "https://registry.npmjs.org/@tailwindcss/typography/-/typography-0.5.16.tgz",

View file

@ -29,6 +29,7 @@
"@radix-ui/react-tabs": "^1.1.13",
"@radix-ui/react-tooltip": "^1.2.8",
"@tailwindcss/forms": "^0.5.10",
"@tailwindcss/line-clamp": "^0.4.4",
"@tailwindcss/typography": "^0.5.16",
"@tanstack/react-query": "^5.86.0",
"ag-grid-community": "^34.2.0",

View file

@ -50,35 +50,61 @@ async function proxyRequest(
try {
let body: string | ArrayBuffer | undefined = undefined;
let willSendBody = false;
if (request.method !== 'GET' && request.method !== 'HEAD') {
const contentType = request.headers.get('content-type') || '';
const contentLength = request.headers.get('content-length');
// For file uploads (multipart/form-data), preserve binary data
if (contentType.includes('multipart/form-data')) {
body = await request.arrayBuffer();
const buf = await request.arrayBuffer();
if (buf && buf.byteLength > 0) {
body = buf;
willSendBody = true;
}
} else {
// For JSON and other text-based content, use text
body = await request.text();
const text = await request.text();
if (text && text.length > 0) {
body = text;
willSendBody = true;
}
}
// Guard against incorrect non-zero content-length when there is no body
if (!willSendBody && contentLength) {
// We'll drop content-length/header below
}
}
const headers = new Headers();
// Copy relevant headers from the original request
for (const [key, value] of request.headers.entries()) {
if (!key.toLowerCase().startsWith('host') &&
!key.toLowerCase().startsWith('x-forwarded') &&
!key.toLowerCase().startsWith('x-real-ip')) {
headers.set(key, value);
const lower = key.toLowerCase();
if (
lower.startsWith('host') ||
lower.startsWith('x-forwarded') ||
lower.startsWith('x-real-ip') ||
lower === 'content-length' ||
(!willSendBody && lower === 'content-type')
) {
continue;
}
headers.set(key, value);
}
const response = await fetch(backendUrl, {
const init: RequestInit = {
method: request.method,
headers,
body,
});
};
if (willSendBody) {
// Convert ArrayBuffer to Uint8Array to satisfy BodyInit in all environments
const bodyInit: BodyInit = typeof body === 'string' ? body : new Uint8Array(body as ArrayBuffer);
init.body = bodyInit;
}
const response = await fetch(backendUrl, init);
const responseBody = await response.text();
const responseHeaders = new Headers();

View file

@ -0,0 +1,50 @@
import { useMutation, useQueryClient } from "@tanstack/react-query";
import { KnowledgeFilter } from "../queries/useGetFiltersSearchQuery";
export interface CreateFilterRequest {
name: string;
description?: string;
queryData: string; // stringified ParsedQueryData
}
export interface CreateFilterResponse {
success: boolean;
filter: KnowledgeFilter;
message?: string;
}
async function createFilter(
data: CreateFilterRequest,
): Promise<CreateFilterResponse> {
const response = await fetch("/api/knowledge-filter", {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
name: data.name,
description: data.description ?? "",
queryData: data.queryData,
}),
});
const json = await response.json().catch(() => ({}));
if (!response.ok) {
const errorMessage = (json && (json.error as string)) || "Failed to create knowledge filter";
throw new Error(errorMessage);
}
return json as CreateFilterResponse;
}
export const useCreateFilter = () => {
const queryClient = useQueryClient();
return useMutation({
mutationFn: createFilter,
onSuccess: () => {
queryClient.invalidateQueries({ queryKey: ["knowledge-filters"]});
},
});
};
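A minimal usage sketch for the mutation hook above (not part of this commit): the wrapper name, import path, and argument values are illustrative assumptions; only the request/response shapes come from the code shown here.
// Hypothetical caller (not in this commit); the import path and names below are assumptions.
import { useCreateFilter } from "@/app/api/mutations/useCreateFilter";
export function useSaveFilterExample() {
  const createFilter = useCreateFilter();
  return async (name: string, description: string, parsedQueryData: unknown) => {
    // queryData must be a stringified ParsedQueryData; its exact shape is not shown in this diff.
    const result = await createFilter.mutateAsync({
      name,
      description,
      queryData: JSON.stringify(parsedQueryData),
    });
    // onSuccess above invalidates ["knowledge-filters"], so filter lists refetch on their own.
    return result.filter;
  };
}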

View file

@ -0,0 +1,39 @@
import { useMutation, useQueryClient } from "@tanstack/react-query";
export interface DeleteFilterRequest {
id: string;
}
export interface DeleteFilterResponse {
success: boolean;
message?: string;
}
async function deleteFilter(
data: DeleteFilterRequest,
): Promise<DeleteFilterResponse> {
const response = await fetch(`/api/knowledge-filter/${data.id}`, {
method: "DELETE",
});
const json = await response.json().catch(() => ({}));
if (!response.ok) {
const errorMessage = (json && (json.error as string)) || "Failed to delete knowledge filter";
throw new Error(errorMessage);
}
return (json as DeleteFilterResponse) || { success: true };
}
export const useDeleteFilter = () => {
const queryClient = useQueryClient();
return useMutation({
mutationFn: deleteFilter,
onSuccess: () => {
// Invalidate filters queries so UI refreshes automatically
queryClient.invalidateQueries({ queryKey: ["knowledge-filters"] });
},
});
};

View file

@ -0,0 +1,52 @@
import { useMutation, useQueryClient } from "@tanstack/react-query";
import { KnowledgeFilter } from "../queries/useGetFiltersSearchQuery";
export interface UpdateFilterRequest {
id: string;
name?: string;
description?: string;
queryData?: string; // stringified ParsedQueryData
}
export interface UpdateFilterResponse {
success: boolean;
filter: KnowledgeFilter;
message?: string;
}
async function updateFilter(data: UpdateFilterRequest): Promise<UpdateFilterResponse> {
// Build a body with only provided fields
const body: Record<string, unknown> = {};
if (typeof data.name !== "undefined") body.name = data.name;
if (typeof data.description !== "undefined") body.description = data.description;
if (typeof data.queryData !== "undefined") body.queryData = data.queryData;
const response = await fetch(`/api/knowledge-filter/${data.id}`, {
method: "PUT",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify(body),
});
const json = await response.json().catch(() => ({}));
if (!response.ok) {
const errorMessage = (json && (json.error as string)) || "Failed to update knowledge filter";
throw new Error(errorMessage);
}
return json as UpdateFilterResponse;
}
export const useUpdateFilter = () => {
const queryClient = useQueryClient();
return useMutation({
mutationFn: updateFilter,
onSuccess: () => {
// Refresh any knowledge filter lists/searches
queryClient.invalidateQueries({ queryKey: ["knowledge-filters"] });
},
});
};

View file

@ -0,0 +1,47 @@
import {
useQuery,
useQueryClient,
type UseQueryOptions,
} from "@tanstack/react-query";
export interface KnowledgeFilter {
id: string;
name: string;
description: string;
query_data: string;
owner: string;
created_at: string;
updated_at: string;
}
export const useGetFiltersSearchQuery = (
search: string,
limit = 20,
options?: Omit<UseQueryOptions<KnowledgeFilter[]>, "queryKey" | "queryFn">
) => {
const queryClient = useQueryClient();
async function getFilters(): Promise<KnowledgeFilter[]> {
const response = await fetch("/api/knowledge-filter/search", {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({ query: search, limit }),
});
const json = await response.json();
if (!response.ok || !json.success) {
// ensure we always return a KnowledgeFilter[] to satisfy the return type
return [];
}
return (json.filters || []) as KnowledgeFilter[];
}
return useQuery<KnowledgeFilter[]>(
{
queryKey: ["knowledge-filters", search, limit],
queryFn: getFilters,
...options,
},
queryClient
);
};

View file

@ -0,0 +1,47 @@
import { useQuery, useQueryClient, type UseQueryOptions } from "@tanstack/react-query";
export interface FacetBucket {
key: string;
count: number;
}
export interface SearchAggregations {
data_sources?: { buckets: FacetBucket[] };
document_types?: { buckets: FacetBucket[] };
owners?: { buckets: FacetBucket[] };
connector_types?: { buckets: FacetBucket[] };
}
type Options = Omit<UseQueryOptions<SearchAggregations>, "queryKey" | "queryFn">;
export const useGetSearchAggregations = (
query: string,
limit: number,
scoreThreshold: number,
options?: Options
) => {
const queryClient = useQueryClient();
async function fetchAggregations(): Promise<SearchAggregations> {
const response = await fetch("/api/search", {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({ query, limit, scoreThreshold }),
});
const json = await response.json().catch(() => ({}));
if (!response.ok) {
throw new Error((json && json.error) || "Failed to load search aggregations");
}
return (json.aggregations || {}) as SearchAggregations;
}
return useQuery<SearchAggregations>({
queryKey: ["search-aggregations", query, limit, scoreThreshold],
queryFn: fetchAggregations,
placeholderData: prev => prev,
...options,
}, queryClient);
};
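A hedged sketch of consuming the aggregations hook above (not part of this commit): the wrapper and import path are assumptions, while the facet mapping mirrors how the filter panel builds MultiSelect options.
// Illustrative consumer (not in this commit); only the hook signature and facet shape come from the code above.
import {
  type FacetBucket,
  useGetSearchAggregations,
} from "@/app/api/queries/useGetSearchAggregations";
export function useDataSourceOptionsExample(query: string) {
  // placeholderData: (prev) => prev keeps the previous facets visible while a new
  // query/limit/scoreThreshold combination is fetching, instead of flashing empty options.
  const { data: aggregations = {} } = useGetSearchAggregations(query, 100, 0);
  const buckets: FacetBucket[] = aggregations.data_sources?.buckets ?? [];
  return buckets.map((bucket) => ({
    value: bucket.key,
    label: bucket.key,
    count: bucket.count,
  }));
}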

View file

@ -162,7 +162,7 @@
}
.side-bar-arrangement {
@apply flex h-full w-[14.5rem] flex-col overflow-hidden border-r scrollbar-hide;
@apply flex h-full w-[18rem] flex-col overflow-hidden border-r scrollbar-hide;
}
.side-bar-search-div-placement {

View file

@ -4,25 +4,18 @@ import {
Building2,
Cloud,
HardDrive,
Loader2,
Search,
Trash2,
X,
} from "lucide-react";
import { AgGridReact, CustomCellRendererProps } from "ag-grid-react";
import {
type FormEvent,
useCallback,
useEffect,
useState,
useRef,
} from "react";
import { useCallback, useState, useRef, ChangeEvent } from "react";
import { useRouter } from "next/navigation";
import { SiGoogledrive } from "react-icons/si";
import { TbBrandOnedrive } from "react-icons/tb";
import { KnowledgeDropdown } from "@/components/knowledge-dropdown";
import { ProtectedRoute } from "@/components/protected-route";
import { Button } from "@/components/ui/button";
import { Input } from "@/components/ui/input";
import { useKnowledgeFilter } from "@/contexts/knowledge-filter-context";
import { useTask } from "@/contexts/task-context";
import { type File, useGetSearchQuery } from "../api/queries/useGetSearchQuery";
@ -59,39 +52,22 @@ function getSourceIcon(connectorType?: string) {
function SearchPage() {
const router = useRouter();
const { isMenuOpen } = useTask();
const { parsedFilterData, isPanelOpen } = useKnowledgeFilter();
const [query, setQuery] = useState("");
const [queryInputText, setQueryInputText] = useState("");
const { selectedFilter, setSelectedFilter, parsedFilterData, isPanelOpen } =
useKnowledgeFilter();
const [selectedRows, setSelectedRows] = useState<File[]>([]);
const [showBulkDeleteDialog, setShowBulkDeleteDialog] = useState(false);
const deleteDocumentMutation = useDeleteDocument();
const {
data = [],
isFetching,
refetch: refetchSearch,
} = useGetSearchQuery(query, parsedFilterData);
// Update query when global filter changes
useEffect(() => {
if (parsedFilterData?.query) {
setQueryInputText(parsedFilterData.query);
}
}, [parsedFilterData]);
const handleSearch = useCallback(
(e?: FormEvent<HTMLFormElement>) => {
if (e) e.preventDefault();
if (query.trim() === queryInputText.trim()) {
refetchSearch();
return;
}
setQuery(queryInputText);
},
[queryInputText, refetchSearch, query]
const { data = [], isFetching } = useGetSearchQuery(
parsedFilterData?.query || "*",
parsedFilterData
);
const handleTableSearch = (e: ChangeEvent<HTMLInputElement>) => {
gridRef.current?.api.setGridOption("quickFilterText", e.target.value);
};
const fileResults = data as File[];
const gridRef = useRef<AgGridReact>(null);
@ -147,6 +123,7 @@ function SearchPage() {
{
field: "avgScore",
headerName: "Avg score",
initialFlex: 0.5,
cellRenderer: ({ value }: CustomCellRendererProps<File>) => {
return (
<span className="text-xs text-green-400 bg-green-400/20 px-2 py-1 rounded">
@ -167,9 +144,8 @@ function SearchPage() {
},
colId: "actions",
filter: false,
width: 60,
minWidth: 60,
maxWidth: 60,
minWidth: 0,
width: 40,
resizable: false,
sortable: false,
initialFlex: 0,
@ -244,19 +220,29 @@ function SearchPage() {
</div>
{/* Search Input Area */}
<div className="flex-shrink-0 mb-6 lg:max-w-[75%] xl:max-w-[50%]">
<form onSubmit={handleSearch} className="flex gap-3">
<Input
name="search-query"
id="search-query"
type="text"
defaultValue={parsedFilterData?.query}
value={queryInputText}
onChange={(e) => setQueryInputText(e.target.value)}
placeholder="Search your documents..."
className="flex-1 bg-muted/20 rounded-lg border border-border/50 px-4 py-3 focus-visible:ring-1 focus-visible:ring-ring"
/>
<Button
<div className="flex-shrink-0 mb-6 xl:max-w-[75%]">
<form className="flex gap-3">
<div className="primary-input min-h-10 !flex items-center flex-nowrap gap-2 focus-within:border-foreground transition-colors !py-0">
{selectedFilter?.name && (
<div className="flex items-center gap-1 bg-blue-500/20 text-blue-400 px-1.5 py-0.5 rounded max-w-[300px]">
<span className="truncate">{selectedFilter?.name}</span>
<X
aria-label="Remove filter"
className="h-4 w-4 flex-shrink-0 cursor-pointer"
onClick={() => setSelectedFilter(null)}
/>
</div>
)}
<input
className="bg-transparent w-full h-full focus:outline-none focus-visible:outline-none placeholder:font-mono"
name="search-query"
id="search-query"
type="text"
placeholder="Search your documents..."
onChange={handleTableSearch}
/>
</div>
{/* <Button
type="submit"
variant="outline"
className="rounded-lg p-0 flex-shrink-0"
@ -266,7 +252,7 @@ function SearchPage() {
) : (
<Search className="h-4 w-4" />
)}
</Button>
</Button> */}
{/* //TODO: Implement sync button */}
{/* <Button
type="button"
@ -276,15 +262,16 @@ function SearchPage() {
>
Sync
</Button> */}
<Button
type="button"
variant="destructive"
className="rounded-lg flex-shrink-0"
onClick={() => setShowBulkDeleteDialog(true)}
disabled={selectedRows.length === 0}
>
<Trash2 className="h-4 w-4" /> Delete
</Button>
{selectedRows.length > 0 && (
<Button
type="button"
variant="destructive"
className="rounded-lg flex-shrink-0"
onClick={() => setShowBulkDeleteDialog(true)}
>
<Trash2 className="h-4 w-4" /> Delete
</Button>
)}
</form>
</div>
<AgGridReact
@ -298,8 +285,8 @@ function SearchPage() {
rowMultiSelectWithClick={false}
suppressRowClickSelection={true}
getRowId={(params) => params.data.filename}
domLayout="autoHeight"
onSelectionChanged={onSelectionChanged}
suppressHorizontalScroll={false}
noRowsOverlayComponent={() => (
<div className="text-center">
<Search className="h-12 w-12 mx-auto mb-4 text-muted-foreground/50" />

View file

@ -1,10 +1,11 @@
"use client";
import { useRouter } from "next/navigation";
import { Suspense, useEffect, useState } from "react";
import { toast } from "sonner";
import {
useOnboardingMutation,
type OnboardingVariables,
type OnboardingVariables,
useOnboardingMutation,
} from "@/app/api/mutations/useOnboardingMutation";
import IBMLogo from "@/components/logo/ibm-logo";
import OllamaLogo from "@/components/logo/ollama-logo";
@ -12,198 +13,198 @@ import OpenAILogo from "@/components/logo/openai-logo";
import { ProtectedRoute } from "@/components/protected-route";
import { Button } from "@/components/ui/button";
import {
Card,
CardContent,
CardFooter,
CardHeader,
Card,
CardContent,
CardFooter,
CardHeader,
} from "@/components/ui/card";
import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs";
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/ui/tooltip";
import { useGetSettingsQuery } from "../api/queries/useGetSettingsQuery";
import { IBMOnboarding } from "./components/ibm-onboarding";
import { OllamaOnboarding } from "./components/ollama-onboarding";
import { OpenAIOnboarding } from "./components/openai-onboarding";
import {
Tooltip,
TooltipContent,
TooltipTrigger,
} from "@/components/ui/tooltip";
import { useGetSettingsQuery } from "../api/queries/useGetSettingsQuery";
import { useRouter } from "next/navigation";
function OnboardingPage() {
const { data: settingsDb, isLoading: isSettingsLoading } =
useGetSettingsQuery();
const { data: settingsDb, isLoading: isSettingsLoading } =
useGetSettingsQuery();
const redirect = "/";
const redirect = "/";
const router = useRouter();
const router = useRouter();
// Redirect if already authenticated or in no-auth mode
useEffect(() => {
if (!isSettingsLoading && settingsDb && settingsDb.edited) {
router.push(redirect);
}
}, [isSettingsLoading, redirect]);
// Redirect once onboarding settings have already been edited (setup completed)
useEffect(() => {
if (!isSettingsLoading && settingsDb && settingsDb.edited) {
router.push(redirect);
}
}, [isSettingsLoading, settingsDb, router]);
const [modelProvider, setModelProvider] = useState<string>("openai");
const [modelProvider, setModelProvider] = useState<string>("openai");
const [sampleDataset, setSampleDataset] = useState<boolean>(true);
const [sampleDataset, setSampleDataset] = useState<boolean>(true);
const handleSetModelProvider = (provider: string) => {
setModelProvider(provider);
setSettings({
model_provider: provider,
embedding_model: "",
llm_model: "",
});
};
const handleSetModelProvider = (provider: string) => {
setModelProvider(provider);
setSettings({
model_provider: provider,
embedding_model: "",
llm_model: "",
});
};
const [settings, setSettings] = useState<OnboardingVariables>({
model_provider: modelProvider,
embedding_model: "",
llm_model: "",
});
const [settings, setSettings] = useState<OnboardingVariables>({
model_provider: modelProvider,
embedding_model: "",
llm_model: "",
});
// Mutations
const onboardingMutation = useOnboardingMutation({
onSuccess: (data) => {
toast.success("Onboarding completed successfully!");
console.log("Onboarding completed successfully", data);
},
onError: (error) => {
toast.error("Failed to complete onboarding", {
description: error.message,
});
},
});
// Mutations
const onboardingMutation = useOnboardingMutation({
onSuccess: (data) => {
toast.success("Onboarding completed successfully!");
console.log("Onboarding completed successfully", data);
router.push(redirect);
},
onError: (error) => {
toast.error("Failed to complete onboarding", {
description: error.message,
});
},
});
const handleComplete = () => {
if (
!settings.model_provider ||
!settings.llm_model ||
!settings.embedding_model
) {
toast.error("Please complete all required fields");
return;
}
const handleComplete = () => {
if (
!settings.model_provider ||
!settings.llm_model ||
!settings.embedding_model
) {
toast.error("Please complete all required fields");
return;
}
// Prepare onboarding data
const onboardingData: OnboardingVariables = {
model_provider: settings.model_provider,
llm_model: settings.llm_model,
embedding_model: settings.embedding_model,
sample_data: sampleDataset,
};
// Prepare onboarding data
const onboardingData: OnboardingVariables = {
model_provider: settings.model_provider,
llm_model: settings.llm_model,
embedding_model: settings.embedding_model,
sample_data: sampleDataset,
};
// Add API key if available
if (settings.api_key) {
onboardingData.api_key = settings.api_key;
}
// Add API key if available
if (settings.api_key) {
onboardingData.api_key = settings.api_key;
}
// Add endpoint if available
if (settings.endpoint) {
onboardingData.endpoint = settings.endpoint;
}
// Add endpoint if available
if (settings.endpoint) {
onboardingData.endpoint = settings.endpoint;
}
// Add project_id if available
if (settings.project_id) {
onboardingData.project_id = settings.project_id;
}
// Add project_id if available
if (settings.project_id) {
onboardingData.project_id = settings.project_id;
}
onboardingMutation.mutate(onboardingData);
};
onboardingMutation.mutate(onboardingData);
};
const isComplete = !!settings.llm_model && !!settings.embedding_model;
const isComplete = !!settings.llm_model && !!settings.embedding_model;
return (
<div
className="min-h-dvh w-full flex gap-5 flex-col items-center justify-center bg-background p-4"
style={{
backgroundImage: "url('/images/background.png')",
backgroundSize: "cover",
backgroundPosition: "center",
}}
>
<div className="flex flex-col items-center gap-5 min-h-[550px] w-full">
<div className="flex flex-col items-center justify-center gap-4">
<h1 className="text-2xl font-medium font-chivo">
Configure your models
</h1>
<p className="text-sm text-muted-foreground">[description of task]</p>
</div>
<Card className="w-full max-w-[580px]">
<Tabs
defaultValue={modelProvider}
onValueChange={handleSetModelProvider}
>
<CardHeader>
<TabsList>
<TabsTrigger value="openai">
<OpenAILogo className="w-4 h-4" />
OpenAI
</TabsTrigger>
<TabsTrigger value="watsonx">
<IBMLogo className="w-4 h-4" />
IBM
</TabsTrigger>
<TabsTrigger value="ollama">
<OllamaLogo className="w-4 h-4" />
Ollama
</TabsTrigger>
</TabsList>
</CardHeader>
<CardContent>
<TabsContent value="openai">
<OpenAIOnboarding
setSettings={setSettings}
sampleDataset={sampleDataset}
setSampleDataset={setSampleDataset}
/>
</TabsContent>
<TabsContent value="watsonx">
<IBMOnboarding
setSettings={setSettings}
sampleDataset={sampleDataset}
setSampleDataset={setSampleDataset}
/>
</TabsContent>
<TabsContent value="ollama">
<OllamaOnboarding
setSettings={setSettings}
sampleDataset={sampleDataset}
setSampleDataset={setSampleDataset}
/>
</TabsContent>
</CardContent>
</Tabs>
<CardFooter className="flex justify-end">
<Tooltip>
<TooltipTrigger asChild>
<Button
size="sm"
onClick={handleComplete}
disabled={!isComplete}
loading={onboardingMutation.isPending}
>
Complete
</Button>
</TooltipTrigger>
<TooltipContent>
{!isComplete ? "Please fill in all required fields" : ""}
</TooltipContent>
</Tooltip>
</CardFooter>
</Card>
</div>
</div>
);
return (
<div
className="min-h-dvh w-full flex gap-5 flex-col items-center justify-center bg-background p-4"
style={{
backgroundImage: "url('/images/background.png')",
backgroundSize: "cover",
backgroundPosition: "center",
}}
>
<div className="flex flex-col items-center gap-5 min-h-[550px] w-full">
<div className="flex flex-col items-center justify-center gap-4">
<h1 className="text-2xl font-medium font-chivo">
Configure your models
</h1>
<p className="text-sm text-muted-foreground">[description of task]</p>
</div>
<Card className="w-full max-w-[580px]">
<Tabs
defaultValue={modelProvider}
onValueChange={handleSetModelProvider}
>
<CardHeader>
<TabsList>
<TabsTrigger value="openai">
<OpenAILogo className="w-4 h-4" />
OpenAI
</TabsTrigger>
<TabsTrigger value="watsonx">
<IBMLogo className="w-4 h-4" />
IBM
</TabsTrigger>
<TabsTrigger value="ollama">
<OllamaLogo className="w-4 h-4" />
Ollama
</TabsTrigger>
</TabsList>
</CardHeader>
<CardContent>
<TabsContent value="openai">
<OpenAIOnboarding
setSettings={setSettings}
sampleDataset={sampleDataset}
setSampleDataset={setSampleDataset}
/>
</TabsContent>
<TabsContent value="watsonx">
<IBMOnboarding
setSettings={setSettings}
sampleDataset={sampleDataset}
setSampleDataset={setSampleDataset}
/>
</TabsContent>
<TabsContent value="ollama">
<OllamaOnboarding
setSettings={setSettings}
sampleDataset={sampleDataset}
setSampleDataset={setSampleDataset}
/>
</TabsContent>
</CardContent>
</Tabs>
<CardFooter className="flex justify-end">
<Tooltip>
<TooltipTrigger asChild>
<Button
size="sm"
onClick={handleComplete}
disabled={!isComplete}
loading={onboardingMutation.isPending}
>
Complete
</Button>
</TooltipTrigger>
<TooltipContent>
{!isComplete ? "Please fill in all required fields" : ""}
</TooltipContent>
</Tooltip>
</CardFooter>
</Card>
</div>
</div>
);
}
export default function ProtectedOnboardingPage() {
return (
<ProtectedRoute>
<Suspense fallback={<div>Loading onboarding...</div>}>
<OnboardingPage />
</Suspense>
</ProtectedRoute>
);
return (
<ProtectedRoute>
<Suspense fallback={<div>Loading onboarding...</div>}>
<OnboardingPage />
</Suspense>
</ProtectedRoute>
);
}

View file

@ -1,10 +1,8 @@
"use client";
import { Bell, Loader2 } from "lucide-react";
import Image from "next/image";
import { usePathname } from "next/navigation";
import { useGetSettingsQuery } from "@/app/api/queries/useGetSettingsQuery";
import { KnowledgeFilterDropdown } from "@/components/knowledge-filter-dropdown";
import { KnowledgeFilterPanel } from "@/components/knowledge-filter-panel";
import { Navigation } from "@/components/navigation";
import { TaskNotificationMenu } from "@/components/task-notification-menu";
@ -20,8 +18,7 @@ import Logo from "@/components/logo/logo";
export function LayoutWrapper({ children }: { children: React.ReactNode }) {
const pathname = usePathname();
const { tasks, isMenuOpen, toggleMenu } = useTask();
const { selectedFilter, setSelectedFilter, isPanelOpen } =
useKnowledgeFilter();
const { isPanelOpen } = useKnowledgeFilter();
const { isLoading, isAuthenticated, isNoAuthMode } = useAuth();
const { isLoading: isSettingsLoading, data: settings } = useGetSettingsQuery({
enabled: isAuthenticated || isNoAuthMode,
@ -36,7 +33,7 @@ export function LayoutWrapper({ children }: { children: React.ReactNode }) {
(task) =>
task.status === "pending" ||
task.status === "running" ||
task.status === "processing",
task.status === "processing"
);
// Show loading state when backend isn't ready
@ -70,10 +67,10 @@ export function LayoutWrapper({ children }: { children: React.ReactNode }) {
<div className="header-end-division">
<div className="header-end-display">
{/* Knowledge Filter Dropdown */}
<KnowledgeFilterDropdown
{/* <KnowledgeFilterDropdown
selectedFilter={selectedFilter}
onFilterSelect={setSelectedFilter}
/>
/> */}
{/* GitHub Star Button */}
{/* <GitHubStarButton repo="phact/openrag" /> */}
@ -115,10 +112,10 @@ export function LayoutWrapper({ children }: { children: React.ReactNode }) {
isPanelOpen
? "md:pr-80"
: // Only KF panel open: 320px
"md:pr-6" // Neither open: 24px
"md:pr-0" // Neither open: 24px
}`}
>
<div className="container py-6 lg:py-8">{children}</div>
<div className="container py-6 lg:py-8 px-4 lg:px-6">{children}</div>
</main>
<TaskNotificationMenu />
<KnowledgeFilterPanel />

View file

@ -4,6 +4,7 @@ import tailwindcssTypography from "@tailwindcss/typography";
import { fontFamily } from "tailwindcss/defaultTheme";
import plugin from "tailwindcss/plugin";
import tailwindcssAnimate from "tailwindcss-animate";
import tailwindcssLineClamp from "@tailwindcss/line-clamp";
const config = {
darkMode: ["class"],
@ -175,6 +176,7 @@ const config = {
},
plugins: [
tailwindcssAnimate,
tailwindcssLineClamp,
tailwindcssForms({
strategy: "class",
}),

View file

@ -496,12 +496,18 @@ class AppClients:
WATSONX_LLM_COMPONENT_PATH = os.getenv(
"WATSONX_LLM_COMPONENT_PATH", "flows/components/watsonx_llm.json"
)
WATSONX_LLM_TEXT_COMPONENT_PATH = os.getenv(
"WATSONX_LLM_TEXT_COMPONENT_PATH", "flows/components/watsonx_llm_text.json"
)
WATSONX_EMBEDDING_COMPONENT_PATH = os.getenv(
"WATSONX_EMBEDDING_COMPONENT_PATH", "flows/components/watsonx_embedding.json"
)
OLLAMA_LLM_COMPONENT_PATH = os.getenv(
"OLLAMA_LLM_COMPONENT_PATH", "flows/components/ollama_llm.json"
)
OLLAMA_LLM_TEXT_COMPONENT_PATH = os.getenv(
"OLLAMA_LLM_TEXT_COMPONENT_PATH", "flows/components/ollama_llm_text.json"
)
OLLAMA_EMBEDDING_COMPONENT_PATH = os.getenv(
"OLLAMA_EMBEDDING_COMPONENT_PATH", "flows/components/ollama_embedding.json"
)
@ -514,6 +520,9 @@ OPENAI_EMBEDDING_COMPONENT_ID = os.getenv(
OPENAI_LLM_COMPONENT_ID = os.getenv(
"OPENAI_LLM_COMPONENT_ID", "LanguageModelComponent-0YME7"
)
OPENAI_LLM_TEXT_COMPONENT_ID = os.getenv(
"OPENAI_LLM_TEXT_COMPONENT_ID", "LanguageModelComponent-NSTA6"
)
# Provider-specific component IDs
WATSONX_EMBEDDING_COMPONENT_ID = os.getenv(
@ -522,11 +531,18 @@ WATSONX_EMBEDDING_COMPONENT_ID = os.getenv(
WATSONX_LLM_COMPONENT_ID = os.getenv(
"WATSONX_LLM_COMPONENT_ID", "IBMwatsonxModel-jA4Nw"
)
WATSONX_LLM_TEXT_COMPONENT_ID = os.getenv(
"WATSONX_LLM_TEXT_COMPONENT_ID", "IBMwatsonxModel-18kmA"
)
OLLAMA_EMBEDDING_COMPONENT_ID = os.getenv(
"OLLAMA_EMBEDDING_COMPONENT_ID", "OllamaEmbeddings-4ah5Q"
)
OLLAMA_LLM_COMPONENT_ID = os.getenv("OLLAMA_LLM_COMPONENT_ID", "OllamaModel-eCsJx")
OLLAMA_LLM_TEXT_COMPONENT_ID = os.getenv(
"OLLAMA_LLM_TEXT_COMPONENT_ID", "OllamaModel-XDGqZ"
)
# Docling component ID for ingest flow
DOCLING_COMPONENT_ID = os.getenv("DOCLING_COMPONENT_ID", "DoclingRemote-78KoX")

View file

@ -3,8 +3,13 @@ from config.settings import (
LANGFLOW_URL,
LANGFLOW_CHAT_FLOW_ID,
LANGFLOW_INGEST_FLOW_ID,
OLLAMA_LLM_TEXT_COMPONENT_ID,
OLLAMA_LLM_TEXT_COMPONENT_PATH,
OPENAI_EMBEDDING_COMPONENT_ID,
OPENAI_LLM_COMPONENT_ID,
OPENAI_LLM_TEXT_COMPONENT_ID,
WATSONX_LLM_TEXT_COMPONENT_ID,
WATSONX_LLM_TEXT_COMPONENT_PATH,
clients,
WATSONX_LLM_COMPONENT_PATH,
WATSONX_EMBEDDING_COMPONENT_PATH,
@ -146,7 +151,7 @@ class FlowsService:
try:
# Load component templates based on provider
llm_template, embedding_template = self._load_component_templates(provider)
llm_template, embedding_template, llm_text_template = self._load_component_templates(provider)
logger.info(f"Assigning {provider} components")
@ -158,6 +163,7 @@ class FlowsService:
"flow_id": NUDGES_FLOW_ID,
"embedding_id": OPENAI_EMBEDDING_COMPONENT_ID,
"llm_id": OPENAI_LLM_COMPONENT_ID,
"llm_text_id": OPENAI_LLM_TEXT_COMPONENT_ID,
},
{
"name": "retrieval",
@ -165,6 +171,7 @@ class FlowsService:
"flow_id": LANGFLOW_CHAT_FLOW_ID,
"embedding_id": OPENAI_EMBEDDING_COMPONENT_ID,
"llm_id": OPENAI_LLM_COMPONENT_ID,
"llm_text_id": None,
},
{
"name": "ingest",
@ -172,6 +179,7 @@ class FlowsService:
"flow_id": LANGFLOW_INGEST_FLOW_ID,
"embedding_id": OPENAI_EMBEDDING_COMPONENT_ID,
"llm_id": None, # Ingestion flow might not have LLM
"llm_text_id": None, # Ingestion flow might not have LLM Text
},
]
@ -181,7 +189,7 @@ class FlowsService:
for config in flow_configs:
try:
result = await self._update_flow_components(
config, llm_template, embedding_template
config, llm_template, embedding_template, llm_text_template
)
results.append(result)
logger.info(f"Successfully updated {config['name']} flow")
@ -215,9 +223,11 @@ class FlowsService:
if provider == "watsonx":
llm_path = WATSONX_LLM_COMPONENT_PATH
embedding_path = WATSONX_EMBEDDING_COMPONENT_PATH
llm_text_path = WATSONX_LLM_TEXT_COMPONENT_PATH
elif provider == "ollama":
llm_path = OLLAMA_LLM_COMPONENT_PATH
embedding_path = OLLAMA_EMBEDDING_COMPONENT_PATH
llm_text_path = OLLAMA_LLM_TEXT_COMPONENT_PATH
else:
raise ValueError(f"Unsupported provider: {provider}")
@ -246,21 +256,31 @@ class FlowsService:
with open(embedding_full_path, "r") as f:
embedding_template = json.load(f)
logger.info(f"Loaded component templates for {provider}")
return llm_template, embedding_template
# Load LLM Text template
llm_text_full_path = os.path.join(project_root, llm_text_path)
if not os.path.exists(llm_text_full_path):
raise FileNotFoundError(
f"LLM Text component template not found at: {llm_text_full_path}"
)
async def _update_flow_components(self, config, llm_template, embedding_template):
with open(llm_text_full_path, "r") as f:
llm_text_template = json.load(f)
logger.info(f"Loaded component templates for {provider}")
return llm_template, embedding_template, llm_text_template
async def _update_flow_components(self, config, llm_template, embedding_template, llm_text_template):
"""Update components in a specific flow"""
flow_name = config["name"]
flow_file = config["file"]
flow_id = config["flow_id"]
old_embedding_id = config["embedding_id"]
old_llm_id = config["llm_id"]
old_llm_text_id = config["llm_text_id"]
# Extract IDs from templates
new_llm_id = llm_template["data"]["id"]
new_embedding_id = embedding_template["data"]["id"]
new_llm_text_id = llm_text_template["data"]["id"]
# Get the project root directory
current_file_dir = os.path.dirname(os.path.abspath(__file__))
src_dir = os.path.dirname(current_file_dir)
@ -308,6 +328,21 @@ class FlowsService:
self._replace_node_in_flow(flow_data, old_llm_id, new_llm_node)
components_updated.append(f"llm: {old_llm_id} -> {new_llm_id}")
# Replace LLM Text component (if exists in this flow)
if old_llm_text_id:
llm_text_node = self._find_node_by_id(flow_data, old_llm_text_id)
if llm_text_node:
# Preserve position
original_position = llm_text_node.get("position", {})
# Replace with new template
new_llm_text_node = llm_text_template.copy()
new_llm_text_node["position"] = original_position
# Replace in flow
self._replace_node_in_flow(flow_data, old_llm_text_id, new_llm_text_node)
components_updated.append(f"llm: {old_llm_text_id} -> {new_llm_text_id}")
# Update all edge references using regex replacement
flow_json_str = json.dumps(flow_data)
@ -326,6 +361,11 @@ class FlowsService:
flow_json_str = re.sub(
re.escape(old_llm_id), new_llm_id, flow_json_str
)
if old_llm_text_id:
flow_json_str = re.sub(
re.escape(old_llm_text_id), new_llm_text_id, flow_json_str
)
flow_json_str = re.sub(
re.escape(old_llm_id.split("-")[0]),
new_llm_id.split("-")[0],
@ -415,7 +455,7 @@ class FlowsService:
]
# Determine target component IDs based on provider
target_embedding_id, target_llm_id = self._get_provider_component_ids(
target_embedding_id, target_llm_id, target_llm_text_id = self._get_provider_component_ids(
provider
)
@ -429,6 +469,7 @@ class FlowsService:
provider,
target_embedding_id,
target_llm_id,
target_llm_text_id,
embedding_model,
llm_model,
endpoint,
@ -471,12 +512,12 @@ class FlowsService:
def _get_provider_component_ids(self, provider: str):
"""Get the component IDs for a specific provider"""
if provider == "watsonx":
return WATSONX_EMBEDDING_COMPONENT_ID, WATSONX_LLM_COMPONENT_ID
return WATSONX_EMBEDDING_COMPONENT_ID, WATSONX_LLM_COMPONENT_ID, WATSONX_LLM_TEXT_COMPONENT_ID
elif provider == "ollama":
return OLLAMA_EMBEDDING_COMPONENT_ID, OLLAMA_LLM_COMPONENT_ID
return OLLAMA_EMBEDDING_COMPONENT_ID, OLLAMA_LLM_COMPONENT_ID, OLLAMA_LLM_TEXT_COMPONENT_ID
elif provider == "openai":
# OpenAI components are the default ones
return OPENAI_EMBEDDING_COMPONENT_ID, OPENAI_LLM_COMPONENT_ID
return OPENAI_EMBEDDING_COMPONENT_ID, OPENAI_LLM_COMPONENT_ID, OPENAI_LLM_TEXT_COMPONENT_ID
else:
raise ValueError(f"Unsupported provider: {provider}")
@ -486,6 +527,7 @@ class FlowsService:
provider: str,
target_embedding_id: str,
target_llm_id: str,
target_llm_text_id: str,
embedding_model: str,
llm_model: str,
endpoint: str = None,
@ -512,7 +554,7 @@ class FlowsService:
embedding_node = self._find_node_by_id(flow_data, target_embedding_id)
if embedding_node:
if self._update_component_fields(
embedding_node, provider, "embedding", embedding_model, endpoint
embedding_node, provider, embedding_model, endpoint
):
updates_made.append(f"embedding model: {embedding_model}")
@ -521,7 +563,15 @@ class FlowsService:
llm_node = self._find_node_by_id(flow_data, target_llm_id)
if llm_node:
if self._update_component_fields(
llm_node, provider, "llm", llm_model, endpoint
llm_node, provider, llm_model, endpoint
):
updates_made.append(f"llm model: {llm_model}")
if target_llm_text_id:
llm_text_node = self._find_node_by_id(flow_data, target_llm_text_id)
if llm_text_node:
if self._update_component_fields(
llm_text_node, provider, llm_model, endpoint
):
updates_made.append(f"llm model: {llm_model}")
@ -569,7 +619,11 @@ class FlowsService:
updated = False
# Update model_name field (common to all providers)
if "model_name" in template:
if provider == "openai" and "model" in template:
template["model"]["value"] = model_value
template["model"]["options"] = [model_value]
updated = True
elif "model_name" in template:
template["model_name"]["value"] = model_value
template["model_name"]["options"] = [model_value]
updated = True

View file

@ -19,10 +19,18 @@ class KnowledgeFilterService:
# Index the knowledge filter document
result = await opensearch_client.index(
index=KNOWLEDGE_FILTERS_INDEX_NAME, id=filter_doc["id"], body=filter_doc
index=KNOWLEDGE_FILTERS_INDEX_NAME,
id=filter_doc["id"],
body=filter_doc,
refresh="wait_for",
)
if result.get("result") == "created":
# Extra safety: ensure visibility in subsequent searches
try:
await opensearch_client.indices.refresh(index=KNOWLEDGE_FILTERS_INDEX_NAME)
except Exception:
pass
return {"success": True, "id": filter_doc["id"], "filter": filter_doc}
else:
return {"success": False, "error": "Failed to create knowledge filter"}
@ -138,11 +146,19 @@ class KnowledgeFilterService:
# Update the document
result = await opensearch_client.update(
index=KNOWLEDGE_FILTERS_INDEX_NAME, id=filter_id, body={"doc": updates}
index=KNOWLEDGE_FILTERS_INDEX_NAME,
id=filter_id,
body={"doc": updates},
refresh="wait_for",
)
if result.get("result") in ["updated", "noop"]:
# Get the updated document
# Ensure visibility before fetching/returning
try:
await opensearch_client.indices.refresh(index=KNOWLEDGE_FILTERS_INDEX_NAME)
except Exception:
pass
updated_doc = await opensearch_client.get(
index=KNOWLEDGE_FILTERS_INDEX_NAME, id=filter_id
)
@ -164,10 +180,17 @@ class KnowledgeFilterService:
)
result = await opensearch_client.delete(
index=KNOWLEDGE_FILTERS_INDEX_NAME, id=filter_id
index=KNOWLEDGE_FILTERS_INDEX_NAME,
id=filter_id,
refresh="wait_for",
)
if result.get("result") == "deleted":
# Extra safety: ensure visibility in subsequent searches
try:
await opensearch_client.indices.refresh(index=KNOWLEDGE_FILTERS_INDEX_NAME)
except Exception:
pass
return {
"success": True,
"message": "Knowledge filter deleted successfully",
@ -230,7 +253,10 @@ class KnowledgeFilterService:
}
result = await opensearch_client.update(
index=KNOWLEDGE_FILTERS_INDEX_NAME, id=filter_id, body=update_body
index=KNOWLEDGE_FILTERS_INDEX_NAME,
id=filter_id,
body=update_body,
refresh="wait_for",
)
if result.get("result") in ["updated", "noop"]:

View file

@ -124,10 +124,8 @@ class ModelsService:
for model in models:
model_name = model.get(JSON_NAME_KEY, "")
# Remove tag if present (e.g., "llama3:latest" -> "llama3")
clean_model_name = model_name.split(":")[0] if model_name else ""
if not clean_model_name:
if not model_name:
continue
logger.debug(f"Checking model: {model_name}")
@ -152,7 +150,7 @@ class ModelsService:
# Check if it's an embedding model
is_embedding = any(
embed_model in clean_model_name.lower()
embed_model in model_name.lower()
for embed_model in self.OLLAMA_EMBEDDING_MODELS
)
@ -160,8 +158,8 @@ class ModelsService:
# Embedding models only need completion capability
embedding_models.append(
{
"value": clean_model_name,
"label": clean_model_name,
"value": model_name,
"label": model_name,
"default": False,
}
)
@ -169,9 +167,9 @@ class ModelsService:
# Language models need both completion and tool calling
language_models.append(
{
"value": clean_model_name,
"label": clean_model_name,
"default": "llama3" in clean_model_name.lower(),
"value": model_name,
"label": model_name,
"default": "llama3" in model_name.lower(),
}
)
except Exception as e: