Merge remote-tracking branch 'origin/main' into feat/delete_session

This commit is contained in:
Lucas Oliveira 2025-09-24 17:39:38 -03:00
commit 85b7aaa7e3
27 changed files with 1570 additions and 963 deletions


@@ -9,7 +9,7 @@ LANGFLOW_SECRET_KEY=
 LANGFLOW_CHAT_FLOW_ID=1098eea1-6649-4e1d-aed1-b77249fb8dd0
 LANGFLOW_INGEST_FLOW_ID=5488df7c-b93f-4f87-a446-b67028bc0813
 # Ingest flow using docling
-LANGFLOW_INGEST_FLOW_ID=1402618b-e6d1-4ff2-9a11-d6ce71186915
+# LANGFLOW_INGEST_FLOW_ID=1402618b-e6d1-4ff2-9a11-d6ce71186915
 NUDGES_FLOW_ID=ebc01d31-1976-46ce-a385-b0240327226c
 # Set a strong admin password for OpenSearch; a bcrypt hash is generated at


@@ -0,0 +1,390 @@
---
title: Quickstart
slug: /quickstart
---
import Icon from "@site/src/components/icon/icon";
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
Get started with OpenRAG by loading your knowledge, swapping out your language model, and then chatting with the OpenRAG API.
## Prerequisites
- Install and start OpenRAG
## Find your way around
1. In OpenRAG, click <Icon name="MessageSquare" aria-hidden="true"/> **Chat**.
2. Ask `What documents are available to you?`
The agent responds with a message summarizing the documents that OpenRAG loads by default, which are PDFs about evaluating data quality when using LLMs in health care.
3. To confirm the agent is correct, click <Icon name="Library" aria-hidden="true"/> **Knowledge**.
The **Knowledge** page lists the documents OpenRAG has ingested into the OpenSearch vector database. Click a document to display the chunks that were created when it was split and ingested into the vector database.
## Add your own knowledge
1. To add documents to your knowledge base, click <Icon name="Plus" aria-hidden="true"/> **Add Knowledge**.
* Select **Add File** to add a single file from your local machine (mapped with the Docker volume mount).
* Select **Process Folder** to process an entire folder of documents from your local machine (mapped with the Docker volume mount).
2. Return to the Chat window and ask a question about your loaded data.
For example, with a manual about a PC tablet loaded, ask `How do I connect this device to Wi-Fi?`
The agent responds with a message indicating it now has your knowledge as context for answering questions.
3. Click the <Icon name="Gear" aria-hidden="true"/> **Function Call: search_documents (tool_call)** that is printed in the Playground.
These events log the agent's request to the tool and the tool's response, so you have direct visibility into your agent's functionality.
If you aren't getting the results you need, you can further tune the knowledge ingestion and agent behavior in the next section.
## Swap out the language model to modify agent behavior
To modify knowledge ingestion or agent behavior, click <Icon name="Settings" aria-hidden="true"/> **Settings**.
In this example, you'll try a different LLM to demonstrate how the agent's response changes.
1. To edit the agent's behavior, click **Edit in Langflow**.
2. OpenRAG warns you that you're entering Langflow. Click **Proceed**.
3. The OpenRAG Open Search Agent flow appears.
![OpenRAG Open Search Agent Flow](/img/opensearch-agent-flow.png)
4. In the **Language Model** component, under **Model Provider**, select **Anthropic**.
:::note
This guide uses an Anthropic model for demonstration purposes. If you want to use a different provider, change the **Model Provider** and **Model Name** fields, and then provide credentials for your selected provider.
:::
5. Save your flow with <kbd>Command+S</kbd>.
6. In OpenRAG, start a new conversation by clicking the <Icon name="Plus" aria-hidden="true"/> in the **Conversations** tab.
7. Ask the same question as before to demonstrate how a different language model changes the results.
## Integrate OpenRAG into your application
:::tip
Ensure the `openrag-backend` container has port 8000 exposed in your `docker-compose.yml`:
```yaml
openrag-backend:
  ports:
    - "8000:8000"
```
:::
OpenRAG provides a REST API that you can call from Python, TypeScript, or any HTTP client to chat with your documents.
These example requests assume OpenRAG is running in "no-auth" mode.
For complete details, including authentication, request and response parameters, and more example requests, see the API documentation.
### Chat with your documents
Prompt OpenRAG at the `/chat` API endpoint.
<Tabs>
<TabItem value="python" label="Python">
```python
import requests

url = "http://localhost:8000/chat"
payload = {
    "prompt": "What documents are available to you?",
    "previous_response_id": None
}

response = requests.post(url, json=payload)
print("OpenRAG Response:", response.json())
```
</TabItem>
<TabItem value="typescript" label="TypeScript">
```typescript
import fetch from 'node-fetch';

const response = await fetch("http://localhost:8000/chat", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    prompt: "What documents are available to you?",
    previous_response_id: null
  })
});

const data = await response.json();
console.log("OpenRAG Response:", data);
```
</TabItem>
<TabItem value="curl" label="curl">
```bash
curl -X POST "http://localhost:8000/chat" \
  -H "Content-Type: application/json" \
  -d '{
    "prompt": "What documents are available to you?",
    "previous_response_id": null
  }'
```
</TabItem>
</Tabs>
<details>
<summary>Response</summary>
```
{
"response": "I have access to a wide range of documents depending on the context and the tools enabled in this environment. Specifically, I can search for and retrieve documents related to various topics such as technical papers, articles, manuals, guides, knowledge base entries, and other text-based resources. If you specify a particular subject or type of document you're interested in, I can try to locate relevant materials for you. Let me know what you need!",
"response_id": "resp_68d3fdbac93081958b8781b97919fe7007f98bd83932fa1a"
}
```
</details>
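The `response_id` in the reply identifies the conversation. To continue the same session, pass it back as `previous_response_id` in your next request. Here is a minimal sketch of a follow-up turn (the second prompt is only illustrative):

```python
import requests

url = "http://localhost:8000/chat"

# First turn starts a new conversation
first = requests.post(url, json={
    "prompt": "What documents are available to you?",
    "previous_response_id": None
}).json()

# Follow-up turn reuses the returned response_id to keep the session
follow_up = requests.post(url, json={
    "prompt": "Summarize the first document you mentioned.",
    "previous_response_id": first["response_id"]
}).json()

print(follow_up["response"])
```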
### Search your documents
Search your document knowledge base at the `/search` endpoint.
<Tabs>
<TabItem value="python" label="Python">
```python
import requests

url = "http://localhost:8000/search"
payload = {"query": "healthcare data quality", "limit": 5}

response = requests.post(url, json=payload)
results = response.json()

print("Search Results:")
for result in results.get("results", []):
    print(f"- {result.get('filename')}: {result.get('text', '')[:100]}...")
```
</TabItem>
<TabItem value="typescript" label="TypeScript">
```typescript
const response = await fetch("http://localhost:8000/search", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    query: "healthcare data quality",
    limit: 5
  })
});

const results = await response.json();

console.log("Search Results:");
results.results?.forEach((result, index) => {
  const filename = result.filename || 'Unknown';
  const text = result.text?.substring(0, 100) || '';
  console.log(`${index + 1}. ${filename}: ${text}...`);
});
```
</TabItem>
<TabItem value="curl" label="curl">
```bash
curl -X POST "http://localhost:8000/search" \
  -H "Content-Type: application/json" \
  -d '{"query": "healthcare data quality", "limit": 5}'
```
</TabItem>
</Tabs>
<details>
<summary>Example response</summary>
```
Found 5 results
1. 2506.08231v1.pdf: variables with high performance metrics. These variables might also require fewer replication analys...
2. 2506.08231v1.pdf: on EHR data and may lack the clinical domain knowledge needed to perform well on the tasks where EHR...
3. 2506.08231v1.pdf: Abstract Large language models (LLMs) are increasingly used to extract clinical data from electronic...
4. 2506.08231v1.pdf: these multidimensional assessments, the framework not only quantifies accuracy, but can also be appl...
5. 2506.08231v1.pdf: observed in only the model metrics, but not the abstractor metrics, it indicates that model errors m...
```
</details>
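Each search result includes at least a `filename` and a `text` snippet, so you can post-process the hits however you like. For example, here is a small sketch that groups the returned chunks by source document (field names follow the response shape shown above):

```python
import requests
from collections import defaultdict

response = requests.post(
    "http://localhost:8000/search",
    json={"query": "healthcare data quality", "limit": 10},
)
results = response.json().get("results", [])

# Group matching chunks by the document they came from
by_file = defaultdict(list)
for result in results:
    by_file[result.get("filename", "Unknown")].append(result.get("text", ""))

for filename, chunks in by_file.items():
    print(f"{filename}: {len(chunks)} matching chunk(s)")
```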
### Use chat and search together
Create a complete chat application that combines an interactive terminal chat with session continuity and search functionality.
<Tabs>
<TabItem value="python" label="Python">
```python
import requests

# Configuration
OPENRAG_BASE_URL = "http://localhost:8000"
CHAT_URL = f"{OPENRAG_BASE_URL}/chat"
SEARCH_URL = f"{OPENRAG_BASE_URL}/search"
DEFAULT_SEARCH_LIMIT = 5


def chat_with_openrag(message, previous_response_id=None):
    try:
        response = requests.post(CHAT_URL, json={
            "prompt": message,
            "previous_response_id": previous_response_id
        })
        response.raise_for_status()
        data = response.json()
        return data.get("response"), data.get("response_id")
    except Exception as e:
        return f"Error: {str(e)}", None


def search_documents(query, limit=DEFAULT_SEARCH_LIMIT):
    try:
        response = requests.post(SEARCH_URL, json={
            "query": query,
            "limit": limit
        })
        response.raise_for_status()
        data = response.json()
        return data.get("results", [])
    except Exception:
        return []


# Interactive chat with session continuity and search
previous_response_id = None
while True:
    question = input("Your question (or 'search <query>' to search): ").strip()
    if question.lower() in ['quit', 'exit', 'q']:
        break
    if not question:
        continue
    if question.lower().startswith('search '):
        query = question[7:].strip()
        print("Searching documents...")
        results = search_documents(query)
        print(f"\nFound {len(results)} results:")
        for i, result in enumerate(results, 1):
            filename = result.get('filename', 'Unknown')
            text = result.get('text', '')[:100]
            print(f"{i}. {filename}: {text}...")
        print()
    else:
        print("OpenRAG is thinking...")
        result, response_id = chat_with_openrag(question, previous_response_id)
        print(f"OpenRAG: {result}\n")
        previous_response_id = response_id
```
</TabItem>
<TabItem value="typescript" label="TypeScript">
```ts
import fetch from 'node-fetch';

// Configuration
const OPENRAG_BASE_URL = "http://localhost:8000";
const CHAT_URL = `${OPENRAG_BASE_URL}/chat`;
const SEARCH_URL = `${OPENRAG_BASE_URL}/search`;
const DEFAULT_SEARCH_LIMIT = 5;

async function chatWithOpenRAG(message: string, previousResponseId?: string | null) {
  try {
    const response = await fetch(CHAT_URL, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({
        prompt: message,
        previous_response_id: previousResponseId
      })
    });
    const data = await response.json();
    return [data.response || "No response received", data.response_id || null];
  } catch (error) {
    return [`Error: ${error}`, null];
  }
}

async function searchDocuments(query: string, limit: number = DEFAULT_SEARCH_LIMIT) {
  try {
    const response = await fetch(SEARCH_URL, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ query, limit })
    });
    const data = await response.json();
    return data.results || [];
  } catch (error) {
    return [];
  }
}

// Interactive chat with session continuity and search
let previousResponseId = null;
const readline = require('readline');
const rl = readline.createInterface({ input: process.stdin, output: process.stdout });

const askQuestion = () => {
  rl.question("Your question (or 'search <query>' to search): ", async (question) => {
    if (question.toLowerCase() === 'quit' || question.toLowerCase() === 'exit' || question.toLowerCase() === 'q') {
      console.log("Goodbye!");
      rl.close();
      return;
    }
    if (!question.trim()) {
      askQuestion();
      return;
    }
    if (question.toLowerCase().startsWith('search ')) {
      const query = question.substring(7).trim();
      console.log("Searching documents...");
      const results = await searchDocuments(query);
      console.log(`\nFound ${results.length} results:`);
      results.forEach((result, i) => {
        const filename = result.filename || 'Unknown';
        const text = result.text?.substring(0, 100) || '';
        console.log(`${i + 1}. ${filename}: ${text}...`);
      });
      console.log();
    } else {
      console.log("OpenRAG is thinking...");
      const [result, responseId] = await chatWithOpenRAG(question, previousResponseId);
      console.log(`\nOpenRAG: ${result}\n`);
      previousResponseId = responseId;
    }
    askQuestion();
  });
};

console.log("OpenRAG Chat Interface");
console.log("Ask questions about your documents. Type 'quit' to exit.");
console.log("Use 'search <query>' to search documents directly.\n");

askQuestion();
```
</TabItem>
</Tabs>
<details>
<summary>Example response</summary>
```
Your question (or 'search <query>' to search): search healthcare
Searching documents...
Found 5 results:
1. 2506.08231v1.pdf: variables with high performance metrics. These variables might also require fewer replication analys...
2. 2506.08231v1.pdf: on EHR data and may lack the clinical domain knowledge needed to perform well on the tasks where EHR...
3. 2506.08231v1.pdf: Abstract Large language models (LLMs) are increasingly used to extract clinical data from electronic...
4. 2506.08231v1.pdf: Acknowledgements Darren Johnson for support in publication planning and management. The authors used...
5. 2506.08231v1.pdf: Ensuring Reliability of Curated EHR-Derived Data: The Validation of Accuracy for LLM/ML-Extracted In...
Your question (or 'search <query>' to search): what's the weather today?
OpenRAG is thinking...
OpenRAG: I don't have access to real-time weather data. Could you please provide me with your location? Then I can help you find the weather information.
Your question (or 'search <query>' to search): newark nj
OpenRAG is thinking...
```
</details>
## Next steps
TBD


@@ -1,6 +1,6 @@
 ---
 title: What is OpenRAG?
-slug: /what-is-openrag
+slug: /
 ---
 OpenRAG is an open-source package for building agentic RAG systems.


@@ -71,7 +71,7 @@ const config = {
       logo: {
         alt: 'OpenRAG Logo',
         src: 'img/logo.svg',
-        href: 'what-is-openrag',
+        href: '/',
       },
       items: [
         {
@@ -89,7 +89,7 @@ const config = {
           items: [
             {
               label: 'Getting Started',
-              to: 'what-is-openrag',
+              to: '/',
             },
           },
         },


@@ -25,6 +25,12 @@ const sidebars = {
       id: "get-started/what-is-openrag",
       label: "Introduction"
     },
+    {
+      type: "doc",
+      id: "get-started/quickstart",
+      label: "Quickstart"
+    },
     {
       type: "doc",
       id: "get-started/docker",

Binary file not shown (new image, 951 KiB).

File diff suppressed because one or more lines are too long


@@ -10,9 +10,13 @@
     "cssVariables": true,
     "prefix": ""
   },
+  "iconLibrary": "lucide",
   "aliases": {
     "components": "components",
     "utils": "lib/utils",
     "ui": "components/ui"
+  },
+  "registries": {
+    "@magicui": "https://magicui.design/r/{name}.json"
   }
 }


@ -11,7 +11,7 @@ export default function IBMLogo(props: React.SVGProps<SVGSVGElement>) {
<title>IBM Logo</title> <title>IBM Logo</title>
<path <path
d="M15.696 10.9901C15.7213 10.9901 15.7356 10.979 15.7356 10.9552V10.9313C15.7356 10.9076 15.7213 10.8964 15.696 10.8964H15.6359V10.9901H15.696ZM15.6359 11.1649H15.5552V10.8329H15.7055C15.7799 10.8329 15.8179 10.8773 15.8179 10.9378C15.8179 10.9901 15.7942 11.0235 15.7577 11.0378L15.8321 11.1649H15.7436L15.6818 11.0504H15.6359V11.1649ZM15.9255 11.0171V10.9759C15.9255 10.8424 15.821 10.7376 15.6833 10.7376C15.5456 10.7376 15.4412 10.8424 15.4412 10.9759V11.0171C15.4412 11.1505 15.5456 11.2554 15.6833 11.2554C15.821 11.2554 15.9255 11.1505 15.9255 11.0171ZM15.3668 10.9964C15.3668 10.8107 15.5077 10.6693 15.6833 10.6693C15.859 10.6693 16 10.8107 16 10.9964C16 11.1823 15.859 11.3237 15.6833 11.3237C15.5077 11.3237 15.3668 11.1823 15.3668 10.9964ZM10.8069 5.74885L10.6627 5.33301H8.28904V5.74885H10.8069ZM11.0821 6.54285L10.9379 6.12691H8.28904V6.54285H11.0821ZM12.8481 11.3067H14.9203V10.8908H12.8481V11.3067ZM12.8481 10.5126H14.9203V10.0968H12.8481V10.5126ZM12.8481 9.71873H14.0914V9.3028H12.8481V9.71873ZM12.8481 8.92474H14.0914V8.50889H12.8481V8.92474ZM12.8481 8.13084H14.0914V7.7149H11.7212L11.6047 8.05102L11.4882 7.7149H9.11794V8.13084H10.3613V7.74863L10.4951 8.13084H12.7143L12.8481 7.74863V8.13084ZM14.0914 6.921H11.9964L11.8522 7.33675H14.0914V6.921ZM9.11794 8.92474H10.3613V8.50889H9.11794V8.92474ZM9.11794 9.71873H10.3613V9.3028H9.11794V9.71873ZM8.28904 10.5126H10.3613V10.0968H8.28904V10.5126ZM8.28904 11.3067H10.3613V10.8908H8.28904V11.3067ZM12.5466 5.33301L12.4025 5.74885H14.9203V5.33301H12.5466ZM12.1273 6.54285H14.9203V6.12691H12.2714L12.1273 6.54285ZM9.11794 7.33675H11.3572L11.213 6.921H9.11794V7.33675ZM10.7727 8.92474H12.4366L12.5821 8.50889H10.6272L10.7727 8.92474ZM11.0505 9.71873H12.1588L12.3042 9.3028H10.9051L11.0505 9.71873ZM11.3283 10.5126H11.881L12.0265 10.0969H11.1828L11.3283 10.5126ZM11.604 11.3067L11.7487 10.8908H11.4606L11.604 11.3067ZM3.31561 11.3026L6.36754 11.3067C6.78195 11.3067 7.15365 11.1491 7.43506 10.8908H3.31561V11.3026ZM6.55592 9.3028V9.71873H7.94994C7.94994 9.57477 7.93029 9.43551 7.89456 9.3028H6.55592ZM4.14452 9.71873H5.38783V9.3028H4.14452V9.71873ZM6.55592 7.33675H7.89456C7.93029 7.20422 7.94994 7.06486 7.94994 6.921H6.55592V7.33675ZM4.14452 7.33675H5.38783V6.9209H4.14452V7.33675ZM6.36754 5.33301H3.31561V5.74885H7.43506C7.15365 5.49061 6.77892 5.33301 6.36754 5.33301ZM7.73778 6.12691H3.31561V6.54285H7.90448C7.86839 6.39502 7.81172 6.25539 7.73778 6.12691ZM4.14452 7.7149V8.13084H7.39152C7.5292 8.01333 7.64621 7.87268 7.73732 7.7149H4.14452ZM7.39152 8.50889H4.14452V8.92474H7.73732C7.64621 8.76695 7.5292 8.62631 7.39152 8.50889ZM3.31561 10.5126H7.73778C7.81172 10.3843 7.86839 10.2447 7.90448 10.0969H3.31561V10.5126ZM0 5.74885H2.90121V5.33301H0V5.74885ZM0 6.54285H2.90121V6.12691H0V6.54285ZM0.828996 7.33684H2.0723V6.921H0.828996V7.33684ZM0.828996 8.13084H2.0723V7.7149H0.828996V8.13084ZM0.828996 8.92474H2.0723V8.50889H0.828996V8.92474ZM0.828996 9.71873H2.0723V9.3028H0.828996V9.71873ZM0 10.5126H2.90121V10.0968H0V10.5126ZM0 11.3067H2.90121V10.8908H0V11.3067Z" d="M15.696 10.9901C15.7213 10.9901 15.7356 10.979 15.7356 10.9552V10.9313C15.7356 10.9076 15.7213 10.8964 15.696 10.8964H15.6359V10.9901H15.696ZM15.6359 11.1649H15.5552V10.8329H15.7055C15.7799 10.8329 15.8179 10.8773 15.8179 10.9378C15.8179 10.9901 15.7942 11.0235 15.7577 11.0378L15.8321 11.1649H15.7436L15.6818 11.0504H15.6359V11.1649ZM15.9255 11.0171V10.9759C15.9255 10.8424 15.821 10.7376 15.6833 10.7376C15.5456 10.7376 15.4412 10.8424 15.4412 10.9759V11.0171C15.4412 11.1505 15.5456 11.2554 15.6833 11.2554C15.821 
11.2554 15.9255 11.1505 15.9255 11.0171ZM15.3668 10.9964C15.3668 10.8107 15.5077 10.6693 15.6833 10.6693C15.859 10.6693 16 10.8107 16 10.9964C16 11.1823 15.859 11.3237 15.6833 11.3237C15.5077 11.3237 15.3668 11.1823 15.3668 10.9964ZM10.8069 5.74885L10.6627 5.33301H8.28904V5.74885H10.8069ZM11.0821 6.54285L10.9379 6.12691H8.28904V6.54285H11.0821ZM12.8481 11.3067H14.9203V10.8908H12.8481V11.3067ZM12.8481 10.5126H14.9203V10.0968H12.8481V10.5126ZM12.8481 9.71873H14.0914V9.3028H12.8481V9.71873ZM12.8481 8.92474H14.0914V8.50889H12.8481V8.92474ZM12.8481 8.13084H14.0914V7.7149H11.7212L11.6047 8.05102L11.4882 7.7149H9.11794V8.13084H10.3613V7.74863L10.4951 8.13084H12.7143L12.8481 7.74863V8.13084ZM14.0914 6.921H11.9964L11.8522 7.33675H14.0914V6.921ZM9.11794 8.92474H10.3613V8.50889H9.11794V8.92474ZM9.11794 9.71873H10.3613V9.3028H9.11794V9.71873ZM8.28904 10.5126H10.3613V10.0968H8.28904V10.5126ZM8.28904 11.3067H10.3613V10.8908H8.28904V11.3067ZM12.5466 5.33301L12.4025 5.74885H14.9203V5.33301H12.5466ZM12.1273 6.54285H14.9203V6.12691H12.2714L12.1273 6.54285ZM9.11794 7.33675H11.3572L11.213 6.921H9.11794V7.33675ZM10.7727 8.92474H12.4366L12.5821 8.50889H10.6272L10.7727 8.92474ZM11.0505 9.71873H12.1588L12.3042 9.3028H10.9051L11.0505 9.71873ZM11.3283 10.5126H11.881L12.0265 10.0969H11.1828L11.3283 10.5126ZM11.604 11.3067L11.7487 10.8908H11.4606L11.604 11.3067ZM3.31561 11.3026L6.36754 11.3067C6.78195 11.3067 7.15365 11.1491 7.43506 10.8908H3.31561V11.3026ZM6.55592 9.3028V9.71873H7.94994C7.94994 9.57477 7.93029 9.43551 7.89456 9.3028H6.55592ZM4.14452 9.71873H5.38783V9.3028H4.14452V9.71873ZM6.55592 7.33675H7.89456C7.93029 7.20422 7.94994 7.06486 7.94994 6.921H6.55592V7.33675ZM4.14452 7.33675H5.38783V6.9209H4.14452V7.33675ZM6.36754 5.33301H3.31561V5.74885H7.43506C7.15365 5.49061 6.77892 5.33301 6.36754 5.33301ZM7.73778 6.12691H3.31561V6.54285H7.90448C7.86839 6.39502 7.81172 6.25539 7.73778 6.12691ZM4.14452 7.7149V8.13084H7.39152C7.5292 8.01333 7.64621 7.87268 7.73732 7.7149H4.14452ZM7.39152 8.50889H4.14452V8.92474H7.73732C7.64621 8.76695 7.5292 8.62631 7.39152 8.50889ZM3.31561 10.5126H7.73778C7.81172 10.3843 7.86839 10.2447 7.90448 10.0969H3.31561V10.5126ZM0 5.74885H2.90121V5.33301H0V5.74885ZM0 6.54285H2.90121V6.12691H0V6.54285ZM0.828996 7.33684H2.0723V6.921H0.828996V7.33684ZM0.828996 8.13084H2.0723V7.7149H0.828996V8.13084ZM0.828996 8.92474H2.0723V8.50889H0.828996V8.92474ZM0.828996 9.71873H2.0723V9.3028H0.828996V9.71873ZM0 10.5126H2.90121V10.0968H0V10.5126ZM0 11.3067H2.90121V10.8908H0V11.3067Z"
fill="#A1A1AA" fill="currentColor"
/> />
</svg> </svg>
); );


@ -23,7 +23,7 @@ export default function OpenAILogo(props: React.SVGProps<SVGSVGElement>) {
<g mask="url(#mask0_2162_638)"> <g mask="url(#mask0_2162_638)">
<path <path
d="M6.52523 5.82397V4.30397C6.52523 4.17595 6.57329 4.07991 6.68523 4.01599L9.74132 2.25599C10.1573 2.016 10.6533 1.90406 11.1653 1.90406C13.0852 1.90406 14.3013 3.39209 14.3013 4.97602C14.3013 5.088 14.3013 5.21601 14.2853 5.34403L11.1172 3.48799C10.9253 3.37605 10.7332 3.37605 10.5412 3.48799L6.52523 5.82397ZM13.6612 11.744V8.11194C13.6612 7.88789 13.5652 7.7279 13.3732 7.61592L9.35724 5.27993L10.6692 4.52788C10.7812 4.46396 10.8772 4.46396 10.9892 4.52788L14.0453 6.28788C14.9254 6.79995 15.5173 7.88789 15.5173 8.94382C15.5173 10.1598 14.7973 11.2798 13.6612 11.7439V11.744ZM5.58124 8.54404L4.26924 7.77608C4.1573 7.71216 4.10925 7.61609 4.10925 7.48807V3.9681C4.10925 2.25616 5.42125 0.960064 7.19729 0.960064C7.86938 0.960064 8.49325 1.18412 9.02138 1.5841L5.86938 3.40816C5.67744 3.5201 5.58141 3.6801 5.58141 3.90418V8.54417L5.58124 8.54404ZM8.40528 10.176L6.52523 9.12002V6.88011L8.40528 5.82414L10.2852 6.88011V9.12002L8.40528 10.176ZM9.61327 15.0401C8.94122 15.0401 8.31735 14.816 7.78921 14.4161L10.9412 12.592C11.1331 12.48 11.2292 12.32 11.2292 12.096V7.45596L12.5573 8.22392C12.6692 8.28784 12.7172 8.38388 12.7172 8.51193V12.0319C12.7172 13.7438 11.3892 15.0399 9.61327 15.0399V15.0401ZM5.82123 11.4721L2.76514 9.71212C1.88507 9.20002 1.29315 8.11211 1.29315 7.05614C1.29315 5.82414 2.02916 4.72016 3.16509 4.25611V7.9041C3.16509 8.12815 3.26116 8.28814 3.4531 8.40012L7.45319 10.72L6.14119 11.4721C6.02925 11.536 5.93318 11.536 5.82123 11.4721ZM5.64533 14.0961C3.83731 14.0961 2.50928 12.7361 2.50928 11.0561C2.50928 10.928 2.52532 10.8 2.54122 10.672L5.69322 12.4961C5.88516 12.608 6.07726 12.608 6.2692 12.4961L10.2852 10.1762V11.6962C10.2852 11.8242 10.2372 11.9202 10.1252 11.9841L7.06914 13.7441C6.65312 13.9841 6.15709 14.0961 5.64517 14.0961H5.64533ZM9.61327 16C11.5493 16 13.1652 14.624 13.5334 12.8C15.3253 12.3359 16.4773 10.6559 16.4773 8.94399C16.4773 7.82393 15.9974 6.73602 15.1334 5.95199C15.2134 5.61596 15.2614 5.27994 15.2614 4.94407C15.2614 2.65611 13.4053 0.943991 11.2613 0.943991C10.8294 0.943991 10.4134 1.00792 9.99735 1.152C9.27724 0.44797 8.28523 0 7.19729 0C5.26129 0 3.64537 1.37592 3.27724 3.19998C1.48526 3.66402 0.333252 5.34403 0.333252 7.05598C0.333252 8.17603 0.8132 9.26395 1.67723 10.048C1.59723 10.384 1.54921 10.72 1.54921 11.0559C1.54921 13.3439 3.40525 15.056 5.54926 15.056C5.98119 15.056 6.39722 14.9921 6.81324 14.848C7.53318 15.552 8.52519 16 9.61327 16Z" d="M6.52523 5.82397V4.30397C6.52523 4.17595 6.57329 4.07991 6.68523 4.01599L9.74132 2.25599C10.1573 2.016 10.6533 1.90406 11.1653 1.90406C13.0852 1.90406 14.3013 3.39209 14.3013 4.97602C14.3013 5.088 14.3013 5.21601 14.2853 5.34403L11.1172 3.48799C10.9253 3.37605 10.7332 3.37605 10.5412 3.48799L6.52523 5.82397ZM13.6612 11.744V8.11194C13.6612 7.88789 13.5652 7.7279 13.3732 7.61592L9.35724 5.27993L10.6692 4.52788C10.7812 4.46396 10.8772 4.46396 10.9892 4.52788L14.0453 6.28788C14.9254 6.79995 15.5173 7.88789 15.5173 8.94382C15.5173 10.1598 14.7973 11.2798 13.6612 11.7439V11.744ZM5.58124 8.54404L4.26924 7.77608C4.1573 7.71216 4.10925 7.61609 4.10925 7.48807V3.9681C4.10925 2.25616 5.42125 0.960064 7.19729 0.960064C7.86938 0.960064 8.49325 1.18412 9.02138 1.5841L5.86938 3.40816C5.67744 3.5201 5.58141 3.6801 5.58141 3.90418V8.54417L5.58124 8.54404ZM8.40528 10.176L6.52523 9.12002V6.88011L8.40528 5.82414L10.2852 6.88011V9.12002L8.40528 10.176ZM9.61327 15.0401C8.94122 15.0401 8.31735 14.816 7.78921 14.4161L10.9412 12.592C11.1331 12.48 11.2292 12.32 11.2292 12.096V7.45596L12.5573 8.22392C12.6692 8.28784 12.7172 8.38388 12.7172 
8.51193V12.0319C12.7172 13.7438 11.3892 15.0399 9.61327 15.0399V15.0401ZM5.82123 11.4721L2.76514 9.71212C1.88507 9.20002 1.29315 8.11211 1.29315 7.05614C1.29315 5.82414 2.02916 4.72016 3.16509 4.25611V7.9041C3.16509 8.12815 3.26116 8.28814 3.4531 8.40012L7.45319 10.72L6.14119 11.4721C6.02925 11.536 5.93318 11.536 5.82123 11.4721ZM5.64533 14.0961C3.83731 14.0961 2.50928 12.7361 2.50928 11.0561C2.50928 10.928 2.52532 10.8 2.54122 10.672L5.69322 12.4961C5.88516 12.608 6.07726 12.608 6.2692 12.4961L10.2852 10.1762V11.6962C10.2852 11.8242 10.2372 11.9202 10.1252 11.9841L7.06914 13.7441C6.65312 13.9841 6.15709 14.0961 5.64517 14.0961H5.64533ZM9.61327 16C11.5493 16 13.1652 14.624 13.5334 12.8C15.3253 12.3359 16.4773 10.6559 16.4773 8.94399C16.4773 7.82393 15.9974 6.73602 15.1334 5.95199C15.2134 5.61596 15.2614 5.27994 15.2614 4.94407C15.2614 2.65611 13.4053 0.943991 11.2613 0.943991C10.8294 0.943991 10.4134 1.00792 9.99735 1.152C9.27724 0.44797 8.28523 0 7.19729 0C5.26129 0 3.64537 1.37592 3.27724 3.19998C1.48526 3.66402 0.333252 5.34403 0.333252 7.05598C0.333252 8.17603 0.8132 9.26395 1.67723 10.048C1.59723 10.384 1.54921 10.72 1.54921 11.0559C1.54921 13.3439 3.40525 15.056 5.54926 15.056C5.98119 15.056 6.39722 14.9921 6.81324 14.848C7.53318 15.552 8.52519 16 9.61327 16Z"
fill="white" fill="currentColor"
/> />
</g> </g>
</svg> </svg>


@ -0,0 +1,158 @@
"use client";
import { motion } from "motion/react";
import type React from "react";
import { useEffect, useId, useRef, useState } from "react";
import { cn } from "@/lib/utils";
/**
* DotPattern Component Props
*
* @param {number} [width=16] - The horizontal spacing between dots
* @param {number} [height=16] - The vertical spacing between dots
* @param {number} [x=0] - The x-offset of the entire pattern
* @param {number} [y=0] - The y-offset of the entire pattern
* @param {number} [cx=1] - The x-offset of individual dots
* @param {number} [cy=1] - The y-offset of individual dots
* @param {number} [cr=1] - The radius of each dot
* @param {string} [className] - Additional CSS classes to apply to the SVG container
* @param {boolean} [glow=false] - Whether dots should have a glowing animation effect
*/
interface DotPatternProps extends React.SVGProps<SVGSVGElement> {
width?: number;
height?: number;
x?: number;
y?: number;
cx?: number;
cy?: number;
cr?: number;
className?: string;
glow?: boolean;
[key: string]: unknown;
}
/**
* DotPattern Component
*
* A React component that creates an animated or static dot pattern background using SVG.
* The pattern automatically adjusts to fill its container and can optionally display glowing dots.
*
* @component
*
* @see DotPatternProps for the props interface.
*
* @example
* // Basic usage
* <DotPattern />
*
* // With glowing effect and custom spacing
* <DotPattern
* width={20}
* height={20}
* glow={true}
* className="opacity-50"
* />
*
* @notes
* - The component is client-side only ("use client")
* - Automatically responds to container size changes
* - When glow is enabled, dots will animate with random delays and durations
* - Uses Motion for animations
* - Dots color can be controlled via the text color utility classes
*/
export function DotPattern({
width = 16,
height = 16,
x = 0,
y = 0,
cx = 1,
cy = 1,
cr = 1,
className,
glow = false,
...props
}: DotPatternProps) {
const id = useId();
const containerRef = useRef<SVGSVGElement>(null);
const [dimensions, setDimensions] = useState({ width: 0, height: 0 });
useEffect(() => {
const updateDimensions = () => {
if (containerRef.current) {
const { width, height } = containerRef.current.getBoundingClientRect();
setDimensions({ width, height });
}
};
updateDimensions();
window.addEventListener("resize", updateDimensions);
return () => window.removeEventListener("resize", updateDimensions);
}, []);
const dots = Array.from(
{
length:
Math.ceil(dimensions.width / width) *
Math.ceil(dimensions.height / height),
},
(_, i) => {
const col = i % Math.ceil(dimensions.width / width);
const row = Math.floor(i / Math.ceil(dimensions.width / width));
return {
x: col * width + cx,
y: row * height + cy,
delay: Math.random() * 5,
duration: Math.random() * 3 + 2,
};
},
);
return (
<svg
ref={containerRef}
aria-hidden="true"
className={cn(
"pointer-events-none absolute inset-0 h-full w-full text-neutral-400/80",
className,
)}
{...props}
>
<defs>
<radialGradient id={`${id}-gradient`}>
<stop offset="0%" stopColor="currentColor" stopOpacity="1" />
<stop offset="100%" stopColor="currentColor" stopOpacity="0" />
</radialGradient>
</defs>
{dots.map((dot, index) => (
<motion.circle
key={`${dot.x}-${dot.y}`}
cx={dot.x}
cy={dot.y}
r={cr}
fill={glow ? `url(#${id}-gradient)` : "currentColor"}
initial={glow ? { opacity: 0.4, scale: 1 } : {}}
animate={
glow
? {
opacity: [0.4, 1, 0.4],
scale: [1, 1.5, 1],
}
: {}
}
transition={
glow
? {
duration: dot.duration,
repeat: Infinity,
repeatType: "reverse",
delay: dot.delay,
ease: "easeInOut",
}
: {}
}
/>
))}
</svg>
);
}


@ -1,3 +1,4 @@
import { Eye, EyeOff } from "lucide-react";
import * as React from "react"; import * as React from "react";
import { cn } from "@/lib/utils"; import { cn } from "@/lib/utils";
@ -12,6 +13,11 @@ const Input = React.forwardRef<HTMLInputElement, InputProps>(
const [hasValue, setHasValue] = React.useState( const [hasValue, setHasValue] = React.useState(
Boolean(props.value || props.defaultValue), Boolean(props.value || props.defaultValue),
); );
const [showPassword, setShowPassword] = React.useState(false);
const handleTogglePassword = () => {
setShowPassword(!showPassword);
};
const handleChange = (e: React.ChangeEvent<HTMLInputElement>) => { const handleChange = (e: React.ChangeEvent<HTMLInputElement>) => {
setHasValue(e.target.value.length > 0); setHasValue(e.target.value.length > 0);
@ -23,8 +29,8 @@ const Input = React.forwardRef<HTMLInputElement, InputProps>(
return ( return (
<label <label
className={cn( className={cn(
"relative block h-fit w-full text-sm", "relative block h-fit w-full text-sm group",
icon ? className : "" icon ? className : "",
)} )}
> >
{icon && ( {icon && (
@ -34,17 +40,32 @@ const Input = React.forwardRef<HTMLInputElement, InputProps>(
)} )}
<input <input
autoComplete="off" autoComplete="off"
type={type} type={type === "password" && showPassword ? "text" : type}
placeholder={placeholder} placeholder={placeholder}
className={cn( className={cn(
"primary-input !placeholder-transparent", "primary-input !placeholder-transparent",
icon && "pl-9", icon && "pl-9",
icon ? inputClassName : className type === "password" && "!pr-8",
icon ? inputClassName : className,
)} )}
ref={ref} ref={ref}
{...props} {...props}
onChange={handleChange} onChange={handleChange}
/> />
{type === "password" && (
<button
type="button"
className="absolute top-1/2 opacity-0 group-hover:opacity-100 hover:text-primary transition-all right-3 transform -translate-y-1/2 text-sm text-muted-foreground"
onMouseDown={(e) => e.preventDefault()}
onMouseUp={handleTogglePassword}
>
{showPassword ? (
<Eye className="w-4" />
) : (
<EyeOff className="w-4" />
)}
</button>
)}
<span <span
className={cn( className={cn(
"pointer-events-none absolute top-1/2 -translate-y-1/2 pl-px text-placeholder-foreground font-mono", "pointer-events-none absolute top-1/2 -translate-y-1/2 pl-px text-placeholder-foreground font-mono",
@ -56,7 +77,7 @@ const Input = React.forwardRef<HTMLInputElement, InputProps>(
</span> </span>
</label> </label>
); );
} },
); );
Input.displayName = "Input"; Input.displayName = "Input";


@ -2,7 +2,7 @@
import * as React from "react" import * as React from "react"
import * as SelectPrimitive from "@radix-ui/react-select" import * as SelectPrimitive from "@radix-ui/react-select"
import { Check, ChevronDown, ChevronUp } from "lucide-react" import { Check, ChevronDown, ChevronUp, Lock } from "lucide-react"
import { cn } from "@/lib/utils" import { cn } from "@/lib/utils"
@ -15,18 +15,24 @@ const SelectValue = SelectPrimitive.Value
const SelectTrigger = React.forwardRef< const SelectTrigger = React.forwardRef<
React.ElementRef<typeof SelectPrimitive.Trigger>, React.ElementRef<typeof SelectPrimitive.Trigger>,
React.ComponentPropsWithoutRef<typeof SelectPrimitive.Trigger> React.ComponentPropsWithoutRef<typeof SelectPrimitive.Trigger>
>(({ className, children, ...props }, ref) => ( >(({ className, children, disabled, ...props }, ref) => (
<SelectPrimitive.Trigger <SelectPrimitive.Trigger
ref={ref} ref={ref}
className={cn( className={cn(
"flex h-10 w-full items-center justify-between rounded-md border border-input bg-background px-3 py-2 text-sm ring-offset-background placeholder:text-muted-foreground focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2 disabled:cursor-not-allowed disabled:opacity-50 [&>span]:line-clamp-1", "flex h-10 w-full items-center justify-between rounded-md border border-input bg-background px-3 py-2 text-sm ring-offset-background placeholder:text-muted-foreground focus:outline-none focus:ring-2 focus:ring-ring focus:ring-offset-2 disabled:cursor-not-allowed disabled:opacity-50 [&>span]:line-clamp-1",
disabled && "bg-muted",
className className
)} )}
disabled={disabled}
{...props} {...props}
> >
{children} {children}
<SelectPrimitive.Icon asChild> <SelectPrimitive.Icon asChild>
<ChevronDown className="h-4 w-4 opacity-50" /> {disabled ? (
<Lock className="h-4 w-4 opacity-50" />
) : (
<ChevronDown className="h-4 w-4 opacity-50" />
)}
</SelectPrimitive.Icon> </SelectPrimitive.Icon>
</SelectPrimitive.Trigger> </SelectPrimitive.Trigger>
)) ))

Binary file not shown (removed image, 269 KiB).


@@ -90,7 +90,6 @@ export const useGetOllamaModelsQuery = (
     queryKey: ["models", "ollama", params],
     queryFn: getOllamaModels,
     retry: 2,
-    enabled: !!params?.endpoint, // Only run if endpoint is provided
     staleTime: 0, // Always fetch fresh data
     gcTime: 0, // Don't cache results
     ...options,


@ -6,7 +6,9 @@ import { Suspense, useEffect } from "react";
import GoogleLogo from "@/components/logo/google-logo"; import GoogleLogo from "@/components/logo/google-logo";
import Logo from "@/components/logo/logo"; import Logo from "@/components/logo/logo";
import { Button } from "@/components/ui/button"; import { Button } from "@/components/ui/button";
import { DotPattern } from "@/components/ui/dot-pattern";
import { useAuth } from "@/contexts/auth-context"; import { useAuth } from "@/contexts/auth-context";
import { cn } from "@/lib/utils";
import { useGetSettingsQuery } from "../api/queries/useGetSettingsQuery"; import { useGetSettingsQuery } from "../api/queries/useGetSettingsQuery";
function LoginPageContent() { function LoginPageContent() {
@ -53,15 +55,19 @@ function LoginPageContent() {
} }
return ( return (
<div <div className="min-h-dvh relative flex gap-4 flex-col items-center justify-center bg-background p-4">
className="min-h-dvh relative flex gap-4 flex-col items-center justify-center bg-background p-4" <DotPattern
style={{ width={24}
backgroundImage: "url('/images/background.png')", height={24}
backgroundSize: "cover", cx={1}
backgroundPosition: "center", cy={1}
}} cr={1}
> className={cn(
<div className="flex flex-col items-center justify-center gap-4"> "[mask-image:linear-gradient(to_bottom,white,transparent,transparent)]",
"text-input/70",
)}
/>
<div className="flex flex-col items-center justify-center gap-4 z-10">
<Logo className="fill-primary" width={32} height={28} /> <Logo className="fill-primary" width={32} height={28} />
<h1 className="text-2xl font-medium font-chivo">Welcome to OpenRAG</h1> <h1 className="text-2xl font-medium font-chivo">Welcome to OpenRAG</h1>
<p className="text-sm text-muted-foreground"> <p className="text-sm text-muted-foreground">
@ -72,7 +78,7 @@ function LoginPageContent() {
Continue with Google Continue with Google
</Button> </Button>
</div> </div>
<div className="flex items-center justify-center gap-2 absolute bottom-6 text-xs text-muted-foreground"> <div className="flex items-center justify-center gap-2 absolute bottom-6 text-xs text-muted-foreground z-10">
<p className="text-accent-emerald-foreground">Systems Operational</p> <p className="text-accent-emerald-foreground">Systems Operational</p>
<p>Privacy Policy</p> <p>Privacy Policy</p>
</div> </div>


@ -47,8 +47,7 @@ export function AdvancedOnboarding({
{hasEmbeddingModels && ( {hasEmbeddingModels && (
<LabelWrapper <LabelWrapper
label="Embedding model" label="Embedding model"
description="Its recommended that you use XYZ, ABC, or DEF models for best performance." helperText="Model used for knowledge ingest and retrieval"
helperText="The embedding model for your Ollama server."
id="embedding-model" id="embedding-model"
required={true} required={true}
> >
@ -63,8 +62,7 @@ export function AdvancedOnboarding({
{hasLanguageModels && ( {hasLanguageModels && (
<LabelWrapper <LabelWrapper
label="Language model" label="Language model"
description="Its recommended that you use XYZ, ABC, or DEF models for best performance." helperText="Model used for chat"
helperText="The embedding model for your Ollama server."
id="embedding-model" id="embedding-model"
required={true} required={true}
> >
@ -79,7 +77,7 @@ export function AdvancedOnboarding({
{(hasLanguageModels || hasEmbeddingModels) && <Separator />} {(hasLanguageModels || hasEmbeddingModels) && <Separator />}
<LabelWrapper <LabelWrapper
label="Sample dataset" label="Sample dataset"
description="Ingest two small PDFs" description="Load 2 sample PDFs to chat with data immediately."
id="sample-dataset" id="sample-dataset"
flex flex
> >


@ -1,5 +1,6 @@
import { useState } from "react"; import { useState } from "react";
import { LabelInput } from "@/components/label-input"; import { LabelInput } from "@/components/label-input";
import { LabelWrapper } from "@/components/label-wrapper";
import IBMLogo from "@/components/logo/ibm-logo"; import IBMLogo from "@/components/logo/ibm-logo";
import { useDebouncedValue } from "@/lib/debounce"; import { useDebouncedValue } from "@/lib/debounce";
import type { OnboardingVariables } from "../../api/mutations/useOnboardingMutation"; import type { OnboardingVariables } from "../../api/mutations/useOnboardingMutation";
@ -7,6 +8,7 @@ import { useGetIBMModelsQuery } from "../../api/queries/useGetModelsQuery";
import { useModelSelection } from "../hooks/useModelSelection"; import { useModelSelection } from "../hooks/useModelSelection";
import { useUpdateSettings } from "../hooks/useUpdateSettings"; import { useUpdateSettings } from "../hooks/useUpdateSettings";
import { AdvancedOnboarding } from "./advanced"; import { AdvancedOnboarding } from "./advanced";
import { ModelSelector } from "./model-selector";
export function IBMOnboarding({ export function IBMOnboarding({
setSettings, setSettings,
@ -17,10 +19,42 @@ export function IBMOnboarding({
sampleDataset: boolean; sampleDataset: boolean;
setSampleDataset: (dataset: boolean) => void; setSampleDataset: (dataset: boolean) => void;
}) { }) {
const [endpoint, setEndpoint] = useState(""); const [endpoint, setEndpoint] = useState("https://us-south.ml.cloud.ibm.com");
const [apiKey, setApiKey] = useState(""); const [apiKey, setApiKey] = useState("");
const [projectId, setProjectId] = useState(""); const [projectId, setProjectId] = useState("");
const options = [
{
value: "https://us-south.ml.cloud.ibm.com",
label: "https://us-south.ml.cloud.ibm.com",
default: true,
},
{
value: "https://eu-de.ml.cloud.ibm.com",
label: "https://eu-de.ml.cloud.ibm.com",
default: false,
},
{
value: "https://eu-gb.ml.cloud.ibm.com",
label: "https://eu-gb.ml.cloud.ibm.com",
default: false,
},
{
value: "https://au-syd.ml.cloud.ibm.com",
label: "https://au-syd.ml.cloud.ibm.com",
default: false,
},
{
value: "https://jp-tok.ml.cloud.ibm.com",
label: "https://jp-tok.ml.cloud.ibm.com",
default: false,
},
{
value: "https://ca-tor.ml.cloud.ibm.com",
label: "https://ca-tor.ml.cloud.ibm.com",
default: false,
},
];
const debouncedEndpoint = useDebouncedValue(endpoint, 500); const debouncedEndpoint = useDebouncedValue(endpoint, 500);
const debouncedApiKey = useDebouncedValue(apiKey, 500); const debouncedApiKey = useDebouncedValue(apiKey, 500);
const debouncedProjectId = useDebouncedValue(projectId, 500); const debouncedProjectId = useDebouncedValue(projectId, 500);
@ -68,19 +102,26 @@ export function IBMOnboarding({
return ( return (
<> <>
<div className="space-y-4"> <div className="space-y-4">
<LabelInput <LabelWrapper
label="watsonx.ai API Endpoint" label="watsonx.ai API Endpoint"
helperText="The API endpoint for your watsonx.ai account." helperText="Base URL of the API"
id="api-endpoint" id="api-endpoint"
required required
placeholder="https://us-south.ml.cloud.ibm.com" >
value={endpoint} <ModelSelector
onChange={(e) => setEndpoint(e.target.value)} options={options}
/> value={endpoint}
onValueChange={setEndpoint}
searchPlaceholder="Search endpoint..."
noOptionsPlaceholder="No endpoints available"
placeholder="Select endpoint..."
/>
</LabelWrapper>
<LabelInput <LabelInput
label="IBM API key" label="IBM API key"
helperText="The API key for your watsonx.ai account." helperText="The API key for your watsonx.ai account."
id="api-key" id="api-key"
type="password"
required required
placeholder="your-api-key" placeholder="your-api-key"
value={apiKey} value={apiKey}
@ -102,16 +143,9 @@ export function IBMOnboarding({
)} )}
{modelsError && ( {modelsError && (
<p className="text-mmd text-accent-amber-foreground"> <p className="text-mmd text-accent-amber-foreground">
Invalid configuration or connection failed Connection failed. Check your configuration.
</p> </p>
)} )}
{modelsData &&
(modelsData.language_models?.length > 0 ||
modelsData.embedding_models?.length > 0) && (
<p className="text-mmd text-accent-emerald-foreground">
Configuration is valid
</p>
)}
</div> </div>
<AdvancedOnboarding <AdvancedOnboarding
icon={<IBMLogo className="w-4 h-4" />} icon={<IBMLogo className="w-4 h-4" />}


@ -21,6 +21,9 @@ export function ModelSelector({
value, value,
onValueChange, onValueChange,
icon, icon,
placeholder = "Select model...",
searchPlaceholder = "Search model...",
noOptionsPlaceholder = "No models available",
}: { }: {
options: { options: {
value: string; value: string;
@ -29,6 +32,9 @@ export function ModelSelector({
}[]; }[];
value: string; value: string;
icon?: React.ReactNode; icon?: React.ReactNode;
placeholder?: string;
searchPlaceholder?: string;
noOptionsPlaceholder?: string;
onValueChange: (value: string) => void; onValueChange: (value: string) => void;
}) { }) {
const [open, setOpen] = useState(false); const [open, setOpen] = useState(false);
@ -50,7 +56,7 @@ export function ModelSelector({
> >
{value ? ( {value ? (
<div className="flex items-center gap-2"> <div className="flex items-center gap-2">
<div className="w-4 h-4">{icon}</div> {icon && <div className="w-4 h-4">{icon}</div>}
{options.find((framework) => framework.value === value)?.label} {options.find((framework) => framework.value === value)?.label}
{options.find((framework) => framework.value === value) {options.find((framework) => framework.value === value)
?.default && ( ?.default && (
@ -60,18 +66,18 @@ export function ModelSelector({
)} )}
</div> </div>
) : options.length === 0 ? ( ) : options.length === 0 ? (
"No models available" noOptionsPlaceholder
) : ( ) : (
"Select model..." placeholder
)} )}
<ChevronsUpDownIcon className="ml-2 h-4 w-4 shrink-0 opacity-50" /> <ChevronsUpDownIcon className="ml-2 h-4 w-4 shrink-0 opacity-50" />
</Button> </Button>
</PopoverTrigger> </PopoverTrigger>
<PopoverContent align="start" className="w-[400px] p-0"> <PopoverContent align="start" className="w-[400px] p-0">
<Command> <Command>
<CommandInput placeholder="Search model..." /> <CommandInput placeholder={searchPlaceholder} />
<CommandList> <CommandList>
<CommandEmpty>No model found.</CommandEmpty> <CommandEmpty>{noOptionsPlaceholder}</CommandEmpty>
<CommandGroup> <CommandGroup>
{options.map((option) => ( {options.map((option) => (
<CommandItem <CommandItem


@ -1,4 +1,4 @@
import { useState } from "react"; import { useEffect, useState } from "react";
import { LabelInput } from "@/components/label-input"; import { LabelInput } from "@/components/label-input";
import { LabelWrapper } from "@/components/label-wrapper"; import { LabelWrapper } from "@/components/label-wrapper";
import OllamaLogo from "@/components/logo/ollama-logo"; import OllamaLogo from "@/components/logo/ollama-logo";
@ -19,7 +19,8 @@ export function OllamaOnboarding({
sampleDataset: boolean; sampleDataset: boolean;
setSampleDataset: (dataset: boolean) => void; setSampleDataset: (dataset: boolean) => void;
}) { }) {
const [endpoint, setEndpoint] = useState(""); const [endpoint, setEndpoint] = useState("http://localhost:11434");
const [showConnecting, setShowConnecting] = useState(false);
const debouncedEndpoint = useDebouncedValue(endpoint, 500); const debouncedEndpoint = useDebouncedValue(endpoint, 500);
// Fetch models from API when endpoint is provided (debounced) // Fetch models from API when endpoint is provided (debounced)
@ -41,6 +42,25 @@ export function OllamaOnboarding({
embeddingModels, embeddingModels,
} = useModelSelection(modelsData); } = useModelSelection(modelsData);
// Handle delayed display of connecting state
useEffect(() => {
let timeoutId: NodeJS.Timeout;
if (debouncedEndpoint && isLoadingModels) {
timeoutId = setTimeout(() => {
setShowConnecting(true);
}, 500);
} else {
setShowConnecting(false);
}
return () => {
if (timeoutId) {
clearTimeout(timeoutId);
}
};
}, [debouncedEndpoint, isLoadingModels]);
const handleSampleDatasetChange = (dataset: boolean) => { const handleSampleDatasetChange = (dataset: boolean) => {
setSampleDataset(dataset); setSampleDataset(dataset);
}; };
@ -57,74 +77,75 @@ export function OllamaOnboarding({
); );
// Check validation state based on models query // Check validation state based on models query
const isConnecting = debouncedEndpoint && isLoadingModels;
const hasConnectionError = debouncedEndpoint && modelsError; const hasConnectionError = debouncedEndpoint && modelsError;
const hasNoModels = const hasNoModels =
modelsData && modelsData &&
!modelsData.language_models?.length && !modelsData.language_models?.length &&
!modelsData.embedding_models?.length; !modelsData.embedding_models?.length;
const isValidConnection =
modelsData &&
(modelsData.language_models?.length > 0 ||
modelsData.embedding_models?.length > 0);
return ( return (
<> <>
<div className="space-y-4"> <div className="space-y-4">
<div className="space-y-1"> <div className="space-y-1">
<LabelInput <LabelInput
label="Ollama Endpoint" label="Ollama Base URL"
helperText="The endpoint for your Ollama server." helperText="Base URL of your Ollama server"
id="api-endpoint" id="api-endpoint"
required required
placeholder="http://localhost:11434" placeholder="http://localhost:11434"
value={endpoint} value={endpoint}
onChange={(e) => setEndpoint(e.target.value)} onChange={(e) => setEndpoint(e.target.value)}
/> />
{isConnecting && ( {showConnecting && (
<p className="text-mmd text-muted-foreground"> <p className="text-mmd text-muted-foreground">
Connecting to Ollama server... Connecting to Ollama server...
</p> </p>
)} )}
{hasConnectionError && ( {hasConnectionError && (
<p className="text-mmd text-accent-amber-foreground"> <p className="text-mmd text-accent-amber-foreground">
Cant reach Ollama at {debouncedEndpoint}. Update the endpoint or Cant reach Ollama at {debouncedEndpoint}. Update the base URL or
start the server. start the server.
</p> </p>
)} )}
{hasNoModels && ( {hasNoModels && (
<p className="text-mmd text-accent-amber-foreground"> <p className="text-mmd text-accent-amber-foreground">
No models found. Please install some models on your Ollama server. No models found. Install embedding and agent models on your Ollama
</p> server.
)}
{isValidConnection && (
<p className="text-mmd text-accent-emerald-foreground">
Connected successfully
</p> </p>
)} )}
</div> </div>
<LabelWrapper <LabelWrapper
label="Embedding model" label="Embedding model"
helperText="The embedding model for your Ollama server." helperText="Model used for knowledge ingest and retrieval"
id="embedding-model" id="embedding-model"
required={true} required={true}
> >
<ModelSelector <ModelSelector
options={embeddingModels} options={embeddingModels}
icon={<OllamaLogo className="w-4 h-4" />} icon={<OllamaLogo className="w-4 h-4" />}
noOptionsPlaceholder={
isLoadingModels
? "Loading models..."
: "No embedding models detected. Install an embedding model to continue."
}
value={embeddingModel} value={embeddingModel}
onValueChange={setEmbeddingModel} onValueChange={setEmbeddingModel}
/> />
</LabelWrapper> </LabelWrapper>
<LabelWrapper <LabelWrapper
label="Language model" label="Language model"
helperText="The embedding model for your Ollama server." helperText="Model used for chat"
id="embedding-model" id="embedding-model"
required={true} required={true}
> >
<ModelSelector <ModelSelector
options={languageModels} options={languageModels}
icon={<OllamaLogo className="w-4 h-4" />} icon={<OllamaLogo className="w-4 h-4" />}
noOptionsPlaceholder={
isLoadingModels
? "Loading models..."
: "No language models detected. Install a language model to continue."
}
value={languageModel} value={languageModel}
onValueChange={setLanguageModel} onValueChange={setLanguageModel}
/> />


@ -1,6 +1,8 @@
import { useState } from "react"; import { useState } from "react";
import { LabelInput } from "@/components/label-input"; import { LabelInput } from "@/components/label-input";
import { LabelWrapper } from "@/components/label-wrapper";
import OpenAILogo from "@/components/logo/openai-logo"; import OpenAILogo from "@/components/logo/openai-logo";
import { Switch } from "@/components/ui/switch";
import { useDebouncedValue } from "@/lib/debounce"; import { useDebouncedValue } from "@/lib/debounce";
import type { OnboardingVariables } from "../../api/mutations/useOnboardingMutation"; import type { OnboardingVariables } from "../../api/mutations/useOnboardingMutation";
import { useGetOpenAIModelsQuery } from "../../api/queries/useGetModelsQuery"; import { useGetOpenAIModelsQuery } from "../../api/queries/useGetModelsQuery";
@ -18,6 +20,7 @@ export function OpenAIOnboarding({
setSampleDataset: (dataset: boolean) => void; setSampleDataset: (dataset: boolean) => void;
}) { }) {
const [apiKey, setApiKey] = useState(""); const [apiKey, setApiKey] = useState("");
const [getFromEnv, setGetFromEnv] = useState(true);
const debouncedApiKey = useDebouncedValue(apiKey, 500); const debouncedApiKey = useDebouncedValue(apiKey, 500);
// Fetch models from API when API key is provided // Fetch models from API when API key is provided
@ -26,7 +29,12 @@ export function OpenAIOnboarding({
isLoading: isLoadingModels, isLoading: isLoadingModels,
error: modelsError, error: modelsError,
} = useGetOpenAIModelsQuery( } = useGetOpenAIModelsQuery(
debouncedApiKey ? { apiKey: debouncedApiKey } : undefined, getFromEnv
? { apiKey: "" }
: debouncedApiKey
? { apiKey: debouncedApiKey }
: undefined,
{ enabled: debouncedApiKey !== "" || getFromEnv },
); );
// Use custom hook for model selection logic // Use custom hook for model selection logic
const { const {
@ -41,6 +49,15 @@ export function OpenAIOnboarding({
setSampleDataset(dataset); setSampleDataset(dataset);
}; };
const handleGetFromEnvChange = (fromEnv: boolean) => {
setGetFromEnv(fromEnv);
if (fromEnv) {
setApiKey("");
}
setLanguageModel("");
setEmbeddingModel("");
};
// Update settings when values change // Update settings when values change
useUpdateSettings( useUpdateSettings(
"openai", "openai",
@ -53,33 +70,41 @@ export function OpenAIOnboarding({
); );
return ( return (
<> <>
<div className="space-y-1"> <div className="space-y-5">
<LabelInput <LabelWrapper
label="OpenAI API key" label="Get API key from environment variable"
helperText="The API key for your OpenAI account." id="get-api-key"
id="api-key" flex
required >
placeholder="sk-..." <Switch
value={apiKey} checked={getFromEnv}
onChange={(e) => setApiKey(e.target.value)} onCheckedChange={handleGetFromEnvChange}
/> />
{isLoadingModels && ( </LabelWrapper>
<p className="text-mmd text-muted-foreground"> {!getFromEnv && (
Validating API key... <div className="space-y-1">
</p> <LabelInput
label="OpenAI API key"
helperText="The API key for your OpenAI account."
id="api-key"
type="password"
required
placeholder="sk-..."
value={apiKey}
onChange={(e) => setApiKey(e.target.value)}
/>
{isLoadingModels && (
<p className="text-mmd text-muted-foreground">
Validating API key...
</p>
)}
{modelsError && (
<p className="text-mmd text-accent-amber-foreground">
Invalid OpenAI API key. Verify or replace the key.
</p>
)}
</div>
)} )}
{modelsError && (
<p className="text-mmd text-accent-amber-foreground">
Invalid API key
</p>
)}
{modelsData &&
(modelsData.language_models?.length > 0 ||
modelsData.embedding_models?.length > 0) && (
<p className="text-mmd text-accent-emerald-foreground">
API Key is valid
</p>
)}
</div> </div>
<AdvancedOnboarding <AdvancedOnboarding
icon={<OpenAILogo className="w-4 h-4" />} icon={<OpenAILogo className="w-4 h-4" />}


@@ -4,8 +4,8 @@ import { useRouter } from "next/navigation";
import { Suspense, useEffect, useState } from "react";
import { toast } from "sonner";
import {
  type OnboardingVariables,
  useOnboardingMutation,
} from "@/app/api/mutations/useOnboardingMutation";
import IBMLogo from "@/components/logo/ibm-logo";
import OllamaLogo from "@/components/logo/ollama-logo";
@@ -13,198 +13,208 @@ import OpenAILogo from "@/components/logo/openai-logo";
import { ProtectedRoute } from "@/components/protected-route";
import { Button } from "@/components/ui/button";
import {
  Card,
  CardContent,
  CardFooter,
  CardHeader,
} from "@/components/ui/card";
+import { DotPattern } from "@/components/ui/dot-pattern";
import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs";
import {
  Tooltip,
  TooltipContent,
  TooltipTrigger,
} from "@/components/ui/tooltip";
+import { cn } from "@/lib/utils";
import { useGetSettingsQuery } from "../api/queries/useGetSettingsQuery";
import { IBMOnboarding } from "./components/ibm-onboarding";
import { OllamaOnboarding } from "./components/ollama-onboarding";
import { OpenAIOnboarding } from "./components/openai-onboarding";

function OnboardingPage() {
  const { data: settingsDb, isLoading: isSettingsLoading } =
    useGetSettingsQuery();
  const redirect = "/";
  const router = useRouter();

  // Redirect if already authenticated or in no-auth mode
  useEffect(() => {
    if (!isSettingsLoading && settingsDb && settingsDb.edited) {
      router.push(redirect);
    }
  }, [isSettingsLoading, settingsDb, router]);

  const [modelProvider, setModelProvider] = useState<string>("openai");
  const [sampleDataset, setSampleDataset] = useState<boolean>(true);

  const handleSetModelProvider = (provider: string) => {
    setModelProvider(provider);
    setSettings({
      model_provider: provider,
      embedding_model: "",
      llm_model: "",
    });
  };

  const [settings, setSettings] = useState<OnboardingVariables>({
    model_provider: modelProvider,
    embedding_model: "",
    llm_model: "",
  });

  // Mutations
  const onboardingMutation = useOnboardingMutation({
    onSuccess: (data) => {
      toast.success("Onboarding completed successfully!");
      console.log("Onboarding completed successfully", data);
      router.push(redirect);
    },
    onError: (error) => {
      toast.error("Failed to complete onboarding", {
        description: error.message,
      });
    },
  });

  const handleComplete = () => {
    if (
      !settings.model_provider ||
      !settings.llm_model ||
      !settings.embedding_model
    ) {
      toast.error("Please complete all required fields");
      return;
    }

    // Prepare onboarding data
    const onboardingData: OnboardingVariables = {
      model_provider: settings.model_provider,
      llm_model: settings.llm_model,
      embedding_model: settings.embedding_model,
      sample_data: sampleDataset,
    };

    // Add API key if available
    if (settings.api_key) {
      onboardingData.api_key = settings.api_key;
    }

    // Add endpoint if available
    if (settings.endpoint) {
      onboardingData.endpoint = settings.endpoint;
    }

    // Add project_id if available
    if (settings.project_id) {
      onboardingData.project_id = settings.project_id;
    }

    onboardingMutation.mutate(onboardingData);
  };

  const isComplete = !!settings.llm_model && !!settings.embedding_model;

  return (
-    <div
-      className="min-h-dvh w-full flex gap-5 flex-col items-center justify-center bg-background p-4"
-      style={{
-        backgroundImage: "url('/images/background.png')",
-        backgroundSize: "cover",
-        backgroundPosition: "center",
-      }}
-    >
-      <div className="flex flex-col items-center gap-5 min-h-[550px] w-full">
+    <div className="min-h-dvh w-full flex gap-5 flex-col items-center justify-center bg-background relative p-4">
+      <DotPattern
+        width={24}
+        height={24}
+        cx={1}
+        cy={1}
+        cr={1}
+        className={cn(
+          "[mask-image:linear-gradient(to_bottom,white,transparent,transparent)]",
+          "text-input/70",
+        )}
+      />
+      <div className="flex flex-col items-center gap-5 min-h-[550px] w-full z-10">
        <div className="flex flex-col items-center justify-center gap-4">
          <h1 className="text-2xl font-medium font-chivo">
-            Configure your models
+            Connect a model provider
          </h1>
-          <p className="text-sm text-muted-foreground">[description of task]</p>
        </div>
        <Card className="w-full max-w-[580px]">
          <Tabs
            defaultValue={modelProvider}
            onValueChange={handleSetModelProvider}
          >
            <CardHeader>
              <TabsList>
                <TabsTrigger value="openai">
                  <OpenAILogo className="w-4 h-4" />
                  OpenAI
                </TabsTrigger>
                <TabsTrigger value="watsonx">
                  <IBMLogo className="w-4 h-4" />
                  IBM
                </TabsTrigger>
                <TabsTrigger value="ollama">
                  <OllamaLogo className="w-4 h-4" />
                  Ollama
                </TabsTrigger>
              </TabsList>
            </CardHeader>
            <CardContent>
              <TabsContent value="openai">
                <OpenAIOnboarding
                  setSettings={setSettings}
                  sampleDataset={sampleDataset}
                  setSampleDataset={setSampleDataset}
                />
              </TabsContent>
              <TabsContent value="watsonx">
                <IBMOnboarding
                  setSettings={setSettings}
                  sampleDataset={sampleDataset}
                  setSampleDataset={setSampleDataset}
                />
              </TabsContent>
              <TabsContent value="ollama">
                <OllamaOnboarding
                  setSettings={setSettings}
                  sampleDataset={sampleDataset}
                  setSampleDataset={setSampleDataset}
                />
              </TabsContent>
            </CardContent>
          </Tabs>
          <CardFooter className="flex justify-end">
            <Tooltip>
              <TooltipTrigger asChild>
-                <Button
-                  size="sm"
-                  onClick={handleComplete}
-                  disabled={!isComplete}
-                  loading={onboardingMutation.isPending}
-                >
-                  Complete
-                </Button>
+                <div>
+                  <Button
+                    size="sm"
+                    onClick={handleComplete}
+                    disabled={!isComplete}
+                    loading={onboardingMutation.isPending}
+                  >
+                    Complete
+                  </Button>
+                </div>
              </TooltipTrigger>
-              <TooltipContent>
-                {!isComplete ? "Please fill in all required fields" : ""}
-              </TooltipContent>
+              {!isComplete && (
+                <TooltipContent>
+                  Please fill in all required fields
+                </TooltipContent>
+              )}
            </Tooltip>
          </CardFooter>
        </Card>
      </div>
    </div>
  );
}

export default function ProtectedOnboardingPage() {
  return (
    <ProtectedRoute>
      <Suspense fallback={<div>Loading onboarding...</div>}>
        <OnboardingPage />
      </Suspense>
    </ProtectedRoute>
  );
}
@@ -35,10 +35,11 @@ import { Textarea } from "@/components/ui/textarea";
import { useAuth } from "@/contexts/auth-context";
import { useTask } from "@/contexts/task-context";
import { useDebounce } from "@/lib/debounce";
+import { DEFAULT_AGENT_SETTINGS, DEFAULT_KNOWLEDGE_SETTINGS, UI_CONSTANTS } from "@/lib/constants";
import { getFallbackModels, type ModelProvider } from "./helpers/model-helpers";
import { ModelSelectItems } from "./helpers/model-select-item";

-const MAX_SYSTEM_PROMPT_CHARS = 2000;
+const { MAX_SYSTEM_PROMPT_CHARS } = UI_CONSTANTS;

interface GoogleDriveFile {
  id: string;
@@ -529,8 +530,17 @@ function KnowledgeSourcesPage() {
    fetch(`/api/reset-flow/retrieval`, {
      method: "POST",
    })
-      .then((response) => response.json())
+      .then((response) => {
+        if (response.ok) {
+          return response.json();
+        }
+        throw new Error(`HTTP ${response.status}: ${response.statusText}`);
+      })
      .then(() => {
+        // Only reset form values if the API call was successful
+        setSystemPrompt(DEFAULT_AGENT_SETTINGS.system_prompt);
+        // Trigger model update to default model
+        handleModelChange(DEFAULT_AGENT_SETTINGS.llm_model);
        closeDialog(); // Close after successful completion
      })
      .catch((error) => {
@@ -543,8 +553,17 @@ function KnowledgeSourcesPage() {
    fetch(`/api/reset-flow/ingest`, {
      method: "POST",
    })
-      .then((response) => response.json())
+      .then((response) => {
+        if (response.ok) {
+          return response.json();
+        }
+        throw new Error(`HTTP ${response.status}: ${response.statusText}`);
+      })
      .then(() => {
+        // Only reset form values if the API call was successful
+        setChunkSize(DEFAULT_KNOWLEDGE_SETTINGS.chunk_size);
+        setChunkOverlap(DEFAULT_KNOWLEDGE_SETTINGS.chunk_overlap);
+        setProcessingMode(DEFAULT_KNOWLEDGE_SETTINGS.processing_mode);
        closeDialog(); // Close after successful completion
      })
      .catch((error) => {
@@ -764,8 +783,9 @@ function KnowledgeSourcesPage() {
              "text-embedding-ada-002"
            }
            onValueChange={handleEmbeddingModelChange}
+            disabled={true}
          >
-            <SelectTrigger id="embedding-model-select">
+            <SelectTrigger id="embedding-model-select" disabled={true}>
              <SelectValue placeholder="Select an embedding model" />
            </SelectTrigger>
            <SelectContent>
@@ -0,0 +1,23 @@
/**
 * Default agent settings
 */
export const DEFAULT_AGENT_SETTINGS = {
  llm_model: "gpt-4o-mini",
  system_prompt: "You are a helpful assistant that can use tools to answer questions and perform tasks."
} as const;

/**
 * Default knowledge/ingest settings
 */
export const DEFAULT_KNOWLEDGE_SETTINGS = {
  chunk_size: 1000,
  chunk_overlap: 200,
  processing_mode: "standard"
} as const;

/**
 * UI Constants
 */
export const UI_CONSTANTS = {
  MAX_SYSTEM_PROMPT_CHARS: 2000,
} as const;
@@ -179,6 +179,7 @@ async def update_settings(request, session_manager):
        "chunk_size",
        "chunk_overlap",
        "doclingPresets",
+        "embedding_model",
    }

    # Check for invalid fields
@@ -199,11 +200,61 @@ async def update_settings(request, session_manager):
        current_config.agent.llm_model = body["llm_model"]
        config_updated = True

+        # Also update the chat flow with the new model
+        try:
+            flows_service = _get_flows_service()
+            await flows_service.update_chat_flow_model(body["llm_model"])
+            logger.info(
+                f"Successfully updated chat flow model to '{body['llm_model']}'"
+            )
+        except Exception as e:
+            logger.error(f"Failed to update chat flow model: {str(e)}")
+            # Don't fail the entire settings update if flow update fails
+            # The config will still be saved
+
    if "system_prompt" in body:
        current_config.agent.system_prompt = body["system_prompt"]
        config_updated = True

+        # Also update the chat flow with the new system prompt
+        try:
+            flows_service = _get_flows_service()
+            await flows_service.update_chat_flow_system_prompt(
+                body["system_prompt"]
+            )
+            logger.info(f"Successfully updated chat flow system prompt")
+        except Exception as e:
+            logger.error(f"Failed to update chat flow system prompt: {str(e)}")
+            # Don't fail the entire settings update if flow update fails
+            # The config will still be saved
+
    # Update knowledge settings
+    if "embedding_model" in body:
+        if (
+            not isinstance(body["embedding_model"], str)
+            or not body["embedding_model"].strip()
+        ):
+            return JSONResponse(
+                {"error": "embedding_model must be a non-empty string"},
+                status_code=400,
+            )
+        current_config.knowledge.embedding_model = body["embedding_model"].strip()
+        config_updated = True
+
+        # Also update the ingest flow with the new embedding model
+        try:
+            flows_service = _get_flows_service()
+            await flows_service.update_ingest_flow_embedding_model(
+                body["embedding_model"].strip()
+            )
+            logger.info(
+                f"Successfully updated ingest flow embedding model to '{body['embedding_model'].strip()}'"
+            )
+        except Exception as e:
+            logger.error(f"Failed to update ingest flow embedding model: {str(e)}")
+            # Don't fail the entire settings update if flow update fails
+            # The config will still be saved
+
    if "doclingPresets" in body:
        preset_configs = get_docling_preset_configs()
        valid_presets = list(preset_configs.keys())
@@ -219,7 +270,8 @@ async def update_settings(request, session_manager):
            # Also update the flow with the new docling preset
            try:
-                await _update_flow_docling_preset(
+                flows_service = _get_flows_service()
+                await flows_service.update_flow_docling_preset(
                    body["doclingPresets"], preset_configs[body["doclingPresets"]]
                )
                logger.info(
@@ -238,6 +290,18 @@ async def update_settings(request, session_manager):
        current_config.knowledge.chunk_size = body["chunk_size"]
        config_updated = True

+        # Also update the ingest flow with the new chunk size
+        try:
+            flows_service = _get_flows_service()
+            await flows_service.update_ingest_flow_chunk_size(body["chunk_size"])
+            logger.info(
+                f"Successfully updated ingest flow chunk size to {body['chunk_size']}"
+            )
+        except Exception as e:
+            logger.error(f"Failed to update ingest flow chunk size: {str(e)}")
+            # Don't fail the entire settings update if flow update fails
+            # The config will still be saved
+
    if "chunk_overlap" in body:
        if not isinstance(body["chunk_overlap"], int) or body["chunk_overlap"] < 0:
            return JSONResponse(
@@ -247,6 +311,20 @@ async def update_settings(request, session_manager):
        current_config.knowledge.chunk_overlap = body["chunk_overlap"]
        config_updated = True

+        # Also update the ingest flow with the new chunk overlap
+        try:
+            flows_service = _get_flows_service()
+            await flows_service.update_ingest_flow_chunk_overlap(
+                body["chunk_overlap"]
+            )
+            logger.info(
+                f"Successfully updated ingest flow chunk overlap to {body['chunk_overlap']}"
+            )
+        except Exception as e:
+            logger.error(f"Failed to update ingest flow chunk overlap: {str(e)}")
+            # Don't fail the entire settings update if flow update fails
+            # The config will still be saved
+
    if not config_updated:
        return JSONResponse(
            {"error": "No valid fields provided for update"}, status_code=400
@@ -525,63 +603,11 @@ async def onboarding(request, flows_service):
    )

-async def _update_flow_docling_preset(preset: str, preset_config: dict):
-    """Helper function to update docling preset in the ingest flow"""
-    if not LANGFLOW_INGEST_FLOW_ID:
-        raise ValueError("LANGFLOW_INGEST_FLOW_ID is not configured")
-
-    # Get the current flow data from Langflow
-    response = await clients.langflow_request(
-        "GET", f"/api/v1/flows/{LANGFLOW_INGEST_FLOW_ID}"
-    )
-
-    if response.status_code != 200:
-        raise Exception(
-            f"Failed to get ingest flow: HTTP {response.status_code} - {response.text}"
-        )
-
-    flow_data = response.json()
-
-    # Find the target node in the flow using environment variable
-    nodes = flow_data.get("data", {}).get("nodes", [])
-    target_node = None
-    target_node_index = None
-
-    for i, node in enumerate(nodes):
-        if node.get("id") == DOCLING_COMPONENT_ID:
-            target_node = node
-            target_node_index = i
-            break
-
-    if target_node is None:
-        raise Exception(
-            f"Docling component '{DOCLING_COMPONENT_ID}' not found in ingest flow"
-        )
-
-    # Update the docling_serve_opts value directly in the existing node
-    if (
-        target_node.get("data", {})
-        .get("node", {})
-        .get("template", {})
-        .get("docling_serve_opts")
-    ):
-        flow_data["data"]["nodes"][target_node_index]["data"]["node"]["template"][
-            "docling_serve_opts"
-        ]["value"] = preset_config
-    else:
-        raise Exception(
-            f"docling_serve_opts field not found in node '{DOCLING_COMPONENT_ID}'"
-        )
-
-    # Update the flow via PATCH request
-    patch_response = await clients.langflow_request(
-        "PATCH", f"/api/v1/flows/{LANGFLOW_INGEST_FLOW_ID}", json=flow_data
-    )
-
-    if patch_response.status_code != 200:
-        raise Exception(
-            f"Failed to update ingest flow: HTTP {patch_response.status_code} - {patch_response.text}"
-        )
+def _get_flows_service():
+    """Helper function to get flows service instance"""
+    from services.flows_service import FlowsService
+
+    return FlowsService()


async def update_docling_preset(request, session_manager):
@@ -612,7 +638,8 @@ async def update_docling_preset(request, session_manager):
        preset_config = preset_configs[preset]

        # Use the helper function to update the flow
-        await _update_flow_docling_preset(preset, preset_config)
+        flows_service = _get_flows_service()
+        await flows_service.update_flow_docling_preset(preset, preset_config)

        logger.info(f"Successfully updated docling preset to '{preset}' in ingest flow")
@@ -392,8 +392,6 @@ async def startup_tasks(services):
    """Startup tasks"""
    logger.info("Starting startup tasks")
    await init_index()
-    # Sample data ingestion is now handled by the onboarding endpoint when sample_data=True
-    logger.info("Sample data ingestion moved to onboarding endpoint")


async def initialize_services():
@@ -939,7 +937,8 @@ async def create_app():
            "/settings",
            require_auth(services["session_manager"])(
                partial(
-                    settings.update_settings, session_manager=services["session_manager"]
+                    settings.update_settings,
+                    session_manager=services["session_manager"],
                )
            ),
            methods=["POST"],
@@ -951,7 +950,7 @@ async def create_app():
                partial(
                    models.get_openai_models,
                    models_service=services["models_service"],
-                    session_manager=services["session_manager"]
+                    session_manager=services["session_manager"],
                )
            ),
            methods=["GET"],
@@ -962,7 +961,7 @@ async def create_app():
                partial(
                    models.get_ollama_models,
                    models_service=services["models_service"],
-                    session_manager=services["session_manager"]
+                    session_manager=services["session_manager"],
                )
            ),
            methods=["GET"],
@@ -973,7 +972,7 @@ async def create_app():
                partial(
                    models.get_ibm_models,
                    models_service=services["models_service"],
-                    session_manager=services["session_manager"]
+                    session_manager=services["session_manager"],
                )
            ),
            methods=["GET", "POST"],
@@ -982,10 +981,7 @@ async def create_app():
        Route(
            "/onboarding",
            require_auth(services["session_manager"])(
-                partial(
-                    settings.onboarding,
-                    flows_service=services["flows_service"]
-                )
+                partial(settings.onboarding, flows_service=services["flows_service"])
            ),
            methods=["POST"],
        ),
@@ -995,7 +991,7 @@ async def create_app():
            require_auth(services["session_manager"])(
                partial(
                    settings.update_docling_preset,
-                    session_manager=services["session_manager"]
+                    session_manager=services["session_manager"],
                )
            ),
            methods=["PATCH"],
@@ -400,6 +400,123 @@ class FlowsService:
                return node
        return None
def _find_node_in_flow(self, flow_data, node_id=None, display_name=None):
"""
Helper function to find a node in flow data by ID or display name.
Returns tuple of (node, node_index) or (None, None) if not found.
"""
nodes = flow_data.get("data", {}).get("nodes", [])
for i, node in enumerate(nodes):
node_data = node.get("data", {})
node_template = node_data.get("node", {})
# Check by ID if provided
if node_id and node_data.get("id") == node_id:
return node, i
# Check by display_name if provided
if display_name and node_template.get("display_name") == display_name:
return node, i
return None, None
async def _update_flow_field(self, flow_id: str, field_name: str, field_value: str, node_display_name: str = None, node_id: str = None):
"""
Generic helper function to update any field in any Langflow component.
Args:
flow_id: The ID of the flow to update
field_name: The name of the field to update (e.g., 'model_name', 'system_message', 'docling_serve_opts')
field_value: The new value to set
node_display_name: The display name to search for (optional)
node_id: The node ID to search for (optional, used as fallback or primary)
"""
if not flow_id:
raise ValueError("flow_id is required")
# Get the current flow data from Langflow
response = await clients.langflow_request(
"GET", f"/api/v1/flows/{flow_id}"
)
if response.status_code != 200:
raise Exception(f"Failed to get flow: HTTP {response.status_code} - {response.text}")
flow_data = response.json()
# Find the target component by display name first, then by ID as fallback
target_node, target_node_index = None, None
if node_display_name:
target_node, target_node_index = self._find_node_in_flow(flow_data, display_name=node_display_name)
if target_node is None and node_id:
target_node, target_node_index = self._find_node_in_flow(flow_data, node_id=node_id)
if target_node is None:
identifier = node_display_name or node_id
raise Exception(f"Component '{identifier}' not found in flow {flow_id}")
# Update the field value directly in the existing node
template = target_node.get("data", {}).get("node", {}).get("template", {})
if template.get(field_name):
flow_data["data"]["nodes"][target_node_index]["data"]["node"]["template"][field_name]["value"] = field_value
else:
identifier = node_display_name or node_id
raise Exception(f"{field_name} field not found in {identifier} component")
# Update the flow via PATCH request
patch_response = await clients.langflow_request(
"PATCH", f"/api/v1/flows/{flow_id}", json=flow_data
)
if patch_response.status_code != 200:
raise Exception(f"Failed to update flow: HTTP {patch_response.status_code} - {patch_response.text}")
async def update_chat_flow_model(self, model_name: str):
"""Helper function to update the model in the chat flow"""
if not LANGFLOW_CHAT_FLOW_ID:
raise ValueError("LANGFLOW_CHAT_FLOW_ID is not configured")
await self._update_flow_field(LANGFLOW_CHAT_FLOW_ID, "model_name", model_name,
node_display_name="Language Model")
async def update_chat_flow_system_prompt(self, system_prompt: str):
"""Helper function to update the system prompt in the chat flow"""
if not LANGFLOW_CHAT_FLOW_ID:
raise ValueError("LANGFLOW_CHAT_FLOW_ID is not configured")
await self._update_flow_field(LANGFLOW_CHAT_FLOW_ID, "system_prompt", system_prompt,
node_display_name="Agent")
async def update_flow_docling_preset(self, preset: str, preset_config: dict):
"""Helper function to update docling preset in the ingest flow"""
if not LANGFLOW_INGEST_FLOW_ID:
raise ValueError("LANGFLOW_INGEST_FLOW_ID is not configured")
from config.settings import DOCLING_COMPONENT_ID
await self._update_flow_field(LANGFLOW_INGEST_FLOW_ID, "docling_serve_opts", preset_config,
node_id=DOCLING_COMPONENT_ID)
async def update_ingest_flow_chunk_size(self, chunk_size: int):
"""Helper function to update chunk size in the ingest flow"""
if not LANGFLOW_INGEST_FLOW_ID:
raise ValueError("LANGFLOW_INGEST_FLOW_ID is not configured")
await self._update_flow_field(LANGFLOW_INGEST_FLOW_ID, "chunk_size", chunk_size,
node_display_name="Split Text")
async def update_ingest_flow_chunk_overlap(self, chunk_overlap: int):
"""Helper function to update chunk overlap in the ingest flow"""
if not LANGFLOW_INGEST_FLOW_ID:
raise ValueError("LANGFLOW_INGEST_FLOW_ID is not configured")
await self._update_flow_field(LANGFLOW_INGEST_FLOW_ID, "chunk_overlap", chunk_overlap,
node_display_name="Split Text")
async def update_ingest_flow_embedding_model(self, embedding_model: str):
"""Helper function to update embedding model in the ingest flow"""
if not LANGFLOW_INGEST_FLOW_ID:
raise ValueError("LANGFLOW_INGEST_FLOW_ID is not configured")
await self._update_flow_field(LANGFLOW_INGEST_FLOW_ID, "model", embedding_model,
node_display_name="Embedding Model")
def _replace_node_in_flow(self, flow_data, old_id, new_node):
    """Replace a node in the flow data"""
    nodes = flow_data.get("data", {}).get("nodes", [])