Merge branch 'main' into feat-folder-picker

This commit is contained in:
Eric Hare 2025-11-19 14:58:42 -08:00 committed by GitHub
commit 1a188161f6
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
26 changed files with 2540 additions and 1896 deletions

View file

@ -2817,7 +2817,7 @@
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "You are the OpenRAG Agent. You answer questions using retrieval, reasoning, and tool use.\nYou have access to several tools. Your job is to determine **which tool to use and when**.\n### Available Tools\n- OpenSearch Retrieval Tool:\n Use this to search the indexed knowledge base. Use when the user asks about product details, internal concepts, processes, architecture, documentation, roadmaps, or anything that may be stored in the index.\n- Conversation History:\n Use this to maintain continuity when the user is referring to previous turns. \n Do not treat history as a factual source.\n- Conversation File Context:\n Use this when the user asks about a document they uploaded or refers directly to its contents.\n- URL Ingestion Tool:\n Use this **only** when the user explicitly asks you to read, summarize, or analyze the content of a URL.\n Do not ingest URLs automatically.\n- Calculator / Expression Evaluation Tool:\n Use this when the user asks to compare numbers, compute estimates, calculate totals, analyze pricing, or answer any question requiring mathematics or quantitative reasoning.\n If the answer requires arithmetic, call the calculator tool rather than calculating internally.\n### Retrieval Decision Rules\nUse OpenSearch **whenever**:\n1. The question may be answered from internal or indexed data.\n2. The user references team names, product names, release plans, configurations, requirements, or official information.\n3. 
The user needs a factual, grounded answer.\nDo **not** use retrieval if:\n- The question is purely creative (e.g., storytelling, analogies) or personal preference.\n- The user simply wants text reformatted or rewritten from what is already present in the conversation.\nWhen uncertain → **Retrieve.** Retrieval is low risk and improves grounding.\n### URL Ingestion Rules\nOnly ingest URLs when the user explicitly says:\n- \"Read this link\"\n- \"Summarize this webpage\"\n- \"What does this site say?\"\n- \"Ingest this URL\"\nIf unclear → ask a clarifying question.\n### Calculator Usage Rules\nUse the calculator when:\n- Performing arithmetic\n- Estimating totals\n- Comparing values\n- Modeling cost, time, effort, scale, or projections\nDo not perform math internally. **Call the calculator tool instead.**\n### Answer Construction Rules\n1. When asked: \"What is OpenRAG\", answer the following:\n\"OpenRAG is an open-source package for building agentic RAG systems. It supports integration with a wide range of orchestration tools, vector databases, and LLM providers. OpenRAG connects and amplifies three popular, proven open-source projects into one powerful platform:\n**Langflow** Langflow is a powerful tool to build and deploy AI agents and MCP servers [Read more](https://www.langflow.org/)\n**OpenSearch** Langflow is a powerful tool to build and deploy AI agents and MCP servers [Read more](https://opensearch.org/)\n**Docling** Langflow is a powerful tool to build and deploy AI agents and MCP servers [Read more](https://www.docling.ai/)\"\n2. Synthesize retrieved or ingested content in your own words.\n3. Support factual claims with citations in the format:\n (Source: <document_name_or_id>)\n4. If no supporting evidence is found:\n Say: \"No relevant supporting sources were found for that request.\"\n5. Never invent facts or hallucinate details.\n6. Be concise, direct, and confident. \n7. Do not reveal internal chain-of-thought."
"value": "You are the OpenRAG Agent. You answer questions using retrieval, reasoning, and tool use.\nYou have access to several tools. Your job is to determine **which tool to use and when**.\n### Available Tools\n- OpenSearch Retrieval Tool:\n Use this to search the indexed knowledge base. Use when the user asks about product details, internal concepts, processes, architecture, documentation, roadmaps, or anything that may be stored in the index.\n- Conversation History:\n Use this to maintain continuity when the user is referring to previous turns. \n Do not treat history as a factual source.\n- Conversation File Context:\n Use this when the user asks about a document they uploaded or refers directly to its contents.\n- URL Ingestion Tool:\n Use this **only** when the user explicitly asks you to read, summarize, or analyze the content of a URL.\n Do not ingest URLs automatically.\n- Calculator / Expression Evaluation Tool:\n Use this when the user asks to compare numbers, compute estimates, calculate totals, analyze pricing, or answer any question requiring mathematics or quantitative reasoning.\n If the answer requires arithmetic, call the calculator tool rather than calculating internally.\n### Retrieval Decision Rules\nUse OpenSearch **whenever**:\n1. The question may be answered from internal or indexed data.\n2. The user references team names, product names, release plans, configurations, requirements, or official information.\n3. 
The user needs a factual, grounded answer.\nDo **not** use retrieval if:\n- The question is purely creative (e.g., storytelling, analogies) or personal preference.\n- The user simply wants text reformatted or rewritten from what is already present in the conversation.\nWhen uncertain → **Retrieve.** Retrieval is low risk and improves grounding.\n### URL Ingestion Rules\nOnly ingest URLs when the user explicitly says:\n- \"Read this link\"\n- \"Summarize this webpage\"\n- \"What does this site say?\"\n- \"Ingest this URL\"\nIf unclear → ask a clarifying question.\n### Calculator Usage Rules\nUse the calculator when:\n- Performing arithmetic\n- Estimating totals\n- Comparing values\n- Modeling cost, time, effort, scale, or projections\nDo not perform math internally. **Call the calculator tool instead.**\n### Answer Construction Rules\n1. When asked: \"What is OpenRAG\", answer the following:\n\"OpenRAG is an open-source package for building agentic RAG systems. It supports integration with a wide range of orchestration tools, vector databases, and LLM providers. OpenRAG connects and amplifies three popular, proven open-source projects into one powerful platform:\n**Langflow** Langflow is a powerful tool to build and deploy AI agents and MCP servers. [Read more](https://www.langflow.org/)\n**OpenSearch** OpenSearch is an open source, search and observability suite that brings order to unstructured data at scale. [Read more](https://opensearch.org/)\n**Docling** Docling simplifies document processing with advanced PDF understanding, OCR support, and seamless AI integrations. Parse PDFs, DOCX, PPTX, images & more. [Read more](https://www.docling.ai/)\"\n2. Synthesize retrieved or ingested content in your own words.\n3. Support factual claims with citations in the format:\n (Source: <document_name_or_id>)\n4. If no supporting evidence is found:\n Say: \"No relevant supporting sources were found for that request.\"\n5. Never invent facts or hallucinate details.\n6. 
Be concise, direct, and confident. \n7. Do not reveal internal chain-of-thought."
},
"temperature": {
"_input_type": "SliderInput",

View file

@ -119,9 +119,9 @@ function AuthCallbackContent() {
localStorage.removeItem("connecting_connector_type");
localStorage.removeItem("auth_purpose");
// Redirect to connectors page with success indicator
// Redirect to settings page with success indicator
setTimeout(() => {
router.push("/connectors?oauth_success=true");
router.push("/settings?oauth_success=true");
}, 2000);
}
} else {
@ -207,13 +207,13 @@ function AuthCallbackContent() {
</div>
<Button
onClick={() =>
router.push(isAppAuth ? "/login" : "/connectors")
router.push(isAppAuth ? "/login" : "/settings")
}
variant="outline"
className="w-full"
>
<ArrowLeft className="h-4 w-4 mr-2" />
{isAppAuth ? "Back to Login" : "Back to Connectors"}
{isAppAuth ? "Back to Login" : "Back to Settings"}
</Button>
</div>
)}
@ -223,7 +223,7 @@ function AuthCallbackContent() {
<p className="text-sm text-green-600">
{isAppAuth
? "Redirecting you to the app..."
: "Redirecting to connectors..."}
: "Redirecting to settings..."}
</p>
</div>
</div>

View file

@ -1,6 +1,9 @@
import { ArrowRight, Check, Funnel, Loader2, Plus } from "lucide-react";
import { AnimatePresence, motion } from "motion/react";
import { forwardRef, useImperativeHandle, useRef, useState } from "react";
import { useDropzone } from "react-dropzone";
import TextareaAutosize from "react-textarea-autosize";
import { toast } from "sonner";
import type { FilterColor } from "@/components/filter-icon-popover";
import { Button } from "@/components/ui/button";
import {
@ -8,6 +11,8 @@ import {
PopoverAnchor,
PopoverContent,
} from "@/components/ui/popover";
import { useFileDrag } from "@/hooks/use-file-drag";
import { cn } from "@/lib/utils";
import type { KnowledgeFilterData } from "../_types/types";
import { FilePreview } from "./file-preview";
import { SelectedKnowledgeFilter } from "./selected-knowledge-filter";
@ -71,6 +76,27 @@ export const ChatInput = forwardRef<ChatInputHandle, ChatInputProps>(
const inputRef = useRef<HTMLTextAreaElement>(null);
const fileInputRef = useRef<HTMLInputElement>(null);
const [textareaHeight, setTextareaHeight] = useState(0);
const isDragging = useFileDrag();
const { getRootProps, getInputProps } = useDropzone({
accept: {
"application/pdf": [".pdf"],
"application/msword": [".doc"],
"application/vnd.openxmlformats-officedocument.wordprocessingml.document":
[".docx"],
"text/markdown": [".md"],
},
maxFiles: 1,
disabled: !isDragging,
onDrop: (acceptedFiles, fileRejections) => {
if (fileRejections.length > 0) {
const message = fileRejections.at(0)?.errors.at(0)?.message;
toast.error(message || "Failed to upload file");
return;
}
onFileSelected(acceptedFiles[0]);
},
});
useImperativeHandle(ref, () => ({
focusInput: () => {
@ -94,17 +120,53 @@ export const ChatInput = forwardRef<ChatInputHandle, ChatInputProps>(
<div className="w-full">
<form onSubmit={onSubmit} className="relative">
{/* Outer container - flex-col to stack file preview above input */}
<div className="flex flex-col w-full gap-2 rounded-xl border border-input hover:[&:not(:focus-within)]:border-muted-foreground focus-within:border-foreground p-2 transition-colors">
{/* File Preview Section - Always above */}
{uploadedFile && (
<FilePreview
uploadedFile={uploadedFile}
onClear={() => {
onFileSelected(null);
}}
/>
<div
{...getRootProps()}
className={cn(
"flex flex-col w-full p-2 rounded-xl border border-input transition-all",
!isDragging &&
"hover:[&:not(:focus-within)]:border-muted-foreground focus-within:border-foreground",
isDragging && "border-dashed",
)}
>
<input {...getInputProps()} />
{/* File Preview Section - Always above */}
<AnimatePresence>
{uploadedFile && (
<motion.div
initial={{ opacity: 0, height: 0, marginBottom: 0 }}
animate={{ opacity: 1, height: "auto", marginBottom: 8 }}
exit={{ opacity: 0, height: 0, marginBottom: 0 }}
className="overflow-hidden"
>
<FilePreview
uploadedFile={uploadedFile}
onClear={() => {
onFileSelected(null);
}}
/>
</motion.div>
)}
</AnimatePresence>
<AnimatePresence>
{isDragging && (
<motion.div
initial={{ opacity: 0, height: 0 }}
animate={{ opacity: 1, height: 100 }}
exit={{ opacity: 0, height: 0 }}
className="overflow-hidden w-full flex flex-col items-center justify-center gap-2"
>
<p className="text-md font-medium text-primary">
Add files to conversation
</p>
<p className="text-sm text-muted-foreground">
Text formats and image files.{" "}
<span className="font-semibold">10</span> files per chat,{" "}
<span className="font-semibold">150 MB</span> each.
</p>
</motion.div>
)}
</AnimatePresence>
{/* Main Input Container - flex-row or flex-col based on textarea height */}
<div
className={`relative flex w-full gap-2 ${

View file

@ -71,6 +71,7 @@ function ChatPage() {
y: number;
} | null>(null);
const [uploadedFile, setUploadedFile] = useState<File | null>(null);
const [waitingTooLong, setWaitingTooLong] = useState(false);
const chatInputRef = useRef<ChatInputHandle>(null);
@ -87,11 +88,13 @@ function ChatPage() {
streamingMessage,
sendMessage: sendStreamingMessage,
abortStream,
isLoading: isStreamLoading,
} = useChatStreaming({
endpoint: apiEndpoint,
onComplete: (message, responseId) => {
setMessages((prev) => [...prev, message]);
setLoading(false);
setWaitingTooLong(false);
if (responseId) {
cancelNudges();
@ -111,6 +114,7 @@ function ChatPage() {
onError: (error) => {
console.error("Streaming error:", error);
setLoading(false);
setWaitingTooLong(false);
const errorMessage: Message = {
role: "assistant",
content:
@ -120,6 +124,23 @@ function ChatPage() {
setMessages((prev) => [...prev, errorMessage]);
},
});
// Show warning if waiting too long (20 seconds)
useEffect(() => {
let timeoutId: NodeJS.Timeout | null = null;
if (isStreamLoading && !streamingMessage) {
timeoutId = setTimeout(() => {
setWaitingTooLong(true);
}, 20000); // 20 seconds
} else {
setWaitingTooLong(false);
}
return () => {
if (timeoutId) clearTimeout(timeoutId);
};
}, [isStreamLoading, streamingMessage]);
const getCursorPosition = (textarea: HTMLTextAreaElement) => {
// Create a hidden div with the same styles as the textarea
@ -1310,6 +1331,19 @@ function ChatPage() {
isCompleted={false}
/>
)}
{/* Waiting too long indicator */}
{waitingTooLong && !streamingMessage && loading && (
<div className="pl-10 space-y-2">
<div className="flex items-center gap-2 text-sm text-muted-foreground">
<Loader2 className="h-4 w-4 animate-spin" />
<span>The server is taking longer than expected...</span>
</div>
<p className="text-xs text-muted-foreground">
This may be due to high server load. The request will timeout after 60 seconds.
</p>
</div>
)}
</>
)}
{!streamingMessage && (

View file

@ -41,6 +41,7 @@ export function ModelSelector({
noOptionsPlaceholder = "No models available",
custom = false,
hasError = false,
defaultOpen = false,
}: {
options?: ModelOption[];
groupedOptions?: GroupedModelOption[];
@ -52,8 +53,9 @@ export function ModelSelector({
custom?: boolean;
onValueChange: (value: string, provider?: string) => void;
hasError?: boolean;
defaultOpen?: boolean;
}) {
const [open, setOpen] = useState(false);
const [open, setOpen] = useState(defaultOpen);
const [searchValue, setSearchValue] = useState("");
// Flatten grouped options or use regular options
@ -77,6 +79,13 @@ export function ModelSelector({
}
}, [allOptions, value, custom, onValueChange]);
// Update open state when defaultOpen changes
useEffect(() => {
if (defaultOpen) {
setOpen(true);
}
}, [defaultOpen]);
return (
<Popover open={open} onOpenChange={setOpen} modal={false}>
<PopoverTrigger asChild>

View file

@ -9,150 +9,161 @@ import type { ProviderHealthResponse } from "@/app/api/queries/useProviderHealth
import AnthropicLogo from "@/components/icons/anthropic-logo";
import { Button } from "@/components/ui/button";
import {
Dialog,
DialogContent,
DialogFooter,
DialogHeader,
DialogTitle,
Dialog,
DialogContent,
DialogFooter,
DialogHeader,
DialogTitle,
} from "@/components/ui/dialog";
import {
AnthropicSettingsForm,
type AnthropicSettingsFormData,
AnthropicSettingsForm,
type AnthropicSettingsFormData,
} from "./anthropic-settings-form";
import { useRouter } from "next/navigation";
const AnthropicSettingsDialog = ({
open,
setOpen,
open,
setOpen,
}: {
open: boolean;
setOpen: (open: boolean) => void;
open: boolean;
setOpen: (open: boolean) => void;
}) => {
const queryClient = useQueryClient();
const [isValidating, setIsValidating] = useState(false);
const [validationError, setValidationError] = useState<Error | null>(null);
const queryClient = useQueryClient();
const [isValidating, setIsValidating] = useState(false);
const [validationError, setValidationError] = useState<Error | null>(null);
const router = useRouter();
const methods = useForm<AnthropicSettingsFormData>({
mode: "onSubmit",
defaultValues: {
apiKey: "",
},
});
const methods = useForm<AnthropicSettingsFormData>({
mode: "onSubmit",
defaultValues: {
apiKey: "",
},
});
const { handleSubmit, watch } = methods;
const apiKey = watch("apiKey");
const { handleSubmit, watch } = methods;
const apiKey = watch("apiKey");
const { refetch: validateCredentials } = useGetAnthropicModelsQuery(
{
apiKey: apiKey,
},
{
enabled: false,
},
);
const { refetch: validateCredentials } = useGetAnthropicModelsQuery(
{
apiKey: apiKey,
},
{
enabled: false,
},
);
const settingsMutation = useUpdateSettingsMutation({
onSuccess: () => {
// Update provider health cache to healthy since backend validated the setup
const healthData: ProviderHealthResponse = {
status: "healthy",
message: "Provider is configured and working correctly",
provider: "anthropic",
};
queryClient.setQueryData(["provider", "health"], healthData);
const settingsMutation = useUpdateSettingsMutation({
onSuccess: () => {
// Update provider health cache to healthy since backend validated the setup
const healthData: ProviderHealthResponse = {
status: "healthy",
message: "Provider is configured and working correctly",
provider: "anthropic",
};
queryClient.setQueryData(["provider", "health"], healthData);
toast.success(
"Anthropic credentials saved. Configure models in the Settings page.",
);
setOpen(false);
},
});
toast.message("Anthropic successfully configured", {
description: "You can now access the provided language models.",
duration: Infinity,
closeButton: true,
icon: <AnthropicLogo className="w-4 h-4 text-[#D97757]" />,
action: {
label: "Settings",
onClick: () => {
router.push("/settings?focusLlmModel=true");
},
},
});
setOpen(false);
},
});
const onSubmit = async (data: AnthropicSettingsFormData) => {
// Clear any previous validation errors
setValidationError(null);
const onSubmit = async (data: AnthropicSettingsFormData) => {
// Clear any previous validation errors
setValidationError(null);
// Only validate if a new API key was entered
if (data.apiKey) {
setIsValidating(true);
const result = await validateCredentials();
setIsValidating(false);
// Only validate if a new API key was entered
if (data.apiKey) {
setIsValidating(true);
const result = await validateCredentials();
setIsValidating(false);
if (result.isError) {
setValidationError(result.error);
return;
}
}
if (result.isError) {
setValidationError(result.error);
return;
}
}
const payload: {
anthropic_api_key?: string;
} = {};
const payload: {
anthropic_api_key?: string;
} = {};
// Only include api_key if a value was entered
if (data.apiKey) {
payload.anthropic_api_key = data.apiKey;
}
// Only include api_key if a value was entered
if (data.apiKey) {
payload.anthropic_api_key = data.apiKey;
}
// Submit the update
settingsMutation.mutate(payload);
};
// Submit the update
settingsMutation.mutate(payload);
};
return (
<Dialog open={open} onOpenChange={setOpen}>
<DialogContent className="max-w-2xl">
<FormProvider {...methods}>
<form onSubmit={handleSubmit(onSubmit)} className="grid gap-4">
<DialogHeader className="mb-2">
<DialogTitle className="flex items-center gap-3">
<div className="w-8 h-8 rounded flex items-center justify-center bg-white border">
<AnthropicLogo className="text-black" />
</div>
Anthropic Setup
</DialogTitle>
</DialogHeader>
return (
<Dialog open={open} onOpenChange={setOpen}>
<DialogContent className="max-w-2xl">
<FormProvider {...methods}>
<form onSubmit={handleSubmit(onSubmit)} className="grid gap-4">
<DialogHeader className="mb-2">
<DialogTitle className="flex items-center gap-3">
<div className="w-8 h-8 rounded flex items-center justify-center bg-white border">
<AnthropicLogo className="text-black" />
</div>
Anthropic Setup
</DialogTitle>
</DialogHeader>
<AnthropicSettingsForm
modelsError={validationError}
isLoadingModels={isValidating}
/>
<AnthropicSettingsForm
modelsError={validationError}
isLoadingModels={isValidating}
/>
<AnimatePresence mode="wait">
{settingsMutation.isError && (
<motion.div
key="error"
initial={{ opacity: 0, y: 10 }}
animate={{ opacity: 1, y: 0 }}
exit={{ opacity: 0, y: -10 }}
>
<p className="rounded-lg border border-destructive p-4">
{settingsMutation.error?.message}
</p>
</motion.div>
)}
</AnimatePresence>
<DialogFooter className="mt-4">
<Button
variant="outline"
type="button"
onClick={() => setOpen(false)}
>
Cancel
</Button>
<Button
type="submit"
disabled={settingsMutation.isPending || isValidating}
>
{settingsMutation.isPending
? "Saving..."
: isValidating
? "Validating..."
: "Save"}
</Button>
</DialogFooter>
</form>
</FormProvider>
</DialogContent>
</Dialog>
);
<AnimatePresence mode="wait">
{settingsMutation.isError && (
<motion.div
key="error"
initial={{ opacity: 0, y: 10 }}
animate={{ opacity: 1, y: 0 }}
exit={{ opacity: 0, y: -10 }}
>
<p className="rounded-lg border border-destructive p-4">
{settingsMutation.error?.message}
</p>
</motion.div>
)}
</AnimatePresence>
<DialogFooter className="mt-4">
<Button
variant="outline"
type="button"
onClick={() => setOpen(false)}
>
Cancel
</Button>
<Button
type="submit"
disabled={settingsMutation.isPending || isValidating}
>
{settingsMutation.isPending
? "Saving..."
: isValidating
? "Validating..."
: "Save"}
</Button>
</DialogFooter>
</form>
</FormProvider>
</DialogContent>
</Dialog>
);
};
export default AnthropicSettingsDialog;

View file

@ -96,20 +96,10 @@ export const ModelProviders = () => {
const currentEmbeddingProvider =
(settings.knowledge?.embedding_provider as ModelProvider) || "openai";
// Get all provider keys with active providers first
const activeProviders = new Set([
currentLlmProvider,
currentEmbeddingProvider,
]);
const sortedProviderKeys = [
...Array.from(activeProviders),
...allProviderKeys.filter((key) => !activeProviders.has(key)),
];
return (
<>
<div className="grid gap-6 xs:grid-cols-1 md:grid-cols-2 lg:grid-cols-4">
{sortedProviderKeys.map((providerKey) => {
{allProviderKeys.map((providerKey) => {
const {
name,
logo: Logo,
@ -118,7 +108,6 @@ export const ModelProviders = () => {
} = modelProvidersMap[providerKey];
const isLlmProvider = providerKey === currentLlmProvider;
const isEmbeddingProvider = providerKey === currentEmbeddingProvider;
const isCurrentProvider = isLlmProvider || isEmbeddingProvider;
// Check if this specific provider is unhealthy
const hasLlmError = isLlmProvider && health?.llm_error;
@ -161,16 +150,8 @@ export const ModelProviders = () => {
</div>
<CardTitle className="flex flex-row items-center gap-2">
{name}
{isCurrentProvider && (
<span
className={cn(
"h-2 w-2 rounded-full",
isProviderUnhealthy
? "bg-destructive"
: "bg-accent-emerald-foreground",
)}
aria-label={isProviderUnhealthy ? "Error" : "Active"}
/>
{isProviderUnhealthy && (
<span className="h-2 w-2 rounded-full bg-destructive" />
)}
</CardTitle>
</div>

View file

@ -10,150 +10,162 @@ import type { ProviderHealthResponse } from "@/app/api/queries/useProviderHealth
import OllamaLogo from "@/components/icons/ollama-logo";
import { Button } from "@/components/ui/button";
import {
Dialog,
DialogContent,
DialogFooter,
DialogHeader,
DialogTitle,
Dialog,
DialogContent,
DialogFooter,
DialogHeader,
DialogTitle,
} from "@/components/ui/dialog";
import { useAuth } from "@/contexts/auth-context";
import {
OllamaSettingsForm,
type OllamaSettingsFormData,
OllamaSettingsForm,
type OllamaSettingsFormData,
} from "./ollama-settings-form";
import { useRouter } from "next/navigation";
const OllamaSettingsDialog = ({
open,
setOpen,
open,
setOpen,
}: {
open: boolean;
setOpen: (open: boolean) => void;
open: boolean;
setOpen: (open: boolean) => void;
}) => {
const { isAuthenticated, isNoAuthMode } = useAuth();
const queryClient = useQueryClient();
const [isValidating, setIsValidating] = useState(false);
const [validationError, setValidationError] = useState<Error | null>(null);
const { isAuthenticated, isNoAuthMode } = useAuth();
const queryClient = useQueryClient();
const [isValidating, setIsValidating] = useState(false);
const [validationError, setValidationError] = useState<Error | null>(null);
const router = useRouter();
const { data: settings = {} } = useGetSettingsQuery({
enabled: isAuthenticated || isNoAuthMode,
});
const { data: settings = {} } = useGetSettingsQuery({
enabled: isAuthenticated || isNoAuthMode,
});
const isOllamaConfigured = settings.providers?.ollama?.configured === true;
const isOllamaConfigured = settings.providers?.ollama?.configured === true;
const methods = useForm<OllamaSettingsFormData>({
mode: "onSubmit",
defaultValues: {
endpoint: isOllamaConfigured
? settings.providers?.ollama?.endpoint
: "http://localhost:11434",
},
});
const methods = useForm<OllamaSettingsFormData>({
mode: "onSubmit",
defaultValues: {
endpoint: isOllamaConfigured
? settings.providers?.ollama?.endpoint
: "http://localhost:11434",
},
});
const { handleSubmit, watch } = methods;
const endpoint = watch("endpoint");
const { handleSubmit, watch } = methods;
const endpoint = watch("endpoint");
const { refetch: validateCredentials } = useGetOllamaModelsQuery(
{
endpoint: endpoint,
},
{
enabled: false,
},
);
const { refetch: validateCredentials } = useGetOllamaModelsQuery(
{
endpoint: endpoint,
},
{
enabled: false,
},
);
const settingsMutation = useUpdateSettingsMutation({
onSuccess: () => {
// Update provider health cache to healthy since backend validated the setup
const healthData: ProviderHealthResponse = {
status: "healthy",
message: "Provider is configured and working correctly",
provider: "ollama",
};
queryClient.setQueryData(["provider", "health"], healthData);
const settingsMutation = useUpdateSettingsMutation({
onSuccess: () => {
// Update provider health cache to healthy since backend validated the setup
const healthData: ProviderHealthResponse = {
status: "healthy",
message: "Provider is configured and working correctly",
provider: "ollama",
};
queryClient.setQueryData(["provider", "health"], healthData);
toast.success(
"Ollama endpoint saved. Configure models in the Settings page.",
);
setOpen(false);
},
});
toast.message("Ollama successfully configured", {
description:
"You can now access the provided language and embedding models.",
duration: Infinity,
closeButton: true,
icon: <OllamaLogo className="w-4 h-4" />,
action: {
label: "Settings",
onClick: () => {
router.push("/settings?focusLlmModel=true");
},
},
});
setOpen(false);
},
});
const onSubmit = async (data: OllamaSettingsFormData) => {
// Clear any previous validation errors
setValidationError(null);
const onSubmit = async (data: OllamaSettingsFormData) => {
// Clear any previous validation errors
setValidationError(null);
// Validate endpoint by fetching models
setIsValidating(true);
const result = await validateCredentials();
setIsValidating(false);
// Validate endpoint by fetching models
setIsValidating(true);
const result = await validateCredentials();
setIsValidating(false);
if (result.isError) {
setValidationError(result.error);
return;
}
if (result.isError) {
setValidationError(result.error);
return;
}
settingsMutation.mutate({
ollama_endpoint: data.endpoint,
});
};
settingsMutation.mutate({
ollama_endpoint: data.endpoint,
});
};
return (
<Dialog open={open} onOpenChange={setOpen}>
<DialogContent className="max-w-2xl">
<FormProvider {...methods}>
<form onSubmit={handleSubmit(onSubmit)} className="grid gap-4">
<DialogHeader className="mb-2">
<DialogTitle className="flex items-center gap-3">
<div className="w-8 h-8 rounded flex items-center justify-center bg-white border">
<OllamaLogo className="text-black" />
</div>
Ollama Setup
</DialogTitle>
</DialogHeader>
return (
<Dialog open={open} onOpenChange={setOpen}>
<DialogContent className="max-w-2xl">
<FormProvider {...methods}>
<form onSubmit={handleSubmit(onSubmit)} className="grid gap-4">
<DialogHeader className="mb-2">
<DialogTitle className="flex items-center gap-3">
<div className="w-8 h-8 rounded flex items-center justify-center bg-white border">
<OllamaLogo className="text-black" />
</div>
Ollama Setup
</DialogTitle>
</DialogHeader>
<OllamaSettingsForm
modelsError={validationError}
isLoadingModels={isValidating}
/>
<OllamaSettingsForm
modelsError={validationError}
isLoadingModels={isValidating}
/>
<AnimatePresence mode="wait">
{settingsMutation.isError && (
<motion.div
key="error"
initial={{ opacity: 0, y: 10 }}
animate={{ opacity: 1, y: 0 }}
exit={{ opacity: 0, y: -10 }}
>
<p className="rounded-lg border border-destructive p-4">
{settingsMutation.error?.message}
</p>
</motion.div>
)}
</AnimatePresence>
<DialogFooter className="mt-4">
<Button
variant="outline"
type="button"
onClick={() => setOpen(false)}
>
Cancel
</Button>
<Button
type="submit"
disabled={settingsMutation.isPending || isValidating}
>
{settingsMutation.isPending
? "Saving..."
: isValidating
? "Validating..."
: "Save"}
</Button>
</DialogFooter>
</form>
</FormProvider>
</DialogContent>
</Dialog>
);
<AnimatePresence mode="wait">
{settingsMutation.isError && (
<motion.div
key="error"
initial={{ opacity: 0, y: 10 }}
animate={{ opacity: 1, y: 0 }}
exit={{ opacity: 0, y: -10 }}
>
<p className="rounded-lg border border-destructive p-4">
{settingsMutation.error?.message}
</p>
</motion.div>
)}
</AnimatePresence>
<DialogFooter className="mt-4">
<Button
variant="outline"
type="button"
onClick={() => setOpen(false)}
>
Cancel
</Button>
<Button
type="submit"
disabled={settingsMutation.isPending || isValidating}
>
{settingsMutation.isPending
? "Saving..."
: isValidating
? "Validating..."
: "Save"}
</Button>
</DialogFooter>
</form>
</FormProvider>
</DialogContent>
</Dialog>
);
};
export default OllamaSettingsDialog;

View file

@ -9,150 +9,162 @@ import type { ProviderHealthResponse } from "@/app/api/queries/useProviderHealth
import OpenAILogo from "@/components/icons/openai-logo";
import { Button } from "@/components/ui/button";
import {
Dialog,
DialogContent,
DialogFooter,
DialogHeader,
DialogTitle,
Dialog,
DialogContent,
DialogFooter,
DialogHeader,
DialogTitle,
} from "@/components/ui/dialog";
import {
OpenAISettingsForm,
type OpenAISettingsFormData,
OpenAISettingsForm,
type OpenAISettingsFormData,
} from "./openai-settings-form";
import { useRouter } from "next/navigation";
/**
 * Modal dialog for configuring OpenAI credentials.
 *
 * Flow: the user enters an API key; if a key was entered it is validated by
 * fetching the model list (useGetOpenAIModelsQuery with enabled:false, fired
 * manually via refetch). On successful save, the provider-health query cache
 * is optimistically marked healthy and a persistent toast links to Settings.
 */
const OpenAISettingsDialog = ({
  open,
  setOpen,
}: {
  open: boolean;
  setOpen: (open: boolean) => void;
}) => {
  const queryClient = useQueryClient();
  const [isValidating, setIsValidating] = useState(false);
  const [validationError, setValidationError] = useState<Error | null>(null);
  const router = useRouter();

  const methods = useForm<OpenAISettingsFormData>({
    mode: "onSubmit",
    defaultValues: {
      apiKey: "",
    },
  });
  const { handleSubmit, watch } = methods;
  const apiKey = watch("apiKey");

  // Disabled query used purely as an on-demand credential check:
  // refetch() attempts to list models with the entered key.
  const { refetch: validateCredentials } = useGetOpenAIModelsQuery(
    {
      apiKey: apiKey,
    },
    {
      enabled: false,
    },
  );

  const settingsMutation = useUpdateSettingsMutation({
    onSuccess: () => {
      // Update provider health cache to healthy since backend validated the setup
      const healthData: ProviderHealthResponse = {
        status: "healthy",
        message: "Provider is configured and working correctly",
        provider: "openai",
      };
      queryClient.setQueryData(["provider", "health"], healthData);

      // Persistent toast (duration: Infinity) with a shortcut into Settings
      // so the user can immediately pick a model.
      toast.message("OpenAI successfully configured", {
        description:
          "You can now access the provided language and embedding models.",
        duration: Infinity,
        closeButton: true,
        icon: <OpenAILogo className="w-4 h-4" />,
        action: {
          label: "Settings",
          onClick: () => {
            router.push("/settings?focusLlmModel=true");
          },
        },
      });
      setOpen(false);
    },
  });

  const onSubmit = async (data: OpenAISettingsFormData) => {
    // Clear any previous validation errors
    setValidationError(null);

    // Only validate if a new API key was entered; an empty field means
    // "keep the existing stored key".
    if (data.apiKey) {
      setIsValidating(true);
      const result = await validateCredentials();
      setIsValidating(false);

      if (result.isError) {
        setValidationError(result.error);
        return;
      }
    }

    const payload: {
      openai_api_key?: string;
    } = {};

    // Only include api_key if a value was entered
    if (data.apiKey) {
      payload.openai_api_key = data.apiKey;
    }

    // Submit the update
    settingsMutation.mutate(payload);
  };

  return (
    <Dialog open={open} onOpenChange={setOpen}>
      <DialogContent className="max-w-2xl">
        <FormProvider {...methods}>
          <form onSubmit={handleSubmit(onSubmit)} className="grid gap-4">
            <DialogHeader className="mb-2">
              <DialogTitle className="flex items-center gap-3">
                <div className="w-8 h-8 rounded flex items-center justify-center bg-white border">
                  <OpenAILogo className="text-black" />
                </div>
                OpenAI Setup
              </DialogTitle>
            </DialogHeader>

            <OpenAISettingsForm
              modelsError={validationError}
              isLoadingModels={isValidating}
            />

            <AnimatePresence mode="wait">
              {settingsMutation.isError && (
                <motion.div
                  key="error"
                  initial={{ opacity: 0, y: 10 }}
                  animate={{ opacity: 1, y: 0 }}
                  exit={{ opacity: 0, y: -10 }}
                >
                  <p className="rounded-lg border border-destructive p-4">
                    {settingsMutation.error?.message}
                  </p>
                </motion.div>
              )}
            </AnimatePresence>

            <DialogFooter className="mt-4">
              <Button
                variant="outline"
                type="button"
                onClick={() => setOpen(false)}
              >
                Cancel
              </Button>
              <Button
                type="submit"
                disabled={settingsMutation.isPending || isValidating}
              >
                {settingsMutation.isPending
                  ? "Saving..."
                  : isValidating
                    ? "Validating..."
                    : "Save"}
              </Button>
            </DialogFooter>
          </form>
        </FormProvider>
      </DialogContent>
    </Dialog>
  );
};

export default OpenAISettingsDialog;

View file

@ -9,158 +9,171 @@ import type { ProviderHealthResponse } from "@/app/api/queries/useProviderHealth
import IBMLogo from "@/components/icons/ibm-logo";
import { Button } from "@/components/ui/button";
import {
  Dialog,
  DialogContent,
  DialogFooter,
  DialogHeader,
  DialogTitle,
} from "@/components/ui/dialog";
import {
  WatsonxSettingsForm,
  type WatsonxSettingsFormData,
} from "./watsonx-settings-form";
import { useRouter } from "next/navigation";
/**
 * Modal dialog for configuring IBM watsonx.ai credentials.
 *
 * Flow: the user supplies an endpoint, API key, and project id. Credentials
 * are always validated by fetching the model list (useGetIBMModelsQuery with
 * enabled:false, fired manually via refetch) before saving. On successful
 * save, the provider-health query cache is optimistically marked healthy and
 * a persistent toast links to Settings.
 */
const WatsonxSettingsDialog = ({
  open,
  setOpen,
}: {
  open: boolean;
  setOpen: (open: boolean) => void;
}) => {
  const queryClient = useQueryClient();
  const [isValidating, setIsValidating] = useState(false);
  const [validationError, setValidationError] = useState<Error | null>(null);
  const router = useRouter();

  const methods = useForm<WatsonxSettingsFormData>({
    mode: "onSubmit",
    defaultValues: {
      endpoint: "https://us-south.ml.cloud.ibm.com",
      apiKey: "",
      projectId: "",
    },
  });
  const { handleSubmit, watch } = methods;
  const endpoint = watch("endpoint");
  const apiKey = watch("apiKey");
  const projectId = watch("projectId");

  // Disabled query used purely as an on-demand credential check:
  // refetch() attempts to list models with the entered credentials.
  const { refetch: validateCredentials } = useGetIBMModelsQuery(
    {
      endpoint: endpoint,
      apiKey: apiKey,
      projectId: projectId,
    },
    {
      enabled: false,
    },
  );

  const settingsMutation = useUpdateSettingsMutation({
    onSuccess: () => {
      // Update provider health cache to healthy since backend validated the setup
      const healthData: ProviderHealthResponse = {
        status: "healthy",
        message: "Provider is configured and working correctly",
        provider: "watsonx",
      };
      queryClient.setQueryData(["provider", "health"], healthData);

      // Persistent toast (duration: Infinity) with a shortcut into Settings
      // so the user can immediately pick a model.
      toast.message("IBM watsonx.ai successfully configured", {
        description:
          "You can now access the provided language and embedding models.",
        duration: Infinity,
        closeButton: true,
        icon: <IBMLogo className="w-4 h-4 text-[#1063FE]" />,
        action: {
          label: "Settings",
          onClick: () => {
            router.push("/settings?focusLlmModel=true");
          },
        },
      });
      setOpen(false);
    },
  });

  const onSubmit = async (data: WatsonxSettingsFormData) => {
    // Clear any previous validation errors
    setValidationError(null);

    // Validate credentials by fetching models
    setIsValidating(true);
    const result = await validateCredentials();
    setIsValidating(false);

    if (result.isError) {
      setValidationError(result.error);
      return;
    }

    const payload: {
      watsonx_endpoint: string;
      watsonx_api_key?: string;
      watsonx_project_id: string;
    } = {
      watsonx_endpoint: data.endpoint,
      watsonx_project_id: data.projectId,
    };

    // Only include api_key if a value was entered
    if (data.apiKey) {
      payload.watsonx_api_key = data.apiKey;
    }

    // Submit the update
    settingsMutation.mutate(payload);
  };

  return (
    <Dialog open={open} onOpenChange={setOpen}>
      <DialogContent autoFocus={false} className="max-w-2xl">
        <FormProvider {...methods}>
          <form onSubmit={handleSubmit(onSubmit)} className="grid gap-4">
            <DialogHeader className="mb-2">
              <DialogTitle className="flex items-center gap-3">
                <div className="w-8 h-8 rounded flex items-center justify-center bg-white border">
                  <IBMLogo className="text-black" />
                </div>
                IBM watsonx.ai Setup
              </DialogTitle>
            </DialogHeader>

            <WatsonxSettingsForm
              modelsError={validationError}
              isLoadingModels={isValidating}
            />

            <AnimatePresence mode="wait">
              {settingsMutation.isError && (
                <motion.div
                  key="error"
                  initial={{ opacity: 0, y: 10 }}
                  animate={{ opacity: 1, y: 0 }}
                  exit={{ opacity: 0, y: -10 }}
                >
                  <p className="rounded-lg border border-destructive p-4">
                    {settingsMutation.error?.message}
                  </p>
                </motion.div>
              )}
            </AnimatePresence>

            <DialogFooter className="mt-4">
              <Button
                variant="outline"
                type="button"
                onClick={() => setOpen(false)}
              >
                Cancel
              </Button>
              <Button
                type="submit"
                disabled={settingsMutation.isPending || isValidating}
              >
                {settingsMutation.isPending
                  ? "Saving..."
                  : isValidating
                    ? "Validating..."
                    : "Save"}
              </Button>
            </DialogFooter>
          </form>
        </FormProvider>
      </DialogContent>
    </Dialog>
  );
};

export default WatsonxSettingsDialog;

File diff suppressed because it is too large Load diff

View file

@ -366,7 +366,7 @@ export default function UploadProviderPage() {
Back
</Button>
<Tooltip>
<TooltipTrigger>
<TooltipTrigger asChild>
<Button
className="bg-foreground text-background hover:bg-foreground/90 font-semibold"
variant={!hasSelectedFiles ? "secondary" : undefined}

View file

@ -196,20 +196,45 @@ export class OneDriveHandler {
},
success: (response: any) => {
const newFiles: CloudFile[] =
response.value?.map((item: any, index: number) => ({
id: item.id,
name:
item.name ||
`${this.getProviderName()} File ${index + 1} (${item.id.slice(
-8,
)})`,
mimeType: item.file?.mimeType || "application/octet-stream",
webUrl: item.webUrl || "",
downloadUrl: item["@microsoft.graph.downloadUrl"] || "",
size: item.size,
modifiedTime: item.lastModifiedDateTime,
isFolder: !!item.folder,
})) || [];
response.value?.map((item: any) => {
// Extract mimeType from file object or infer from name
let mimeType = item.file?.mimeType;
if (!mimeType && item.name) {
// Infer from extension if mimeType not provided
const ext = item.name.split('.').pop()?.toLowerCase();
const mimeTypes: { [key: string]: string } = {
pdf: 'application/pdf',
doc: 'application/msword',
docx: 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
xls: 'application/vnd.ms-excel',
xlsx: 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
ppt: 'application/vnd.ms-powerpoint',
pptx: 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
txt: 'text/plain',
csv: 'text/csv',
json: 'application/json',
xml: 'application/xml',
html: 'text/html',
jpg: 'image/jpeg',
jpeg: 'image/jpeg',
png: 'image/png',
gif: 'image/gif',
svg: 'image/svg+xml',
};
mimeType = mimeTypes[ext || ''] || 'application/octet-stream';
}
return {
id: item.id,
name: item.name || `${this.getProviderName()} File`,
mimeType: mimeType || "application/octet-stream",
webUrl: item.webUrl || "",
downloadUrl: item["@microsoft.graph.downloadUrl"] || "",
size: item.size,
modifiedTime: item.lastModifiedDateTime,
isFolder: !!item.folder,
};
}) || [];
onFileSelected(newFiles);
},

View file

@ -8,7 +8,7 @@ const OneDriveLogo = ({ className }: { className?: string }) => (
className={className}
>
<title>OneDrive Logo</title>
<g clip-path="url(#clip0_3016_367)">
<g clipPath="url(#clip0_3016_367)">
<path
d="M5.2316 2.32803C2.88332 2.3281 1.128 4.25034 0.99585 6.39175C1.07765 6.85315 1.34653 7.7643 1.76759 7.71751C2.29391 7.65902 3.61947 7.71751 4.75008 5.67068C5.57599 4.17546 7.27498 2.328 5.2316 2.32803Z"
fill="url(#paint0_radial_3016_367)"
@ -20,7 +20,7 @@ const OneDriveLogo = ({ className }: { className?: string }) => (
<path
d="M4.68864 3.12741C3.89927 4.37718 2.83674 6.16798 2.47813 6.7315C2.05185 7.40136 0.922937 7.11678 1.01646 6.15663C1.00724 6.23457 1.00016 6.31315 0.995274 6.39226C0.840839 8.89029 2.82143 10.9648 5.28604 10.9648C8.00238 10.9648 14.4806 7.58038 13.825 4.18931C13.134 2.19599 11.1918 0.766266 8.99072 0.766266C6.78965 0.766266 5.37899 2.03436 4.68864 3.12741Z"
fill="url(#paint2_radial_3016_367)"
fill-opacity="0.4"
fillOpacity="0.4"
/>
<path
d="M4.68864 3.12741C3.89927 4.37718 2.83674 6.16798 2.47813 6.7315C2.05185 7.40136 0.922937 7.11678 1.01646 6.15663C1.00724 6.23457 1.00016 6.31315 0.995274 6.39226C0.840839 8.89029 2.82143 10.9648 5.28604 10.9648C8.00238 10.9648 14.4806 7.58038 13.825 4.18931C13.134 2.19599 11.1918 0.766266 8.99072 0.766266C6.78965 0.766266 5.37899 2.03436 4.68864 3.12741Z"
@ -29,12 +29,12 @@ const OneDriveLogo = ({ className }: { className?: string }) => (
<path
d="M4.68864 3.12741C3.89927 4.37718 2.83674 6.16798 2.47813 6.7315C2.05185 7.40136 0.922937 7.11678 1.01646 6.15663C1.00724 6.23457 1.00016 6.31315 0.995274 6.39226C0.840839 8.89029 2.82143 10.9648 5.28604 10.9648C8.00238 10.9648 14.4806 7.58038 13.825 4.18931C13.134 2.19599 11.1918 0.766266 8.99072 0.766266C6.78965 0.766266 5.37899 2.03436 4.68864 3.12741Z"
fill="url(#paint4_radial_3016_367)"
fill-opacity="0.6"
fillOpacity="0.6"
/>
<path
d="M4.68864 3.12741C3.89927 4.37718 2.83674 6.16798 2.47813 6.7315C2.05185 7.40136 0.922937 7.11678 1.01646 6.15663C1.00724 6.23457 1.00016 6.31315 0.995274 6.39226C0.840839 8.89029 2.82143 10.9648 5.28604 10.9648C8.00238 10.9648 14.4806 7.58038 13.825 4.18931C13.134 2.19599 11.1918 0.766266 8.99072 0.766266C6.78965 0.766266 5.37899 2.03436 4.68864 3.12741Z"
fill="url(#paint5_radial_3016_367)"
fill-opacity="0.9"
fillOpacity="0.9"
/>
<path
d="M5.24634 10.9659C5.24634 10.9659 11.7322 10.9786 12.8323 10.9786C14.8288 10.9786 16.3467 9.34866 16.3468 7.44669C16.3468 5.54468 14.7983 3.92459 12.8323 3.92459C10.8663 3.92459 9.73412 5.39542 8.88374 7.00089C7.8873 8.88221 6.61615 10.9433 5.24634 10.9659Z"
@ -43,12 +43,12 @@ const OneDriveLogo = ({ className }: { className?: string }) => (
<path
d="M5.24634 10.9659C5.24634 10.9659 11.7322 10.9786 12.8323 10.9786C14.8288 10.9786 16.3467 9.34866 16.3468 7.44669C16.3468 5.54468 14.7983 3.92459 12.8323 3.92459C10.8663 3.92459 9.73412 5.39542 8.88374 7.00089C7.8873 8.88221 6.61615 10.9433 5.24634 10.9659Z"
fill="url(#paint7_radial_3016_367)"
fill-opacity="0.4"
fillOpacity="0.4"
/>
<path
d="M5.24634 10.9659C5.24634 10.9659 11.7322 10.9786 12.8323 10.9786C14.8288 10.9786 16.3467 9.34866 16.3468 7.44669C16.3468 5.54468 14.7983 3.92459 12.8323 3.92459C10.8663 3.92459 9.73412 5.39542 8.88374 7.00089C7.8873 8.88221 6.61615 10.9433 5.24634 10.9659Z"
fill="url(#paint8_radial_3016_367)"
fill-opacity="0.9"
fillOpacity="0.9"
/>
</g>
<defs>
@ -60,8 +60,8 @@ const OneDriveLogo = ({ className }: { className?: string }) => (
gradientUnits="userSpaceOnUse"
gradientTransform="translate(1.28709 2.88928) rotate(50.1526) scale(4.84121 8.03004)"
>
<stop stop-color="#4894FE" />
<stop offset="0.695072" stop-color="#0934B3" />
<stop stopColor="#4894FE" />
<stop offset="0.695072" stopColor="#0934B3" />
</radialGradient>
<radialGradient
id="paint1_radial_3016_367"
@ -71,8 +71,8 @@ const OneDriveLogo = ({ className }: { className?: string }) => (
gradientUnits="userSpaceOnUse"
gradientTransform="translate(14.2836 -2.68456) rotate(130.923) scale(20.8177 15.4261)"
>
<stop offset="0.165327" stop-color="#23C0FE" />
<stop offset="0.534" stop-color="#1C91FF" />
<stop offset="0.165327" stopColor="#23C0FE" />
<stop offset="0.534" stopColor="#1C91FF" />
</radialGradient>
<radialGradient
id="paint2_radial_3016_367"
@ -82,8 +82,8 @@ const OneDriveLogo = ({ className }: { className?: string }) => (
gradientUnits="userSpaceOnUse"
gradientTransform="translate(4.42852 3.16495) rotate(-139.986) scale(4.23243 9.68892)"
>
<stop stop-color="white" />
<stop offset="0.660528" stop-color="#ADC0FF" stop-opacity="0" />
<stop stopColor="white" />
<stop offset="0.660528" stopColor="#ADC0FF" stopOpacity="0" />
</radialGradient>
<radialGradient
id="paint3_radial_3016_367"
@ -93,8 +93,8 @@ const OneDriveLogo = ({ className }: { className?: string }) => (
gradientUnits="userSpaceOnUse"
gradientTransform="translate(9.03076 8.16737) rotate(-139.764) scale(4.77056 7.24512)"
>
<stop stop-color="#033ACC" />
<stop offset="1" stop-color="#368EFF" stop-opacity="0" />
<stop stopColor="#033ACC" />
<stop offset="1" stopColor="#368EFF" stopOpacity="0" />
</radialGradient>
<radialGradient
id="paint4_radial_3016_367"
@ -104,8 +104,8 @@ const OneDriveLogo = ({ className }: { className?: string }) => (
gradientUnits="userSpaceOnUse"
gradientTransform="translate(4.14837 0.44361) rotate(66.5713) scale(10.4677 11.3005)"
>
<stop offset="0.592618" stop-color="#3464E3" stop-opacity="0" />
<stop offset="1" stop-color="#033ACC" />
<stop offset="0.592618" stopColor="#3464E3" stopOpacity="0" />
<stop offset="1" stopColor="#033ACC" />
</radialGradient>
<radialGradient
id="paint5_radial_3016_367"
@ -115,8 +115,8 @@ const OneDriveLogo = ({ className }: { className?: string }) => (
gradientUnits="userSpaceOnUse"
gradientTransform="translate(14.1157 -1.59739) rotate(135) scale(15.3977 24.123)"
>
<stop stop-color="#4BFDE8" />
<stop offset="0.543937" stop-color="#4BFDE8" stop-opacity="0" />
<stop stopColor="#4BFDE8" />
<stop offset="0.543937" stopColor="#4BFDE8" stopOpacity="0" />
</radialGradient>
<linearGradient
id="paint6_linear_3016_367"
@ -126,8 +126,8 @@ const OneDriveLogo = ({ className }: { className?: string }) => (
y2="4.00825"
gradientUnits="userSpaceOnUse"
>
<stop stop-color="#0086FF" />
<stop offset="0.49" stop-color="#00BBFF" />
<stop stopColor="#0086FF" />
<stop offset="0.49" stopColor="#00BBFF" />
</linearGradient>
<radialGradient
id="paint7_radial_3016_367"
@ -137,8 +137,8 @@ const OneDriveLogo = ({ className }: { className?: string }) => (
gradientUnits="userSpaceOnUse"
gradientTransform="translate(7.16132 4.75417) rotate(21.6324) scale(6.97728 13.2126)"
>
<stop stop-color="white" />
<stop offset="0.785262" stop-color="white" stop-opacity="0" />
<stop stopColor="white" />
<stop offset="0.785262" stopColor="white" stopOpacity="0" />
</radialGradient>
<radialGradient
id="paint8_radial_3016_367"
@ -148,8 +148,8 @@ const OneDriveLogo = ({ className }: { className?: string }) => (
gradientUnits="userSpaceOnUse"
gradientTransform="translate(16.1298 3.37785) rotate(139.243) scale(9.56565 9.59808)"
>
<stop stop-color="#4BFDE8" />
<stop offset="0.584724" stop-color="#4BFDE8" stop-opacity="0" />
<stop stopColor="#4BFDE8" />
<stop offset="0.584724" stopColor="#4BFDE8" stopOpacity="0" />
</radialGradient>
<clipPath id="clip0_3016_367">
<rect

View file

@ -8,7 +8,7 @@ const SharePointLogo = ({ className }: { className?: string }) => (
className={className}
>
<title>SharePoint Logo</title>
<g clip-path="url(#clip0_3016_409)">
<g clipPath="url(#clip0_3016_409)">
<path
d="M6.1335 9.6C8.78446 9.6 10.9335 7.45096 10.9335 4.8C10.9335 2.14903 8.78446 0 6.1335 0C3.48254 0 1.3335 2.14903 1.3335 4.8C1.3335 7.45096 3.48254 9.6 6.1335 9.6Z"
fill="url(#paint0_linear_3016_409)"
@ -16,17 +16,17 @@ const SharePointLogo = ({ className }: { className?: string }) => (
<path
d="M6.1335 9.6C8.78446 9.6 10.9335 7.45096 10.9335 4.8C10.9335 2.14903 8.78446 0 6.1335 0C3.48254 0 1.3335 2.14903 1.3335 4.8C1.3335 7.45096 3.48254 9.6 6.1335 9.6Z"
fill="url(#paint1_radial_3016_409)"
fill-opacity="0.2"
fillOpacity="0.2"
/>
<path
d="M6.1335 9.6C8.78446 9.6 10.9335 7.45096 10.9335 4.8C10.9335 2.14903 8.78446 0 6.1335 0C3.48254 0 1.3335 2.14903 1.3335 4.8C1.3335 7.45096 3.48254 9.6 6.1335 9.6Z"
fill="url(#paint2_radial_3016_409)"
fill-opacity="0.31"
fillOpacity="0.31"
/>
<path
d="M6.1335 9.6C8.78446 9.6 10.9335 7.45096 10.9335 4.8C10.9335 2.14903 8.78446 0 6.1335 0C3.48254 0 1.3335 2.14903 1.3335 4.8C1.3335 7.45096 3.48254 9.6 6.1335 9.6Z"
fill="url(#paint3_radial_3016_409)"
fill-opacity="0.7"
fillOpacity="0.7"
/>
<path
d="M10.5117 12.8C12.7209 12.8 14.5117 11.0091 14.5117 8.8C14.5117 6.59088 12.7209 4.8 10.5117 4.8C8.3026 4.8 6.51172 6.59088 6.51172 8.8C6.51172 11.0091 8.3026 12.8 10.5117 12.8Z"
@ -35,12 +35,12 @@ const SharePointLogo = ({ className }: { className?: string }) => (
<path
d="M10.5117 12.8C12.7209 12.8 14.5117 11.0091 14.5117 8.8C14.5117 6.59088 12.7209 4.8 10.5117 4.8C8.3026 4.8 6.51172 6.59088 6.51172 8.8C6.51172 11.0091 8.3026 12.8 10.5117 12.8Z"
fill="url(#paint5_radial_3016_409)"
fill-opacity="0.5"
fillOpacity="0.5"
/>
<path
d="M10.5117 12.8C12.7209 12.8 14.5117 11.0091 14.5117 8.8C14.5117 6.59088 12.7209 4.8 10.5117 4.8C8.3026 4.8 6.51172 6.59088 6.51172 8.8C6.51172 11.0091 8.3026 12.8 10.5117 12.8Z"
fill="url(#paint6_radial_3016_409)"
fill-opacity="0.7"
fillOpacity="0.7"
/>
<path
d="M6.7335 16C8.61126 16 10.1335 14.4778 10.1335 12.6C10.1335 10.7222 8.61126 9.2 6.7335 9.2C4.85574 9.2 3.3335 10.7222 3.3335 12.6C3.3335 14.4778 4.85574 16 6.7335 16Z"
@ -49,7 +49,7 @@ const SharePointLogo = ({ className }: { className?: string }) => (
<path
d="M6.7335 16C8.61126 16 10.1335 14.4778 10.1335 12.6C10.1335 10.7222 8.61126 9.2 6.7335 9.2C4.85574 9.2 3.3335 10.7222 3.3335 12.6C3.3335 14.4778 4.85574 16 6.7335 16Z"
fill="url(#paint8_linear_3016_409)"
fill-opacity="0.32"
fillOpacity="0.32"
/>
<path
d="M5.23354 7.60001H1.43354C0.715575 7.60001 0.133545 8.18204 0.133545 8.90001V12.7C0.133545 13.418 0.715575 14 1.43354 14H5.23354C5.95151 14 6.53354 13.418 6.53354 12.7V8.90001C6.53354 8.18204 5.95151 7.60001 5.23354 7.60001Z"
@ -58,7 +58,7 @@ const SharePointLogo = ({ className }: { className?: string }) => (
<path
d="M5.23354 7.60001H1.43354C0.715575 7.60001 0.133545 8.18204 0.133545 8.90001V12.7C0.133545 13.418 0.715575 14 1.43354 14H5.23354C5.95151 14 6.53354 13.418 6.53354 12.7V8.90001C6.53354 8.18204 5.95151 7.60001 5.23354 7.60001Z"
fill="url(#paint10_radial_3016_409)"
fill-opacity="0.6"
fillOpacity="0.6"
/>
<path
d="M1.95581 11.8734L2.64917 11.523C2.72733 11.676 2.82929 11.7887 2.95505 11.8611C3.08249 11.9335 3.22185 11.9697 3.37309 11.9697C3.54133 11.9697 3.66965 11.9368 3.75801 11.871C3.84641 11.8036 3.89057 11.7024 3.89057 11.5675C3.89057 11.4622 3.84809 11.3733 3.76313 11.301C3.67817 11.2269 3.52777 11.171 3.31193 11.1332C2.90069 11.0608 2.60157 10.9341 2.41465 10.7531C2.22941 10.5722 2.13679 10.3468 2.13679 10.077C2.13679 9.74136 2.25915 9.4732 2.50387 9.27248C2.74857 9.0718 3.07145 8.97144 3.47253 8.97144C3.74273 8.97144 3.98065 9.02492 4.18629 9.13184C4.39189 9.23876 4.55505 9.39176 4.67569 9.59084L3.99765 9.92892C3.92285 9.81704 3.84213 9.73644 3.75549 9.68708C3.66881 9.63608 3.56005 9.61056 3.42917 9.61056C3.27285 9.61056 3.15389 9.64348 3.07233 9.70928C2.99245 9.77508 2.95249 9.86064 2.95249 9.96592C2.95249 10.0564 2.99073 10.1362 3.06721 10.2053C3.14537 10.2727 3.30173 10.3278 3.53625 10.3706C3.93053 10.443 4.22449 10.5746 4.41825 10.7654C4.61369 10.9546 4.71137 11.194 4.71137 11.4836C4.71137 11.8356 4.59497 12.1145 4.36217 12.3201C4.12933 12.5258 3.79713 12.6286 3.36545 12.6286C3.05277 12.6286 2.77065 12.5628 2.51916 12.4312C2.26935 12.2979 2.08157 12.112 1.95581 11.8734Z"
@ -78,9 +78,9 @@ const SharePointLogo = ({ className }: { className?: string }) => (
y2="9.6"
gradientUnits="userSpaceOnUse"
>
<stop stop-color="#00E3DF" />
<stop offset="0.410156" stop-color="#0097A8" />
<stop offset="1" stop-color="#007791" />
<stop stopColor="#00E3DF" />
<stop offset="0.410156" stopColor="#0097A8" />
<stop offset="1" stopColor="#007791" />
</linearGradient>
<radialGradient
id="paint1_radial_3016_409"
@ -90,9 +90,9 @@ const SharePointLogo = ({ className }: { className?: string }) => (
gradientUnits="userSpaceOnUse"
gradientTransform="translate(7.60222 10.9279) rotate(-112.448) scale(7.37044 13.2516)"
>
<stop offset="0.28573" stop-color="#003B5D" />
<stop offset="0.612265" stop-color="#004A6C" stop-opacity="0.688298" />
<stop offset="0.968041" stop-color="#006F94" stop-opacity="0" />
<stop offset="0.28573" stopColor="#003B5D" />
<stop offset="0.612265" stopColor="#004A6C" stopOpacity="0.688298" />
<stop offset="0.968041" stopColor="#006F94" stopOpacity="0" />
</radialGradient>
<radialGradient
id="paint2_radial_3016_409"
@ -102,9 +102,9 @@ const SharePointLogo = ({ className }: { className?: string }) => (
gradientUnits="userSpaceOnUse"
gradientTransform="translate(7.77166 8.81012) rotate(-112.063) scale(6.22076 11.1709)"
>
<stop offset="0.259744" stop-color="#002A42" />
<stop offset="0.612265" stop-color="#004261" stop-opacity="0.688298" />
<stop offset="0.968041" stop-color="#006F94" stop-opacity="0" />
<stop offset="0.259744" stopColor="#002A42" />
<stop offset="0.612265" stopColor="#004261" stopOpacity="0.688298" />
<stop offset="0.968041" stopColor="#006F94" stopOpacity="0" />
</radialGradient>
<radialGradient
id="paint3_radial_3016_409"
@ -114,8 +114,8 @@ const SharePointLogo = ({ className }: { className?: string }) => (
gradientUnits="userSpaceOnUse"
gradientTransform="translate(8.87294 0.508276) rotate(124.447) scale(5.20428)"
>
<stop stop-color="#78EDFF" />
<stop offset="1" stop-color="#2CCFCA" stop-opacity="0" />
<stop stopColor="#78EDFF" />
<stop offset="1" stopColor="#2CCFCA" stopOpacity="0" />
</radialGradient>
<linearGradient
id="paint4_linear_3016_409"
@ -125,9 +125,9 @@ const SharePointLogo = ({ className }: { className?: string }) => (
y2="12.8"
gradientUnits="userSpaceOnUse"
>
<stop stop-color="#00E3DF" />
<stop offset="0.476427" stop-color="#00A2B8" />
<stop offset="0.945063" stop-color="#00637C" />
<stop stopColor="#00E3DF" />
<stop offset="0.476427" stopColor="#00A2B8" />
<stop offset="0.945063" stopColor="#00637C" />
</linearGradient>
<radialGradient
id="paint5_radial_3016_409"
@ -137,9 +137,9 @@ const SharePointLogo = ({ className }: { className?: string }) => (
gradientUnits="userSpaceOnUse"
gradientTransform="translate(8.22004 12.1333) rotate(-70.8012) scale(4.94148 8.90348)"
>
<stop stop-color="#003B5D" />
<stop offset="0.492035" stop-color="#004C6C" stop-opacity="0.72" />
<stop offset="0.968041" stop-color="#007A86" stop-opacity="0" />
<stop stopColor="#003B5D" />
<stop offset="0.492035" stopColor="#004C6C" stopOpacity="0.72" />
<stop offset="0.968041" stopColor="#007A86" stopOpacity="0" />
</radialGradient>
<radialGradient
id="paint6_radial_3016_409"
@ -149,8 +149,8 @@ const SharePointLogo = ({ className }: { className?: string }) => (
gradientUnits="userSpaceOnUse"
gradientTransform="translate(12.7946 5.22356) rotate(124.447) scale(4.33692)"
>
<stop stop-color="#78EDFF" />
<stop offset="1" stop-color="#2CCFCA" stop-opacity="0" />
<stop stopColor="#78EDFF" />
<stop offset="1" stopColor="#2CCFCA" stopOpacity="0" />
</radialGradient>
<linearGradient
id="paint7_linear_3016_409"
@ -160,9 +160,9 @@ const SharePointLogo = ({ className }: { className?: string }) => (
y2="16.34"
gradientUnits="userSpaceOnUse"
>
<stop offset="0.0534989" stop-color="#75FFF6" />
<stop offset="0.51144" stop-color="#00C7D1" />
<stop offset="0.96002" stop-color="#0096AD" />
<stop offset="0.0534989" stopColor="#75FFF6" />
<stop offset="0.51144" stopColor="#00C7D1" />
<stop offset="0.96002" stopColor="#0096AD" />
</linearGradient>
<linearGradient
id="paint8_linear_3016_409"
@ -172,9 +172,9 @@ const SharePointLogo = ({ className }: { className?: string }) => (
y2="13.4503"
gradientUnits="userSpaceOnUse"
>
<stop offset="0.259744" stop-color="#0E5A5D" />
<stop offset="0.535716" stop-color="#126C6B" stop-opacity="0.688298" />
<stop offset="0.968041" stop-color="#1C948A" stop-opacity="0" />
<stop offset="0.259744" stopColor="#0E5A5D" />
<stop offset="0.535716" stopColor="#126C6B" stopOpacity="0.688298" />
<stop offset="0.968041" stopColor="#1C948A" stopOpacity="0" />
</linearGradient>
<radialGradient
id="paint9_radial_3016_409"
@ -184,8 +184,8 @@ const SharePointLogo = ({ className }: { className?: string }) => (
gradientUnits="userSpaceOnUse"
gradientTransform="translate(0.133545 7.60001) rotate(45) scale(9.05096)"
>
<stop offset="0.0625" stop-color="#00B6BD" />
<stop offset="0.890131" stop-color="#00495C" />
<stop offset="0.0625" stopColor="#00B6BD" />
<stop offset="0.890131" stopColor="#00495C" />
</radialGradient>
<radialGradient
id="paint10_radial_3016_409"
@ -195,8 +195,8 @@ const SharePointLogo = ({ className }: { className?: string }) => (
gradientUnits="userSpaceOnUse"
gradientTransform="translate(3.33354 11.44) rotate(90) scale(4.48 5.1)"
>
<stop offset="0.566964" stop-color="#1E8581" stop-opacity="0" />
<stop offset="0.973806" stop-color="#1ECBE6" />
<stop offset="0.566964" stopColor="#1E8581" stopOpacity="0" />
<stop offset="0.973806" stopColor="#1ECBE6" />
</radialGradient>
<clipPath id="clip0_3016_409">
<rect

View file

@ -0,0 +1,53 @@
import { useEffect, useState } from "react";
/**
 * Hook to detect when files are being dragged into the browser window.
 *
 * A counter is needed because dragenter/dragleave fire once for every nested
 * DOM element the cursor crosses; only the transition between zero and a
 * positive count should toggle the visible state.
 *
 * @returns isDragging - true while files are being dragged over the window
 */
export function useFileDrag() {
  const [isDragging, setIsDragging] = useState(false);

  useEffect(() => {
    let dragCounter = 0;

    const handleDragEnter = (e: DragEvent) => {
      // Only react to drags that carry files (ignore text/link drags).
      if (e.dataTransfer?.types.includes("Files")) {
        dragCounter++;
        if (dragCounter > 0) {
          setIsDragging(true);
        }
      }
    };

    const handleDragLeave = () => {
      // Clamp at 0: dragleave also fires for drags we never counted
      // (non-file drags, or events arriving after drop reset the counter).
      // Without the clamp the counter goes negative and the next file drag's
      // dragenter lands on 0 instead of 1, leaving isDragging stuck false.
      dragCounter = Math.max(0, dragCounter - 1);
      if (dragCounter === 0) {
        setIsDragging(false);
      }
    };

    const handleDragOver = (e: DragEvent) => {
      // preventDefault is required for the window to accept a drop at all.
      e.preventDefault();
    };

    const handleDrop = () => {
      dragCounter = 0;
      setIsDragging(false);
    };

    window.addEventListener("dragenter", handleDragEnter);
    window.addEventListener("dragleave", handleDragLeave);
    window.addEventListener("dragover", handleDragOver);
    window.addEventListener("drop", handleDrop);

    return () => {
      window.removeEventListener("dragenter", handleDragEnter);
      window.removeEventListener("dragleave", handleDragLeave);
      window.removeEventListener("dragover", handleDragOver);
      window.removeEventListener("drop", handleDrop);
    };
  }, []);

  return isDragging;
}

View file

@ -38,6 +38,10 @@ export function useChatStreaming({
limit = 10,
scoreThreshold = 0,
}: SendMessageOptions) => {
// Set up timeout to detect stuck/hanging requests
let timeoutId: NodeJS.Timeout | null = null;
let hasReceivedData = false;
try {
setIsLoading(true);
@ -49,6 +53,20 @@ export function useChatStreaming({
const controller = new AbortController();
streamAbortRef.current = controller;
const thisStreamId = ++streamIdRef.current;
// Set up timeout (60 seconds for initial response, then extended as data comes in)
const startTimeout = () => {
if (timeoutId) clearTimeout(timeoutId);
timeoutId = setTimeout(() => {
if (!hasReceivedData) {
console.error("Chat request timed out - no response received");
controller.abort();
throw new Error("Request timed out. The server is not responding.");
}
}, 60000); // 60 second timeout
};
startTimeout();
const requestBody: {
prompt: string;
@ -81,8 +99,13 @@ export function useChatStreaming({
signal: controller.signal,
});
// Clear timeout once we get initial response
if (timeoutId) clearTimeout(timeoutId);
hasReceivedData = true;
if (!response.ok) {
throw new Error(`HTTP error! status: ${response.status}`);
const errorText = await response.text().catch(() => "Unknown error");
throw new Error(`Server error (${response.status}): ${errorText}`);
}
const reader = response.body?.getReader();
@ -112,6 +135,10 @@ export function useChatStreaming({
if (controller.signal.aborted || thisStreamId !== streamIdRef.current)
break;
if (done) break;
// Reset timeout on each chunk received
hasReceivedData = true;
if (timeoutId) clearTimeout(timeoutId);
buffer += decoder.decode(value, { stream: true });
@ -435,6 +462,12 @@ export function useChatStreaming({
}
} finally {
reader.releaseLock();
if (timeoutId) clearTimeout(timeoutId);
}
// Check if we got any content at all
if (!hasReceivedData || (!currentContent && currentFunctionCalls.length === 0)) {
throw new Error("No response received from the server. Please try again.");
}
// Finalize the message
@ -456,25 +489,43 @@ export function useChatStreaming({
return null;
} catch (error) {
// If stream was aborted, don't handle as error
if (streamAbortRef.current?.signal.aborted) {
// Clean up timeout
if (timeoutId) clearTimeout(timeoutId);
// If stream was aborted by user, don't handle as error
if (streamAbortRef.current?.signal.aborted && !(error as Error).message?.includes("timed out")) {
return null;
}
console.error("SSE Stream error:", error);
console.error("Chat stream error:", error);
setStreamingMessage(null);
// Create user-friendly error message
let errorContent = "Sorry, I couldn't connect to the chat service. Please try again.";
const errorMessage = (error as Error).message;
if (errorMessage?.includes("timed out")) {
errorContent = "The request timed out. The server took too long to respond. Please try again.";
} else if (errorMessage?.includes("No response")) {
errorContent = "The server didn't return a response. Please try again.";
} else if (errorMessage?.includes("NetworkError") || errorMessage?.includes("Failed to fetch")) {
errorContent = "Network error. Please check your connection and try again.";
} else if (errorMessage?.includes("Server error")) {
errorContent = errorMessage; // Use the detailed server error message
}
onError?.(error as Error);
const errorMessage: Message = {
const errorMessageObj: Message = {
role: "assistant",
content:
"Sorry, I couldn't connect to the chat service. Please try again.",
content: errorContent,
timestamp: new Date(),
isStreaming: false,
};
return errorMessage;
return errorMessageObj;
} finally {
if (timeoutId) clearTimeout(timeoutId);
setIsLoading(false);
}
};

View file

@ -4,7 +4,7 @@
export const DEFAULT_AGENT_SETTINGS = {
llm_model: "gpt-4o-mini",
system_prompt:
'You are the OpenRAG Agent. You answer questions using retrieval, reasoning, and tool use.\nYou have access to several tools. Your job is to determine **which tool to use and when**.\n### Available Tools\n- OpenSearch Retrieval Tool:\n Use this to search the indexed knowledge base. Use when the user asks about product details, internal concepts, processes, architecture, documentation, roadmaps, or anything that may be stored in the index.\n- Conversation History:\n Use this to maintain continuity when the user is referring to previous turns. \n Do not treat history as a factual source.\n- Conversation File Context:\n Use this when the user asks about a document they uploaded or refers directly to its contents.\n- URL Ingestion Tool:\n Use this **only** when the user explicitly asks you to read, summarize, or analyze the content of a URL.\n Do not ingest URLs automatically.\n- Calculator / Expression Evaluation Tool:\n Use this when the user asks to compare numbers, compute estimates, calculate totals, analyze pricing, or answer any question requiring mathematics or quantitative reasoning.\n If the answer requires arithmetic, call the calculator tool rather than calculating internally.\n### Retrieval Decision Rules\nUse OpenSearch **whenever**:\n1. The question may be answered from internal or indexed data.\n2. The user references team names, product names, release plans, configurations, requirements, or official information.\n3. 
The user needs a factual, grounded answer.\nDo **not** use retrieval if:\n- The question is purely creative (e.g., storytelling, analogies) or personal preference.\n- The user simply wants text reformatted or rewritten from what is already present in the conversation.\nWhen uncertain → **Retrieve.** Retrieval is low risk and improves grounding.\n### URL Ingestion Rules\nOnly ingest URLs when the user explicitly says:\n- "Read this link"\n- "Summarize this webpage"\n- "What does this site say?"\n- "Ingest this URL"\nIf unclear → ask a clarifying question.\n### Calculator Usage Rules\nUse the calculator when:\n- Performing arithmetic\n- Estimating totals\n- Comparing values\n- Modeling cost, time, effort, scale, or projections\nDo not perform math internally. **Call the calculator tool instead.**\n### Answer Construction Rules\n1. When asked: "What is OpenRAG", answer the following:\n"OpenRAG is an open-source package for building agentic RAG systems. It supports integration with a wide range of orchestration tools, vector databases, and LLM providers. OpenRAG connects and amplifies three popular, proven open-source projects into one powerful platform:\n**Langflow** Langflow is a powerful tool to build and deploy AI agents and MCP servers [Read more](https://www.langflow.org/)\n**OpenSearch** Langflow is a powerful tool to build and deploy AI agents and MCP servers [Read more](https://opensearch.org/)\n**Docling** Langflow is a powerful tool to build and deploy AI agents and MCP servers [Read more](https://www.docling.ai/)"\n2. Synthesize retrieved or ingested content in your own words.\n3. Support factual claims with citations in the format:\n (Source: <document_name_or_id>)\n4. If no supporting evidence is found:\n Say: "No relevant supporting sources were found for that request."\n5. Never invent facts or hallucinate details.\n6. Be concise, direct, and confident. \n7. Do not reveal internal chain-of-thought.',
'You are the OpenRAG Agent. You answer questions using retrieval, reasoning, and tool use.\nYou have access to several tools. Your job is to determine **which tool to use and when**.\n### Available Tools\n- OpenSearch Retrieval Tool:\n Use this to search the indexed knowledge base. Use when the user asks about product details, internal concepts, processes, architecture, documentation, roadmaps, or anything that may be stored in the index.\n- Conversation History:\n Use this to maintain continuity when the user is referring to previous turns. \n Do not treat history as a factual source.\n- Conversation File Context:\n Use this when the user asks about a document they uploaded or refers directly to its contents.\n- URL Ingestion Tool:\n Use this **only** when the user explicitly asks you to read, summarize, or analyze the content of a URL.\n Do not ingest URLs automatically.\n- Calculator / Expression Evaluation Tool:\n Use this when the user asks to compare numbers, compute estimates, calculate totals, analyze pricing, or answer any question requiring mathematics or quantitative reasoning.\n If the answer requires arithmetic, call the calculator tool rather than calculating internally.\n### Retrieval Decision Rules\nUse OpenSearch **whenever**:\n1. The question may be answered from internal or indexed data.\n2. The user references team names, product names, release plans, configurations, requirements, or official information.\n3. 
The user needs a factual, grounded answer.\nDo **not** use retrieval if:\n- The question is purely creative (e.g., storytelling, analogies) or personal preference.\n- The user simply wants text reformatted or rewritten from what is already present in the conversation.\nWhen uncertain → **Retrieve.** Retrieval is low risk and improves grounding.\n### URL Ingestion Rules\nOnly ingest URLs when the user explicitly says:\n- \"Read this link\"\n- \"Summarize this webpage\"\n- \"What does this site say?\"\n- \"Ingest this URL\"\nIf unclear → ask a clarifying question.\n### Calculator Usage Rules\nUse the calculator when:\n- Performing arithmetic\n- Estimating totals\n- Comparing values\n- Modeling cost, time, effort, scale, or projections\nDo not perform math internally. **Call the calculator tool instead.**\n### Answer Construction Rules\n1. When asked: \"What is OpenRAG\", answer the following:\n\"OpenRAG is an open-source package for building agentic RAG systems. It supports integration with a wide range of orchestration tools, vector databases, and LLM providers. OpenRAG connects and amplifies three popular, proven open-source projects into one powerful platform:\n**Langflow** Langflow is a powerful tool to build and deploy AI agents and MCP servers. [Read more](https://www.langflow.org/)\n**OpenSearch** OpenSearch is an open source, search and observability suite that brings order to unstructured data at scale. [Read more](https://opensearch.org/)\n**Docling** Docling simplifies document processing with advanced PDF understanding, OCR support, and seamless AI integrations. Parse PDFs, DOCX, PPTX, images & more. [Read more](https://www.docling.ai/)\"\n2. Synthesize retrieved or ingested content in your own words.\n3. Support factual claims with citations in the format:\n (Source: <document_name_or_id>)\n4. If no supporting evidence is found:\n Say: \"No relevant supporting sources were found for that request.\"\n5. Never invent facts or hallucinate details.\n6. 
Be concise, direct, and confident. \n7. Do not reveal internal chain-of-thought.',
} as const;
/**

View file

@ -23,7 +23,7 @@
"@radix-ui/react-select": "^2.2.5",
"@radix-ui/react-separator": "^1.1.7",
"@radix-ui/react-slider": "^1.3.6",
"@radix-ui/react-slot": "^1.2.3",
"@radix-ui/react-slot": "^1.2.4",
"@radix-ui/react-switch": "^1.2.5",
"@radix-ui/react-tabs": "^1.1.13",
"@radix-ui/react-tooltip": "^1.2.8",
@ -42,6 +42,7 @@
"next-themes": "^0.4.6",
"react": "^19.0.0",
"react-dom": "^19.0.0",
"react-dropzone": "^14.3.8",
"react-hook-form": "^7.65.0",
"react-icons": "^5.5.0",
"react-markdown": "^10.1.0",
@ -1545,6 +1546,23 @@
}
}
},
"node_modules/@radix-ui/react-collection/node_modules/@radix-ui/react-slot": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz",
"integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==",
"dependencies": {
"@radix-ui/react-compose-refs": "1.1.2"
},
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-compose-refs": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@radix-ui/react-compose-refs/-/react-compose-refs-1.1.2.tgz",
@ -1611,6 +1629,23 @@
}
}
},
"node_modules/@radix-ui/react-dialog/node_modules/@radix-ui/react-slot": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz",
"integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==",
"dependencies": {
"@radix-ui/react-compose-refs": "1.1.2"
},
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-direction": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/@radix-ui/react-direction/-/react-direction-1.1.1.tgz",
@ -1803,6 +1838,23 @@
}
}
},
"node_modules/@radix-ui/react-menu/node_modules/@radix-ui/react-slot": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz",
"integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==",
"dependencies": {
"@radix-ui/react-compose-refs": "1.1.2"
},
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-navigation-menu": {
"version": "1.2.14",
"resolved": "https://registry.npmjs.org/@radix-ui/react-navigation-menu/-/react-navigation-menu-1.2.14.tgz",
@ -1876,6 +1928,23 @@
}
}
},
"node_modules/@radix-ui/react-popover/node_modules/@radix-ui/react-slot": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz",
"integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==",
"dependencies": {
"@radix-ui/react-compose-refs": "1.1.2"
},
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-popper": {
"version": "1.2.8",
"resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.2.8.tgz",
@ -1979,6 +2048,23 @@
}
}
},
"node_modules/@radix-ui/react-primitive/node_modules/@radix-ui/react-slot": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz",
"integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==",
"dependencies": {
"@radix-ui/react-compose-refs": "1.1.2"
},
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-radio-group": {
"version": "1.3.8",
"resolved": "https://registry.npmjs.org/@radix-ui/react-radio-group/-/react-radio-group-1.3.8.tgz",
@ -2085,6 +2171,23 @@
}
}
},
"node_modules/@radix-ui/react-select/node_modules/@radix-ui/react-slot": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz",
"integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==",
"dependencies": {
"@radix-ui/react-compose-refs": "1.1.2"
},
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-separator": {
"version": "1.1.7",
"resolved": "https://registry.npmjs.org/@radix-ui/react-separator/-/react-separator-1.1.7.tgz",
@ -2142,10 +2245,9 @@
}
},
"node_modules/@radix-ui/react-slot": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz",
"integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==",
"license": "MIT",
"version": "1.2.4",
"resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.4.tgz",
"integrity": "sha512-Jl+bCv8HxKnlTLVrcDE8zTMJ09R9/ukw4qBs/oZClOfoQk/cOTbDn+NceXfV7j09YPVQUryJPHurafcSg6EVKA==",
"dependencies": {
"@radix-ui/react-compose-refs": "1.1.2"
},
@ -2252,6 +2354,23 @@
}
}
},
"node_modules/@radix-ui/react-tooltip/node_modules/@radix-ui/react-slot": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/@radix-ui/react-slot/-/react-slot-1.2.3.tgz",
"integrity": "sha512-aeNmHnBxbi2St0au6VBVC7JXFlhLlOnvIIlePNniyUNAClzmtAUEY8/pBiK3iHjufOlwA+c20/8jngo7xcrg8A==",
"dependencies": {
"@radix-ui/react-compose-refs": "1.1.2"
},
"peerDependencies": {
"@types/react": "*",
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
}
}
},
"node_modules/@radix-ui/react-use-callback-ref": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/@radix-ui/react-use-callback-ref/-/react-use-callback-ref-1.1.1.tgz",
@ -3536,6 +3655,14 @@
"node": ">= 0.4"
}
},
"node_modules/attr-accept": {
"version": "2.2.5",
"resolved": "https://registry.npmjs.org/attr-accept/-/attr-accept-2.2.5.tgz",
"integrity": "sha512-0bDNnY/u6pPwHDMoF0FieU354oBi0a8rD9FcsLwzcGWbc8KS8KPIi7y+s13OlVY+gMWc/9xEMUgNE6Qm8ZllYQ==",
"engines": {
"node": ">=4"
}
},
"node_modules/autoprefixer": {
"version": "10.4.21",
"resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.21.tgz",
@ -5045,6 +5172,17 @@
"node": ">=16.0.0"
}
},
"node_modules/file-selector": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/file-selector/-/file-selector-2.1.2.tgz",
"integrity": "sha512-QgXo+mXTe8ljeqUFaX3QVHc5osSItJ/Km+xpocx0aSqWGMSCf6qYs/VnzZgS864Pjn5iceMRFigeAV7AfTlaig==",
"dependencies": {
"tslib": "^2.7.0"
},
"engines": {
"node": ">= 12"
}
},
"node_modules/fill-range": {
"version": "7.1.1",
"resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
@ -6681,7 +6819,6 @@
"version": "0.525.0",
"resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.525.0.tgz",
"integrity": "sha512-Tm1txJ2OkymCGkvwoHt33Y2JpN5xucVq1slHcgE6Lk0WjDfjgKWor5CdVER8U6DvcfMwh4M8XxmpTiyzfmfDYQ==",
"license": "ISC",
"peerDependencies": {
"react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0"
}
@ -8504,6 +8641,22 @@
"react": "^19.1.1"
}
},
"node_modules/react-dropzone": {
"version": "14.3.8",
"resolved": "https://registry.npmjs.org/react-dropzone/-/react-dropzone-14.3.8.tgz",
"integrity": "sha512-sBgODnq+lcA4P296DY4wacOZz3JFpD99fp+hb//iBO2HHnyeZU3FwWyXJ6salNpqQdsZrgMrotuko/BdJMV8Ug==",
"dependencies": {
"attr-accept": "^2.2.4",
"file-selector": "^2.1.0",
"prop-types": "^15.8.1"
},
"engines": {
"node": ">= 10.13"
},
"peerDependencies": {
"react": ">= 16.8 || 18.0.0"
}
},
"node_modules/react-hook-form": {
"version": "7.65.0",
"resolved": "https://registry.npmjs.org/react-hook-form/-/react-hook-form-7.65.0.tgz",

View file

@ -26,7 +26,7 @@
"@radix-ui/react-select": "^2.2.5",
"@radix-ui/react-separator": "^1.1.7",
"@radix-ui/react-slider": "^1.3.6",
"@radix-ui/react-slot": "^1.2.3",
"@radix-ui/react-slot": "^1.2.4",
"@radix-ui/react-switch": "^1.2.5",
"@radix-ui/react-tabs": "^1.1.13",
"@radix-ui/react-tooltip": "^1.2.8",
@ -45,6 +45,7 @@
"next-themes": "^0.4.6",
"react": "^19.0.0",
"react-dom": "^19.0.0",
"react-dropzone": "^14.3.8",
"react-hook-form": "^7.65.0",
"react-icons": "^5.5.0",
"react-markdown": "^10.1.0",

View file

@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
[project]
name = "openrag"
version = "0.1.35"
version = "0.1.36"
description = "Add your description here"
readme = "README.md"
requires-python = ">=3.13"

View file

@ -34,7 +34,7 @@ def get_conversation_thread(user_id: str, previous_response_id: str = None):
"messages": [
{
"role": "system",
"content": "You are the OpenRAG Agent. You answer questions using retrieval, reasoning, and tool use.\nYou have access to several tools. Your job is to determine **which tool to use and when**.\n### Available Tools\n- OpenSearch Retrieval Tool:\n Use this to search the indexed knowledge base. Use when the user asks about product details, internal concepts, processes, architecture, documentation, roadmaps, or anything that may be stored in the index.\n- Conversation History:\n Use this to maintain continuity when the user is referring to previous turns. \n Do not treat history as a factual source.\n- Conversation File Context:\n Use this when the user asks about a document they uploaded or refers directly to its contents.\n- URL Ingestion Tool:\n Use this **only** when the user explicitly asks you to read, summarize, or analyze the content of a URL.\n Do not ingest URLs automatically.\n- Calculator / Expression Evaluation Tool:\n Use this when the user asks to compare numbers, compute estimates, calculate totals, analyze pricing, or answer any question requiring mathematics or quantitative reasoning.\n If the answer requires arithmetic, call the calculator tool rather than calculating internally.\n### Retrieval Decision Rules\nUse OpenSearch **whenever**:\n1. The question may be answered from internal or indexed data.\n2. The user references team names, product names, release plans, configurations, requirements, or official information.\n3. 
The user needs a factual, grounded answer.\nDo **not** use retrieval if:\n- The question is purely creative (e.g., storytelling, analogies) or personal preference.\n- The user simply wants text reformatted or rewritten from what is already present in the conversation.\nWhen uncertain → **Retrieve.** Retrieval is low risk and improves grounding.\n### URL Ingestion Rules\nOnly ingest URLs when the user explicitly says:\n- \"Read this link\"\n- \"Summarize this webpage\"\n- \"What does this site say?\"\n- \"Ingest this URL\"\nIf unclear → ask a clarifying question.\n### Calculator Usage Rules\nUse the calculator when:\n- Performing arithmetic\n- Estimating totals\n- Comparing values\n- Modeling cost, time, effort, scale, or projections\nDo not perform math internally. **Call the calculator tool instead.**\n### Answer Construction Rules\n1. When asked: \"What is OpenRAG\", answer the following:\n\"OpenRAG is an open-source package for building agentic RAG systems. It supports integration with a wide range of orchestration tools, vector databases, and LLM providers. OpenRAG connects and amplifies three popular, proven open-source projects into one powerful platform:\n**Langflow** Langflow is a powerful tool to build and deploy AI agents and MCP servers [Read more](https://www.langflow.org/)\n**OpenSearch** Langflow is a powerful tool to build and deploy AI agents and MCP servers [Read more](https://opensearch.org/)\n**Docling** Langflow is a powerful tool to build and deploy AI agents and MCP servers [Read more](https://www.docling.ai/)\"\n2. Synthesize retrieved or ingested content in your own words.\n3. Support factual claims with citations in the format:\n (Source: <document_name_or_id>)\n4. If no supporting evidence is found:\n Say: \"No relevant supporting sources were found for that request.\"\n5. Never invent facts or hallucinate details.\n6. Be concise, direct, and confident. \n7. Do not reveal internal chain-of-thought.",
"content": "You are the OpenRAG Agent. You answer questions using retrieval, reasoning, and tool use.\nYou have access to several tools. Your job is to determine **which tool to use and when**.\n### Available Tools\n- OpenSearch Retrieval Tool:\n Use this to search the indexed knowledge base. Use when the user asks about product details, internal concepts, processes, architecture, documentation, roadmaps, or anything that may be stored in the index.\n- Conversation History:\n Use this to maintain continuity when the user is referring to previous turns. \n Do not treat history as a factual source.\n- Conversation File Context:\n Use this when the user asks about a document they uploaded or refers directly to its contents.\n- URL Ingestion Tool:\n Use this **only** when the user explicitly asks you to read, summarize, or analyze the content of a URL.\n Do not ingest URLs automatically.\n- Calculator / Expression Evaluation Tool:\n Use this when the user asks to compare numbers, compute estimates, calculate totals, analyze pricing, or answer any question requiring mathematics or quantitative reasoning.\n If the answer requires arithmetic, call the calculator tool rather than calculating internally.\n### Retrieval Decision Rules\nUse OpenSearch **whenever**:\n1. The question may be answered from internal or indexed data.\n2. The user references team names, product names, release plans, configurations, requirements, or official information.\n3. 
The user needs a factual, grounded answer.\nDo **not** use retrieval if:\n- The question is purely creative (e.g., storytelling, analogies) or personal preference.\n- The user simply wants text reformatted or rewritten from what is already present in the conversation.\nWhen uncertain → **Retrieve.** Retrieval is low risk and improves grounding.\n### URL Ingestion Rules\nOnly ingest URLs when the user explicitly says:\n- \"Read this link\"\n- \"Summarize this webpage\"\n- \"What does this site say?\"\n- \"Ingest this URL\"\nIf unclear → ask a clarifying question.\n### Calculator Usage Rules\nUse the calculator when:\n- Performing arithmetic\n- Estimating totals\n- Comparing values\n- Modeling cost, time, effort, scale, or projections\nDo not perform math internally. **Call the calculator tool instead.**\n### Answer Construction Rules\n1. When asked: \"What is OpenRAG\", answer the following:\n\"OpenRAG is an open-source package for building agentic RAG systems. It supports integration with a wide range of orchestration tools, vector databases, and LLM providers. OpenRAG connects and amplifies three popular, proven open-source projects into one powerful platform:\n**Langflow** Langflow is a powerful tool to build and deploy AI agents and MCP servers. [Read more](https://www.langflow.org/)\n**OpenSearch** OpenSearch is an open source, search and observability suite that brings order to unstructured data at scale. [Read more](https://opensearch.org/)\n**Docling** Docling simplifies document processing with advanced PDF understanding, OCR support, and seamless AI integrations. Parse PDFs, DOCX, PPTX, images & more. [Read more](https://www.docling.ai/)\"\n2. Synthesize retrieved or ingested content in your own words.\n3. Support factual claims with citations in the format:\n (Source: <document_name_or_id>)\n4. If no supporting evidence is found:\n Say: \"No relevant supporting sources were found for that request.\"\n5. Never invent facts or hallucinate details.\n6. 
Be concise, direct, and confident. \n7. Do not reveal internal chain-of-thought.",
}
],
"previous_response_id": previous_response_id, # Parent response_id for branching

View file

@ -166,6 +166,10 @@ class GoogleDriveConnector(BaseConnector):
# -------------------------
# Helpers
# -------------------------
def _clear_shortcut_cache(self) -> None:
    """Drop every cached shortcut resolution.

    The cache maps shortcut target IDs to previously resolved file
    metadata; clearing it before a fresh listing ensures stale target
    metadata from an earlier sync cannot leak into the new one.
    """
    self._shortcut_cache.clear()
@property
def _drives_get_flags(self) -> Dict[str, Any]:
"""
@ -208,6 +212,10 @@ class GoogleDriveConnector(BaseConnector):
if target_id in self._shortcut_cache:
return self._shortcut_cache[target_id]
if self.service is None:
logger.warning("Cannot resolve shortcut - service not initialized")
return file_obj
try:
meta = (
self.service.files()
@ -231,6 +239,11 @@ class GoogleDriveConnector(BaseConnector):
"""
List immediate children of a folder.
"""
if self.service is None:
raise RuntimeError(
"Google Drive service is not initialized. Please authenticate first."
)
query = f"'{folder_id}' in parents and trashed = false"
page_token = None
results: List[Dict[str, Any]] = []
@ -332,6 +345,9 @@ class GoogleDriveConnector(BaseConnector):
- items inside folder_ids (with optional recursion)
Shortcuts are resolved to their targets automatically.
"""
# Clear shortcut cache to ensure fresh data
self._clear_shortcut_cache()
seen: Set[str] = set()
items: List[Dict[str, Any]] = []
folders_to_expand: List[str] = []
@ -374,6 +390,10 @@ class GoogleDriveConnector(BaseConnector):
# - OR default to entire drive.
# Here we choose to require explicit selection:
if not self.cfg.file_ids and not self.cfg.folder_ids:
logger.warning(
"No file_ids or folder_ids specified - returning empty result. "
"Explicit selection is required."
)
return []
items = self._filter_by_mime(items)
@ -383,6 +403,16 @@ class GoogleDriveConnector(BaseConnector):
for m in items
if m.get("mimeType") != "application/vnd.google-apps.folder"
]
# Log a warning if we ended up with no files after expansion/filtering
if not items and (self.cfg.file_ids or self.cfg.folder_ids):
logger.warning(
f"No files found after expanding and filtering. "
f"file_ids={self.cfg.file_ids}, folder_ids={self.cfg.folder_ids}. "
f"This could mean: (1) folders are empty, (2) all files were filtered by mime types, "
f"or (3) permissions prevent access to the files."
)
return items
# -------------------------
@ -416,6 +446,11 @@ class GoogleDriveConnector(BaseConnector):
Download bytes for a given file (exporting if Google-native).
Raises ValueError if the item is a folder (folders cannot be downloaded).
"""
if self.service is None:
raise RuntimeError(
"Google Drive service is not initialized. Please authenticate first."
)
file_id = file_meta["id"]
file_name = file_meta.get("name", "unknown")
mime_type = file_meta.get("mimeType") or ""
@ -543,6 +578,12 @@ class GoogleDriveConnector(BaseConnector):
- If page_token is None: return all files in one batch.
- Otherwise: return {} and no next_page_token.
"""
# Ensure service is initialized
if self.service is None:
raise RuntimeError(
"Google Drive service is not initialized. Please authenticate first."
)
try:
items = self._iter_selected_items()
@ -560,12 +601,12 @@ class GoogleDriveConnector(BaseConnector):
"next_page_token": None, # no more pages
}
except Exception as e:
# Log the error
try:
logger.error(f"GoogleDriveConnector.list_files failed: {e}")
except Exception:
pass
return {"files": [], "next_page_token": None}
# Log the error and re-raise to surface authentication/permission issues
logger.error(
f"GoogleDriveConnector.list_files failed: {e}",
exc_info=True
)
raise
async def get_file_content(self, file_id: str) -> ConnectorDocument:
"""

View file

@ -60,8 +60,24 @@ class GoogleDriveOAuth:
# If credentials are expired, refresh them
if self.creds and self.creds.expired and self.creds.refresh_token:
self.creds.refresh(Request())
await self.save_credentials()
try:
self.creds.refresh(Request())
await self.save_credentials()
except Exception as e:
# Refresh failed - likely refresh token expired or revoked
# Clear credentials and raise a clear error
self.creds = None
# Try to clean up the invalid token file
if os.path.exists(self.token_file):
try:
os.remove(self.token_file)
except Exception:
pass
raise ValueError(
f"Failed to refresh Google Drive credentials. "
f"The refresh token may have expired or been revoked. "
f"Please re-authenticate: {str(e)}"
) from e
return self.creds

View file

@ -95,6 +95,12 @@ class OneDriveConnector(BaseConnector):
self._default_params = {
"$select": "id,name,size,lastModifiedDateTime,createdDateTime,webUrl,file,folder,@microsoft.graph.downloadUrl"
}
# Selective sync support (similar to Google Drive)
self.cfg = type('OneDriveConfig', (), {
'file_ids': config.get('file_ids') or config.get('selected_files') or config.get('selected_file_ids'),
'folder_ids': config.get('folder_ids') or config.get('selected_folders') or config.get('selected_folder_ids'),
})()
@property
def _graph_base_url(self) -> str:
@ -251,6 +257,10 @@ class OneDriveConnector(BaseConnector):
if not await self.authenticate():
raise RuntimeError("OneDrive authentication failed during file listing")
# If file_ids or folder_ids are specified in config, use selective sync
if self.cfg.file_ids or self.cfg.folder_ids:
return await self._list_selected_files()
files: List[Dict[str, Any]] = []
max_files_value = max_files if max_files is not None else 100
@ -349,6 +359,14 @@ class OneDriveConnector(BaseConnector):
response = await self._make_graph_request(url, params=params)
item = response.json()
# Check if it's a folder
if item.get("folder"):
return {
"id": file_id,
"name": item.get("name", ""),
"isFolder": True,
}
if item.get("file"):
return {
"id": file_id,
@ -360,6 +378,7 @@ class OneDriveConnector(BaseConnector):
"mime_type": item.get("file", {}).get("mimeType", self._get_mime_type(item.get("name", ""))),
"url": item.get("webUrl", ""),
"download_url": item.get("@microsoft.graph.downloadUrl"),
"isFolder": False,
}
return None
@ -429,6 +448,62 @@ class OneDriveConnector(BaseConnector):
response.raise_for_status()
return response
async def _list_selected_files(self) -> Dict[str, Any]:
    """List only the explicitly selected files/folders (selective sync).

    Selected file ids that turn out to be folders are expanded to their
    contents, and selected folder ids are enumerated recursively. Results
    are de-duplicated by item id, since the same file can be reachable both
    directly and through a selected (or overlapping) folder.

    Returns:
        Dict with "files" (flat list of file-metadata dicts) and
        "next_page_token" (always None — selective sync is not paginated).
    """
    files: List[Dict[str, Any]] = []
    seen_ids: set = set()

    def _append_unique(metas: List[Dict[str, Any]]) -> None:
        # Keep first occurrence only; items without an "id" are kept as-is.
        for meta in metas:
            item_id = meta.get("id")
            if item_id is not None:
                if item_id in seen_ids:
                    continue
                seen_ids.add(item_id)
            files.append(meta)

    # Process selected file IDs (best-effort: failures are logged, not raised)
    if self.cfg.file_ids:
        for file_id in self.cfg.file_ids:
            try:
                file_meta = await self._get_file_metadata_by_id(file_id)
                if not file_meta:
                    continue
                if file_meta.get('isFolder', False):
                    # A "file" id that is actually a folder: expand its contents
                    _append_unique(await self._list_folder_contents(file_id))
                else:
                    _append_unique([file_meta])
            except Exception as e:
                logger.warning(f"Failed to get file {file_id}: {e}")
    # Process selected folder IDs
    if self.cfg.folder_ids:
        for folder_id in self.cfg.folder_ids:
            try:
                _append_unique(await self._list_folder_contents(folder_id))
            except Exception as e:
                logger.warning(f"Failed to list folder {folder_id}: {e}")
    return {"files": files, "next_page_token": None}
async def _list_folder_contents(self, folder_id: str) -> List[Dict[str, Any]]:
    """Recursively list every file under a OneDrive folder.

    Follows Microsoft Graph collection paging (`@odata.nextLink`) so folders
    with more children than one response page are fully enumerated, and
    recurses into subfolders.

    Args:
        folder_id: Graph driveItem id of the folder to enumerate.

    Returns:
        Flat list of file-metadata dicts. Enumeration is best-effort:
        failures are logged and whatever was collected so far is returned.
    """
    files: List[Dict[str, Any]] = []
    try:
        url = f"{self._graph_base_url}/me/drive/items/{folder_id}/children"
        # Send $select params only on the first request; a nextLink URL
        # already encodes all query options.
        params: Any = dict(self._default_params)
        while url:
            response = await self._make_graph_request(url, params=params)
            data = response.json()
            for item in data.get("value", []):
                if item.get("file"):  # regular file
                    file_meta = await self._get_file_metadata_by_id(item.get("id"))
                    if file_meta:
                        files.append(file_meta)
                elif item.get("folder"):  # subfolder: recurse
                    files.extend(await self._list_folder_contents(item.get("id")))
            url = data.get("@odata.nextLink")
            params = None
    except Exception as e:
        logger.error(f"Failed to list folder contents for {folder_id}: {e}")
    return files
def _get_mime_type(self, filename: str) -> str:
"""Get MIME type based on file extension."""
import mimetypes

View file

@ -100,6 +100,12 @@ class SharePointConnector(BaseConnector):
self._default_params = {
"$select": "id,name,size,lastModifiedDateTime,createdDateTime,webUrl,file,folder,@microsoft.graph.downloadUrl"
}
# Selective sync support (similar to Google Drive and OneDrive)
self.cfg = type('SharePointConfig', (), {
'file_ids': config.get('file_ids') or config.get('selected_files') or config.get('selected_file_ids'),
'folder_ids': config.get('folder_ids') or config.get('selected_folders') or config.get('selected_folder_ids'),
})()
@property
def _graph_base_url(self) -> str:
@ -293,6 +299,10 @@ class SharePointConnector(BaseConnector):
if not await self.authenticate():
raise RuntimeError("SharePoint authentication failed during file listing")
# If file_ids or folder_ids are specified in config, use selective sync
if self.cfg.file_ids or self.cfg.folder_ids:
return await self._list_selected_files()
files = []
max_files_value = max_files if max_files is not None else 100
@ -426,6 +436,14 @@ class SharePointConnector(BaseConnector):
"download_url": item.get("@microsoft.graph.downloadUrl")
}
# Check if it's a folder
if item.get("folder"):
return {
"id": file_id,
"name": item.get("name", ""),
"isFolder": True,
}
return None
except Exception as e:
@ -453,6 +471,67 @@ class SharePointConnector(BaseConnector):
logger.error(f"Failed to download file content for {file_id}: {e}")
raise
async def _list_selected_files(self) -> Dict[str, Any]:
    """List only the explicitly selected files/folders (selective sync).

    Selected file ids that resolve to folders are expanded to their
    contents, and selected folder ids are enumerated recursively. Results
    are de-duplicated by item id, since the same file can be reachable both
    directly and through a selected (or overlapping) folder.

    Returns:
        Dict with "files" (flat list of file-metadata dicts) and
        "next_page_token" (always None — selective sync is not paginated).
    """
    files: List[Dict[str, Any]] = []
    seen_ids: set = set()

    def _append_unique(metas: List[Dict[str, Any]]) -> None:
        # Keep first occurrence only; items without an "id" are kept as-is.
        for meta in metas:
            item_id = meta.get("id")
            if item_id is not None:
                if item_id in seen_ids:
                    continue
                seen_ids.add(item_id)
            files.append(meta)

    # Process selected file IDs (best-effort: failures are logged, not raised)
    if self.cfg.file_ids:
        for file_id in self.cfg.file_ids:
            try:
                file_meta = await self._get_file_metadata_by_id(file_id)
                if not file_meta:
                    continue
                if file_meta.get('isFolder', False):
                    # A "file" id that is actually a folder: expand its contents
                    _append_unique(await self._list_folder_contents(file_id))
                else:
                    _append_unique([file_meta])
            except Exception as e:
                logger.warning(f"Failed to get file {file_id}: {e}")
    # Process selected folder IDs
    if self.cfg.folder_ids:
        for folder_id in self.cfg.folder_ids:
            try:
                _append_unique(await self._list_folder_contents(folder_id))
            except Exception as e:
                logger.warning(f"Failed to list folder {folder_id}: {e}")
    return {"files": files, "next_page_token": None}
async def _list_folder_contents(self, folder_id: str) -> List[Dict[str, Any]]:
    """Recursively list every file under a SharePoint folder.

    Uses the site-scoped drive URL when the connector is configured with a
    SharePoint site (per `_parse_sharepoint_url`), otherwise falls back to
    the signed-in user's drive. Follows Microsoft Graph collection paging
    (`@odata.nextLink`) so folders with more children than one response page
    are fully enumerated, and recurses into subfolders.

    Args:
        folder_id: Graph driveItem id of the folder to enumerate.

    Returns:
        Flat list of file-metadata dicts. Enumeration is best-effort:
        failures are logged and whatever was collected so far is returned.
    """
    files: List[Dict[str, Any]] = []
    try:
        site_info = self._parse_sharepoint_url()
        if site_info:
            url = f"{self._graph_base_url}/sites/{site_info['host_name']}:/sites/{site_info['site_name']}:/drive/items/{folder_id}/children"
        else:
            url = f"{self._graph_base_url}/me/drive/items/{folder_id}/children"
        # Send $select params only on the first request; a nextLink URL
        # already encodes all query options.
        params: Any = dict(self._default_params)
        while url:
            response = await self._make_graph_request(url, params=params)
            data = response.json()
            for item in data.get("value", []):
                if item.get("file"):  # regular file
                    file_meta = await self._get_file_metadata_by_id(item.get("id"))
                    if file_meta:
                        files.append(file_meta)
                elif item.get("folder"):  # subfolder: recurse
                    files.extend(await self._list_folder_contents(item.get("id")))
            url = data.get("@odata.nextLink")
            params = None
    except Exception as e:
        logger.error(f"Failed to list folder contents for {folder_id}: {e}")
    return files
async def _download_file_from_url(self, download_url: str) -> bytes:
"""Download file content from direct download URL"""
try: