Merge pull request #573 from langflow-ai/onboarding-fqs

commit 9232e45d93

31 changed files with 1236 additions and 218 deletions

File diff suppressed because one or more lines are too long
@@ -3,6 +3,7 @@ import {
   useMutation,
   useQueryClient,
 } from "@tanstack/react-query";
+import { ONBOARDING_OPENRAG_DOCS_FILTER_ID_KEY } from "@/lib/constants";
 
 export interface OnboardingVariables {
   // Provider selection
@@ -28,6 +29,7 @@ export interface OnboardingVariables {
 interface OnboardingResponse {
   message: string;
   edited: boolean;
+  openrag_docs_filter_id?: string;
 }
 
 export const useOnboardingMutation = (
@@ -59,6 +61,15 @@ export const useOnboardingMutation = (
 
   return useMutation({
     mutationFn: submitOnboarding,
+    onSuccess: (data) => {
+      // Store OpenRAG Docs filter ID if returned
+      if (data.openrag_docs_filter_id && typeof window !== "undefined") {
+        localStorage.setItem(
+          ONBOARDING_OPENRAG_DOCS_FILTER_ID_KEY,
+          data.openrag_docs_filter_id
+        );
+      }
+    },
    onSettled: () => {
       // Invalidate settings query to refetch updated data
       queryClient.invalidateQueries({ queryKey: ["settings"] });
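Note (sketch, not part of the diff): the hunk above pairs a success-only side effect with cache invalidation. In TanStack Query, onSuccess runs only when the mutation resolves, while onSettled runs after either outcome, so the invalidation fires even if the request failed. A minimal sketch of that split; the /api/example endpoint and "example_id" key are invented for illustration:

import { useMutation, useQueryClient } from "@tanstack/react-query";

function useExampleMutation() {
  const queryClient = useQueryClient();
  return useMutation({
    // Hypothetical endpoint, for illustration only
    mutationFn: async (vars: { name: string }) => {
      const res = await fetch("/api/example", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify(vars),
      });
      return (await res.json()) as { id?: string };
    },
    onSuccess: (data) => {
      // Success-only side effect (guard for SSR, as in the hunk above)
      if (data.id && typeof window !== "undefined") {
        localStorage.setItem("example_id", data.id);
      }
    },
    onSettled: () => {
      // Runs on success or error: keep cached settings in sync either way
      queryClient.invalidateQueries({ queryKey: ["settings"] });
    },
  });
}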
@@ -60,9 +60,9 @@ export const useDoclingHealthQuery = (
       // If healthy, check every 30 seconds; otherwise check every 3 seconds
       return query.state.data?.status === "healthy" ? 30000 : 3000;
     },
-    refetchOnWindowFocus: true,
+    refetchOnWindowFocus: false, // Disabled to reduce unnecessary calls on tab switches
     refetchOnMount: true,
-    staleTime: 30000, // Consider data stale after 25 seconds
+    staleTime: 30000, // Consider data fresh for 30 seconds
     ...options,
   },
   queryClient,
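Note (sketch, not part of the diff): the comment fix matters because staleTime and refetchInterval are independent knobs, and the function form of refetchInterval lets polling adapt to the last result. The pattern in isolation, assuming a hypothetical health endpoint that returns { status }:

import { useQuery } from "@tanstack/react-query";

function useHealthPolling(url: string) {
  return useQuery({
    queryKey: ["health", url],
    queryFn: async () => (await fetch(url)).json() as Promise<{ status: string }>,
    // Poll slowly while healthy, quickly while unhealthy
    refetchInterval: (query) =>
      query.state.data?.status === "healthy" ? 30000 : 3000,
    staleTime: 30000, // data treated as fresh for 30 seconds
    refetchOnWindowFocus: false, // avoid extra calls on tab switches
  });
}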
@@ -51,13 +51,15 @@ export const useGetConversationsQuery = (
 ) => {
   const queryClient = useQueryClient();
 
-  async function getConversations(): Promise<ChatConversation[]> {
+  async function getConversations(context: { signal?: AbortSignal }): Promise<ChatConversation[]> {
     try {
       // Fetch from the selected endpoint only
       const apiEndpoint =
         endpoint === "chat" ? "/api/chat/history" : "/api/langflow/history";
 
-      const response = await fetch(apiEndpoint);
+      const response = await fetch(apiEndpoint, {
+        signal: context.signal,
+      });
 
       if (!response.ok) {
         console.error(`Failed to fetch conversations: ${response.status}`);
@@ -84,6 +86,10 @@ export const useGetConversationsQuery = (
 
       return conversations;
     } catch (error) {
+      // Ignore abort errors - these are expected when requests are cancelled
+      if (error instanceof Error && error.name === 'AbortError') {
+        return [];
+      }
       console.error(`Failed to fetch ${endpoint} conversations:`, error);
       return [];
     }
@@ -94,8 +100,11 @@ export const useGetConversationsQuery = (
       queryKey: ["conversations", endpoint, refreshTrigger],
       placeholderData: (prev) => prev,
       queryFn: getConversations,
-      staleTime: 0, // Always consider data stale to ensure fresh data on trigger changes
+      staleTime: 5000, // Consider data fresh for 5 seconds to prevent excessive refetching
       gcTime: 5 * 60 * 1000, // Keep in cache for 5 minutes
+      networkMode: 'always', // Ensure requests can be cancelled
+      refetchOnMount: false, // Don't refetch on every mount
+      refetchOnWindowFocus: false, // Don't refetch when window regains focus
       ...options,
     },
     queryClient,
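Note (sketch, not part of the diff): the new context parameter is TanStack Query's query-function context, which carries an AbortSignal that fires when the query is cancelled; forwarding it to fetch is what makes cancellation effective, and the AbortError branch keeps cancellations out of the error path. The generic shape, with an illustrative endpoint parameter:

import { useQuery } from "@tanstack/react-query";

function useCancellableList(endpoint: string) {
  return useQuery({
    queryKey: ["list", endpoint],
    queryFn: async ({ signal }) => {
      try {
        // Forward the signal so React Query can abort in-flight requests
        const res = await fetch(endpoint, { signal });
        if (!res.ok) return [] as unknown[];
        return (await res.json()) as unknown[];
      } catch (error) {
        // Cancellation surfaces as AbortError; treat it as "no data", not failure
        if (error instanceof Error && error.name === "AbortError") {
          return [] as unknown[];
        }
        throw error;
      }
    },
  });
}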
frontend/app/api/queries/useGetFilterByIdQuery.ts (new file, 21 lines)
@@ -0,0 +1,21 @@
+import type { KnowledgeFilter } from "./useGetFiltersSearchQuery";
+
+export async function getFilterById(
+  filterId: string
+): Promise<KnowledgeFilter | null> {
+  try {
+    const response = await fetch(`/api/knowledge-filter/${filterId}`, {
+      method: "GET",
+      headers: { "Content-Type": "application/json" },
+    });
+
+    const json = await response.json();
+    if (!response.ok || !json.success) {
+      return null;
+    }
+    return json.filter as KnowledgeFilter;
+  } catch (error) {
+    console.error("Failed to fetch filter by ID:", error);
+    return null;
+  }
+}
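Note (sketch, not part of the diff): getFilterById swallows both HTTP and network failures and resolves to null, so call sites branch on the value instead of wrapping in try/catch. An illustrative call with a made-up ID:

const filter = await getFilterById("some-filter-id"); // hypothetical ID
if (filter) {
  console.log("Loaded filter:", filter.name);
} else {
  // not found, non-2xx response, or network error
}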
@@ -34,7 +34,7 @@ export const useGetNudgesQuery = (
     });
   }
 
-  async function getNudges(): Promise<Nudge[]> {
+  async function getNudges(context: { signal?: AbortSignal }): Promise<Nudge[]> {
     try {
       const requestBody: {
         filters?: NudgeFilters;
@@ -58,6 +58,7 @@ export const useGetNudgesQuery = (
           "Content-Type": "application/json",
         },
         body: JSON.stringify(requestBody),
+        signal: context.signal,
       });
       const data = await response.json();
 
@@ -67,6 +68,10 @@ export const useGetNudgesQuery = (
 
       return DEFAULT_NUDGES;
     } catch (error) {
+      // Ignore abort errors - these are expected when requests are cancelled
+      if (error instanceof Error && error.name === 'AbortError') {
+        return DEFAULT_NUDGES;
+      }
       console.error("Error getting nudges", error);
       return DEFAULT_NUDGES;
     }
@@ -76,6 +81,10 @@ export const useGetNudgesQuery = (
     {
       queryKey: ["nudges", chatId, filters, limit, scoreThreshold],
       queryFn: getNudges,
+      staleTime: 10000, // Consider data fresh for 10 seconds to prevent rapid refetching
+      networkMode: 'always', // Ensure requests can be cancelled
+      refetchOnMount: false, // Don't refetch on every mount
+      refetchOnWindowFocus: false, // Don't refetch when window regains focus
       refetchInterval: (query) => {
         // If data is empty, refetch every 5 seconds
         const data = query.state.data;
@@ -127,6 +127,12 @@ export const useGetSearchQuery = (
         },
         body: JSON.stringify(searchPayload),
       });
+
+      if (!response.ok) {
+        const errorData = await response.json().catch(() => ({ error: "Unknown error" }));
+        throw new Error(errorData.error || `Search failed with status ${response.status}`);
+      }
+
       const data = await response.json();
       // Group chunks by filename to create file results similar to page.tsx
       const fileMap = new Map<
@@ -198,7 +204,8 @@ export const useGetSearchQuery = (
       return files;
     } catch (error) {
       console.error("Error getting files", error);
-      return [];
+      // Re-throw the error so React Query can handle it and trigger onError callbacks
+      throw error;
     }
   }
 
@@ -207,6 +214,7 @@ export const useGetSearchQuery = (
       queryKey: ["search", queryData, query],
       placeholderData: (prev) => prev,
       queryFn: getFiles,
+      retry: false, // Don't retry on errors - show them immediately
       ...options,
     },
     queryClient,
@@ -96,9 +96,9 @@ export const useProviderHealthQuery = (
       // If healthy, check every 30 seconds; otherwise check every 3 seconds
       return query.state.data?.status === "healthy" ? 30000 : 3000;
     },
-    refetchOnWindowFocus: true,
+    refetchOnWindowFocus: false, // Disabled to reduce unnecessary calls on tab switches
     refetchOnMount: true,
-    staleTime: 30000, // Consider data stale after 25 seconds
+    staleTime: 30000, // Consider data fresh for 30 seconds
     enabled: !!settings?.edited && options?.enabled !== false, // Only run after onboarding is complete
     ...options,
   },
@@ -110,6 +110,13 @@ function ChatPage() {
       } else {
         refreshConversationsSilent();
       }
+
+      // Save filter association for this response
+      if (conversationFilter && typeof window !== "undefined") {
+        const newKey = `conversation_filter_${responseId}`;
+        localStorage.setItem(newKey, conversationFilter.id);
+        console.log("[CHAT] Saved filter association:", newKey, "=", conversationFilter.id);
+      }
     }
   },
   onError: (error) => {
@@ -696,11 +703,18 @@ function ChatPage() {
     // Use passed previousResponseId if available, otherwise fall back to state
     const responseIdToUse = previousResponseId || previousResponseIds[endpoint];
 
+    console.log("[CHAT] Sending streaming message:", {
+      conversationFilter: conversationFilter?.id,
+      currentConversationId,
+      responseIdToUse,
+    });
+
     // Use the hook to send the message
     await sendStreamingMessage({
       prompt: userMessage.content,
       previousResponseId: responseIdToUse || undefined,
       filters: processedFilters,
+      filter_id: conversationFilter?.id, // ✅ Add filter_id for this conversation
       limit: parsedFilterData?.limit ?? 10,
       scoreThreshold: parsedFilterData?.scoreThreshold ?? 0,
     });
@@ -781,6 +795,19 @@ function ChatPage() {
       requestBody.previous_response_id = currentResponseId;
     }
 
+    // Add filter_id if a filter is selected for this conversation
+    if (conversationFilter) {
+      requestBody.filter_id = conversationFilter.id;
+    }
+
+    // Debug logging
+    console.log("[DEBUG] Sending message with:", {
+      previous_response_id: requestBody.previous_response_id,
+      filter_id: requestBody.filter_id,
+      currentConversationId,
+      previousResponseIds,
+    });
+
     const response = await fetch(apiEndpoint, {
       method: "POST",
       headers: {
@@ -804,6 +831,8 @@ function ChatPage() {
 
     // Store the response ID if present for this endpoint
     if (result.response_id) {
+      console.log("[DEBUG] Received response_id:", result.response_id, "currentConversationId:", currentConversationId);
+
       setPreviousResponseIds((prev) => ({
         ...prev,
         [endpoint]: result.response_id,
@@ -811,12 +840,21 @@ function ChatPage() {
 
       // If this is a new conversation (no currentConversationId), set it now
       if (!currentConversationId) {
+        console.log("[DEBUG] Setting currentConversationId to:", result.response_id);
        setCurrentConversationId(result.response_id);
         refreshConversations(true);
       } else {
+        console.log("[DEBUG] Existing conversation, doing silent refresh");
         // For existing conversations, do a silent refresh to keep backend in sync
         refreshConversationsSilent();
       }
+
+      // Carry forward the filter association to the new response_id
+      if (conversationFilter && typeof window !== "undefined") {
+        const newKey = `conversation_filter_${result.response_id}`;
+        localStorage.setItem(newKey, conversationFilter.id);
+        console.log("[DEBUG] Saved filter association:", newKey, "=", conversationFilter.id);
+      }
     }
   } else {
     console.error("Chat failed:", result.error);
@@ -75,6 +75,7 @@ function SearchPage() {
   const { parsedFilterData, queryOverride } = useKnowledgeFilter();
   const [selectedRows, setSelectedRows] = useState<File[]>([]);
   const [showBulkDeleteDialog, setShowBulkDeleteDialog] = useState(false);
+  const lastErrorRef = useRef<string | null>(null);
 
   const deleteDocumentMutation = useDeleteDocument();
 
@@ -82,10 +83,28 @@ function SearchPage() {
     refreshTasks();
   }, [refreshTasks]);
 
-  const { data: searchData = [], isFetching } = useGetSearchQuery(
+  const { data: searchData = [], isFetching, error, isError } = useGetSearchQuery(
     queryOverride,
     parsedFilterData,
   );
 
+  // Show toast notification for search errors
+  useEffect(() => {
+    if (isError && error) {
+      const errorMessage = error instanceof Error ? error.message : "Search failed";
+      // Avoid showing duplicate toasts for the same error
+      if (lastErrorRef.current !== errorMessage) {
+        lastErrorRef.current = errorMessage;
+        toast.error("Search error", {
+          description: errorMessage,
+          duration: 5000,
+        });
+      }
+    } else if (!isError) {
+      // Reset when query succeeds
+      lastErrorRef.current = null;
+    }
+  }, [isError, error]);
   // Convert TaskFiles to File format and merge with backend results
   const taskFilesAsFiles: File[] = taskFiles.map((taskFile) => {
     return {
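Note (sketch, not part of the diff): this toast effect works because the search queryFn now re-throws (earlier hunk) and retry is false, so failures reach the component as isError/error on the next render; the ref keeps the effect from firing one toast per render for the same error. The deduplication shape extracted as a standalone hook; notify stands in for toast.error:

import { useEffect, useRef } from "react";

function useErrorNotification(
  isError: boolean,
  error: unknown,
  notify: (message: string) => void,
) {
  const lastErrorRef = useRef<string | null>(null);
  useEffect(() => {
    if (isError && error) {
      const message = error instanceof Error ? error.message : "Request failed";
      // Only notify once per distinct error message
      if (lastErrorRef.current !== message) {
        lastErrorRef.current = message;
        notify(message);
      }
    } else if (!isError) {
      // Reset once the query recovers
      lastErrorRef.current = null;
    }
  }, [isError, error, notify]);
}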
@@ -209,6 +209,16 @@ const OnboardingCard = ({
   const onboardingMutation = useOnboardingMutation({
     onSuccess: (data) => {
       console.log("Onboarding completed successfully", data);
+
+      // Save OpenRAG docs filter ID if sample data was ingested
+      if (data.openrag_docs_filter_id && typeof window !== "undefined") {
+        localStorage.setItem(
+          "onboarding_openrag_docs_filter_id",
+          data.openrag_docs_filter_id
+        );
+        console.log("Saved OpenRAG docs filter ID:", data.openrag_docs_filter_id);
+      }
+
       // Update provider health cache to healthy since backend just validated
       const provider =
         (isEmbedding ? settings.embedding_provider : settings.llm_provider) ||
@@ -2,20 +2,30 @@
 
 import { useEffect, useRef, useState } from "react";
 import { StickToBottom } from "use-stick-to-bottom";
+import { getFilterById } from "@/app/api/queries/useGetFilterByIdQuery";
 import { AssistantMessage } from "@/app/chat/_components/assistant-message";
 import Nudges from "@/app/chat/_components/nudges";
 import { UserMessage } from "@/app/chat/_components/user-message";
-import type { Message } from "@/app/chat/_types/types";
+import type { Message, SelectedFilters } from "@/app/chat/_types/types";
 import OnboardingCard from "@/app/onboarding/_components/onboarding-card";
+import { useChat } from "@/contexts/chat-context";
 import { useChatStreaming } from "@/hooks/useChatStreaming";
 import {
   ONBOARDING_ASSISTANT_MESSAGE_KEY,
+  ONBOARDING_OPENRAG_DOCS_FILTER_ID_KEY,
   ONBOARDING_SELECTED_NUDGE_KEY,
 } from "@/lib/constants";
 
 import { OnboardingStep } from "./onboarding-step";
 import OnboardingUpload from "./onboarding-upload";
 
+// Filters for OpenRAG documentation
+const OPENRAG_DOCS_FILTERS: SelectedFilters = {
+  data_sources: ["openrag-documentation.pdf"],
+  document_types: [],
+  owners: [],
+};
+
 export function OnboardingContent({
   handleStepComplete,
   handleStepBack,
@@ -25,6 +35,7 @@ export function OnboardingContent({
   handleStepBack: () => void;
   currentStep: number;
 }) {
+  const { setConversationFilter, setCurrentConversationId } = useChat();
   const parseFailedRef = useRef(false);
   const [responseId, setResponseId] = useState<string | null>(null);
   const [selectedNudge, setSelectedNudge] = useState<string>(() => {
@@ -70,7 +81,7 @@ export function OnboardingContent({
   }, [handleStepBack, currentStep]);
 
   const { streamingMessage, isLoading, sendMessage } = useChatStreaming({
-    onComplete: (message, newResponseId) => {
+    onComplete: async (message, newResponseId) => {
       setAssistantMessage(message);
       // Save assistant message to localStorage when complete
       if (typeof window !== "undefined") {
@@ -88,6 +99,26 @@ export function OnboardingContent({
       }
       if (newResponseId) {
         setResponseId(newResponseId);
+
+        // Set the current conversation ID
+        setCurrentConversationId(newResponseId);
+
+        // Save the filter association for this conversation
+        const openragDocsFilterId = localStorage.getItem(ONBOARDING_OPENRAG_DOCS_FILTER_ID_KEY);
+        if (openragDocsFilterId) {
+          try {
+            // Load the filter and set it in the context with explicit responseId
+            // This ensures the filter is saved to localStorage with the correct conversation ID
+            const filter = await getFilterById(openragDocsFilterId);
+            if (filter) {
+              // Pass explicit newResponseId to ensure correct localStorage association
+              setConversationFilter(filter, newResponseId);
+              console.log("[ONBOARDING] Saved filter association:", `conversation_filter_${newResponseId}`, "=", openragDocsFilterId);
+            }
+          } catch (error) {
+            console.error("Failed to associate filter with conversation:", error);
+          }
+        }
       }
     },
     onError: (error) => {
@@ -115,9 +146,36 @@ export function OnboardingContent({
       localStorage.removeItem(ONBOARDING_ASSISTANT_MESSAGE_KEY);
     }
     setTimeout(async () => {
+      // Check if we have the OpenRAG docs filter ID (sample data was ingested)
+      const openragDocsFilterId =
+        typeof window !== "undefined"
+          ? localStorage.getItem(ONBOARDING_OPENRAG_DOCS_FILTER_ID_KEY)
+          : null;
+
+      // Load and set the OpenRAG docs filter if available
+      let filterToUse = null;
+      console.log("[ONBOARDING] openragDocsFilterId:", openragDocsFilterId);
+      if (openragDocsFilterId) {
+        try {
+          const filter = await getFilterById(openragDocsFilterId);
+          console.log("[ONBOARDING] Loaded filter:", filter);
+          if (filter) {
+            // Pass null to skip localStorage save - no conversation exists yet
+            setConversationFilter(filter, null);
+            filterToUse = filter;
+          }
+        } catch (error) {
+          console.error("Failed to load OpenRAG docs filter:", error);
+        }
+      }
+
+      console.log("[ONBOARDING] Sending message with filter_id:", filterToUse?.id);
       await sendMessage({
         prompt: nudge,
         previousResponseId: responseId || undefined,
+        // Send both filter_id and filters (selections)
+        filter_id: filterToUse?.id,
+        filters: openragDocsFilterId ? OPENRAG_DOCS_FILTERS : undefined,
       });
     }, 1500);
   };
@@ -1,10 +1,15 @@
 import { AnimatePresence, motion } from "motion/react";
 import { type ChangeEvent, useEffect, useRef, useState } from "react";
+import { toast } from "sonner";
+import { useCreateFilter } from "@/app/api/mutations/useCreateFilter";
 import { useGetNudgesQuery } from "@/app/api/queries/useGetNudgesQuery";
 import { useGetTasksQuery } from "@/app/api/queries/useGetTasksQuery";
 import { AnimatedProviderSteps } from "@/app/onboarding/_components/animated-provider-steps";
 import { Button } from "@/components/ui/button";
-import { ONBOARDING_UPLOAD_STEPS_KEY } from "@/lib/constants";
+import {
+  ONBOARDING_UPLOAD_STEPS_KEY,
+  ONBOARDING_USER_DOC_FILTER_ID_KEY,
+} from "@/lib/constants";
 import { uploadFile } from "@/lib/upload-utils";
 
 interface OnboardingUploadProps {
@@ -15,6 +20,11 @@ const OnboardingUpload = ({ onComplete }: OnboardingUploadProps) => {
   const fileInputRef = useRef<HTMLInputElement>(null);
   const [isUploading, setIsUploading] = useState(false);
   const [currentStep, setCurrentStep] = useState<number | null>(null);
+  const [uploadedFilename, setUploadedFilename] = useState<string | null>(null);
+  const [shouldCreateFilter, setShouldCreateFilter] = useState(false);
+  const [isCreatingFilter, setIsCreatingFilter] = useState(false);
+
+  const createFilterMutation = useCreateFilter();
 
   const STEP_LIST = [
     "Uploading your document",
@@ -53,6 +63,60 @@ const OnboardingUpload = ({ onComplete }: OnboardingUploadProps) => {
       // Set to final step to show "Done"
       setCurrentStep(STEP_LIST.length);
 
+      // Create knowledge filter for uploaded document if requested
+      // Guard against race condition: only create if not already creating
+      if (shouldCreateFilter && uploadedFilename && !isCreatingFilter) {
+        // Reset flags immediately (synchronously) to prevent duplicate creation
+        setShouldCreateFilter(false);
+        const filename = uploadedFilename;
+        setUploadedFilename(null);
+        setIsCreatingFilter(true);
+
+        // Get display name from filename (remove extension for cleaner name)
+        const displayName = filename.includes(".")
+          ? filename.substring(0, filename.lastIndexOf("."))
+          : filename;
+
+        const queryData = JSON.stringify({
+          query: "",
+          filters: {
+            data_sources: [filename],
+            document_types: ["*"],
+            owners: ["*"],
+            connector_types: ["*"],
+          },
+          limit: 10,
+          scoreThreshold: 0,
+          color: "green",
+          icon: "file",
+        });
+
+        createFilterMutation
+          .mutateAsync({
+            name: displayName,
+            description: `Filter for ${filename}`,
+            queryData: queryData,
+          })
+          .then((result) => {
+            if (result.filter?.id && typeof window !== "undefined") {
+              localStorage.setItem(
+                ONBOARDING_USER_DOC_FILTER_ID_KEY,
+                result.filter.id,
+              );
+              console.log(
+                "Created knowledge filter for uploaded document",
+                result.filter.id,
+              );
+            }
+          })
+          .catch((error) => {
+            console.error("Failed to create knowledge filter:", error);
+          })
+          .finally(() => {
+            setIsCreatingFilter(false);
+          });
+      }
+
       // Refetch nudges to get new ones
       refetchNudges();
 
@@ -61,7 +125,7 @@ const OnboardingUpload = ({ onComplete }: OnboardingUploadProps) => {
       onComplete();
     }, 1000);
   }
-  }, [tasks, currentStep, onComplete, refetchNudges]);
+  }, [tasks, currentStep, onComplete, refetchNudges, shouldCreateFilter, uploadedFilename]);
 
   const resetFileInput = () => {
     if (fileInputRef.current) {
@@ -77,14 +141,29 @@ const OnboardingUpload = ({ onComplete }: OnboardingUploadProps) => {
     setIsUploading(true);
     try {
       setCurrentStep(0);
-      await uploadFile(file, true);
+      const result = await uploadFile(file, true, true); // Pass createFilter=true
       console.log("Document upload task started successfully");
 
+      // Store filename and createFilter flag in state to create filter after ingestion succeeds
+      if (result.createFilter && result.filename) {
+        setUploadedFilename(result.filename);
+        setShouldCreateFilter(true);
+      }
+
       // Move to processing step - task monitoring will handle completion
       setTimeout(() => {
         setCurrentStep(1);
       }, 1500);
     } catch (error) {
-      console.error("Upload failed", (error as Error).message);
+      const errorMessage = error instanceof Error ? error.message : "Upload failed";
+      console.error("Upload failed", errorMessage);
+
+      // Show error toast notification
+      toast.error("Document upload failed", {
+        description: errorMessage,
+        duration: 5000,
+      });
+
       // Reset on error
       setCurrentStep(null);
     } finally {
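Note (sketch, not part of the diff): the filter-creation block above relies on clearing its trigger state before the async mutation starts, so the next run of the effect cannot re-enter the branch. The guard in isolation; shouldRun / pendingInput / inFlight / doAsyncWork are illustrative stand-ins for shouldCreateFilter / uploadedFilename / isCreatingFilter / createFilterMutation:

// State updates queue a re-render; the next effect run sees the cleared
// flags and bails out before starting a second creation.
if (shouldRun && pendingInput && !inFlight) {
  setShouldRun(false);        // cleared before any await
  const input = pendingInput; // capture the value before clearing it
  setPendingInput(null);
  setInFlight(true);
  doAsyncWork(input).finally(() => setInFlight(false));
}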
@@ -50,7 +50,12 @@ export function OpenAIOnboarding({
       : debouncedApiKey
         ? { apiKey: debouncedApiKey }
         : undefined,
-    { enabled: debouncedApiKey !== "" || getFromEnv || alreadyConfigured },
+    {
+      // Only validate when the user opts in (env) or provides a key.
+      // If a key was previously configured, let the user decide to reuse or replace it
+      // without triggering an immediate validation error.
+      enabled: debouncedApiKey !== "" || getFromEnv,
+    },
   );
   // Use custom hook for model selection logic
   const {
@@ -134,11 +139,12 @@ export function OpenAIOnboarding({
             }
             value={apiKey}
             onChange={(e) => setApiKey(e.target.value)}
-            disabled={alreadyConfigured}
+            // Even if a key exists, allow replacing it to avoid getting stuck on stale creds.
+            disabled={false}
           />
           {alreadyConfigured && (
             <p className="text-mmd text-muted-foreground">
-              Reusing key from model provider selection.
+              Existing OpenAI key detected. You can reuse it or enter a new one.
             </p>
           )}
           {isLoadingModels && (
@@ -1,12 +1,13 @@
 "use client";
 
 import { motion } from "framer-motion";
-import { usePathname } from "next/navigation";
-import { useEffect, useState } from "react";
+import { usePathname, useRouter } from "next/navigation";
+import { useCallback, useEffect, useState } from "react";
 import {
   type ChatConversation,
   useGetConversationsQuery,
 } from "@/app/api/queries/useGetConversationsQuery";
+import { getFilterById } from "@/app/api/queries/useGetFilterByIdQuery";
 import type { Settings } from "@/app/api/queries/useGetSettingsQuery";
 import { OnboardingContent } from "@/app/onboarding/_components/onboarding-content";
 import { ProgressBar } from "@/app/onboarding/_components/progress-bar";
@@ -20,9 +21,11 @@ import {
   HEADER_HEIGHT,
   ONBOARDING_ASSISTANT_MESSAGE_KEY,
   ONBOARDING_CARD_STEPS_KEY,
+  ONBOARDING_OPENRAG_DOCS_FILTER_ID_KEY,
   ONBOARDING_SELECTED_NUDGE_KEY,
   ONBOARDING_STEP_KEY,
   ONBOARDING_UPLOAD_STEPS_KEY,
+  ONBOARDING_USER_DOC_FILTER_ID_KEY,
   SIDEBAR_WIDTH,
   TOTAL_ONBOARDING_STEPS,
 } from "@/lib/constants";
@@ -36,12 +39,16 @@ export function ChatRenderer({
   children: React.ReactNode;
 }) {
   const pathname = usePathname();
+  const router = useRouter();
   const { isAuthenticated, isNoAuthMode } = useAuth();
   const {
     endpoint,
     refreshTrigger,
     refreshConversations,
     startNewConversation,
+    setConversationFilter,
+    setCurrentConversationId,
+    setPreviousResponseIds,
   } = useChat();
 
   // Initialize onboarding state based on local storage and settings
@@ -71,6 +78,78 @@ export function ChatRenderer({
     startNewConversation();
   };
 
+  // Navigate to /chat when onboarding is active so animation reveals chat underneath
+  useEffect(() => {
+    if (!showLayout && pathname !== "/chat" && pathname !== "/") {
+      router.push("/chat");
+    }
+  }, [showLayout, pathname, router]);
+
+  // Helper to store default filter ID for new conversations after onboarding
+  const storeDefaultFilterForNewConversations = useCallback(
+    async (preferUserDoc: boolean) => {
+      if (typeof window === "undefined") return;
+
+      // Check if we already have a default filter set
+      const existingDefault = localStorage.getItem("default_conversation_filter_id");
+      if (existingDefault) {
+        console.log("[FILTER] Default filter already set:", existingDefault);
+        // Try to apply it to context state (don't save to localStorage to avoid overwriting)
+        try {
+          const filter = await getFilterById(existingDefault);
+          if (filter) {
+            // Pass null to skip localStorage save
+            setConversationFilter(filter, null);
+            return; // Successfully loaded and set, we're done
+          }
+        } catch (error) {
+          console.error("Failed to load existing default filter, will set new one:", error);
+          // Filter doesn't exist anymore, clear it and continue to set a new one
+          localStorage.removeItem("default_conversation_filter_id");
+        }
+      }
+
+      // Try to get the appropriate filter ID
+      let filterId: string | null = null;
+
+      if (preferUserDoc) {
+        // Completed full onboarding - prefer user document filter
+        filterId = localStorage.getItem(ONBOARDING_USER_DOC_FILTER_ID_KEY);
+        console.log("[FILTER] User doc filter ID:", filterId);
+      }
+
+      // Fall back to OpenRAG docs filter
+      if (!filterId) {
+        filterId = localStorage.getItem(ONBOARDING_OPENRAG_DOCS_FILTER_ID_KEY);
+        console.log("[FILTER] OpenRAG docs filter ID:", filterId);
+      }
+
+      console.log("[FILTER] Final filter ID to use:", filterId);
+
+      if (filterId) {
+        // Store this as the default filter for new conversations
+        localStorage.setItem("default_conversation_filter_id", filterId);
+
+        // Apply filter to context state only (don't save to localStorage since there's no conversation yet)
+        // The default_conversation_filter_id will be used when a new conversation is started
+        try {
+          const filter = await getFilterById(filterId);
+          console.log("[FILTER] Loaded filter:", filter);
+          if (filter) {
+            // Pass null to skip localStorage save - this prevents overwriting existing conversation filters
+            setConversationFilter(filter, null);
+            console.log("[FILTER] Set conversation filter (no save):", filter.id);
+          }
+        } catch (error) {
+          console.error("Failed to set onboarding filter:", error);
+        }
+      } else {
+        console.log("[FILTER] No filter ID found, not setting default");
+      }
+    },
+    [setConversationFilter]
+  );
+
   // Save current step to local storage whenever it changes
   useEffect(() => {
     if (typeof window !== "undefined" && !showLayout) {
@@ -78,7 +157,7 @@ export function ChatRenderer({
     }
   }, [currentStep, showLayout]);
 
-  const handleStepComplete = () => {
+  const handleStepComplete = async () => {
     if (currentStep < TOTAL_ONBOARDING_STEPS - 1) {
       setCurrentStep(currentStep + 1);
     } else {
@@ -90,6 +169,20 @@ export function ChatRenderer({
         localStorage.removeItem(ONBOARDING_CARD_STEPS_KEY);
         localStorage.removeItem(ONBOARDING_UPLOAD_STEPS_KEY);
       }
+
+      // Clear ALL conversation state so next message starts fresh
+      await startNewConversation();
+
+      // Store the user document filter as default for new conversations and load it
+      await storeDefaultFilterForNewConversations(true);
+
+      // Clean up onboarding filter IDs now that we've set the default
+      if (typeof window !== "undefined") {
+        localStorage.removeItem(ONBOARDING_OPENRAG_DOCS_FILTER_ID_KEY);
+        localStorage.removeItem(ONBOARDING_USER_DOC_FILTER_ID_KEY);
+        console.log("[FILTER] Cleaned up onboarding filter IDs");
+      }
+
       setShowLayout(true);
     }
   };
@@ -109,6 +202,8 @@ export function ChatRenderer({
       localStorage.removeItem(ONBOARDING_CARD_STEPS_KEY);
       localStorage.removeItem(ONBOARDING_UPLOAD_STEPS_KEY);
     }
+    // Store the OpenRAG docs filter as default for new conversations
+    storeDefaultFilterForNewConversations(false);
    setShowLayout(true);
   };
 
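Note (sketch, not part of the diff): the default-filter handoff is a one-shot contract over localStorage. ChatRenderer writes default_conversation_filter_id when onboarding finishes, and startNewConversation (in chat-context, below) consumes and deletes it, so it applies to exactly one new conversation. Reduced to its essentials:

// Writer side (onboarding completion):
localStorage.setItem("default_conversation_filter_id", filterId);

// Reader side (startNewConversation), consume-once:
const id = localStorage.getItem("default_conversation_filter_id");
if (id) {
  localStorage.removeItem("default_conversation_filter_id"); // one-shot
  const filter = await getFilterById(id);
  if (filter) {
    // apply it to the fresh conversation state
  }
}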
@@ -465,6 +465,7 @@ export function KnowledgeFilterPanel() {
             disabled={isSaving}
             variant="outline"
             size="sm"
+            className="relative z-10"
           >
             Cancel
           </Button>
@@ -475,6 +476,7 @@ export function KnowledgeFilterPanel() {
             size="sm"
             onClick={handleDeleteFilter}
             disabled={isSaving}
+            className="relative z-10"
           >
             Delete Filter
           </Button>
@@ -483,7 +485,7 @@ export function KnowledgeFilterPanel() {
             onClick={handleSaveConfiguration}
             disabled={isSaving}
             size="sm"
-            className="relative"
+            className="relative z-10"
           >
             {isSaving && (
               <>
@@ -289,7 +289,7 @@ export function Navigation({
       handleNewConversation();
     } else if (activeConvo) {
       loadConversation(activeConvo);
-      refreshConversations();
+      // Don't call refreshConversations here - it causes unnecessary refetches
     } else if (
       conversations.length > 0 &&
       currentConversationId === null &&
@@ -473,7 +473,7 @@ export function Navigation({
     onClick={() => {
       if (loading || isConversationsLoading) return;
       loadConversation(conversation);
-      refreshConversations();
+      // Don't refresh - just loading an existing conversation
     }}
     disabled={loading || isConversationsLoading}
   >
@@ -65,7 +65,7 @@ interface ChatContextType {
   refreshConversationsSilent: () => Promise<void>;
   refreshTrigger: number;
   refreshTriggerSilent: number;
-  loadConversation: (conversation: ConversationData) => void;
+  loadConversation: (conversation: ConversationData) => Promise<void>;
   startNewConversation: () => void;
   conversationData: ConversationData | null;
   forkFromResponse: (responseId: string) => void;
@@ -77,7 +77,8 @@ interface ChatContextType {
   conversationLoaded: boolean;
   setConversationLoaded: (loaded: boolean) => void;
   conversationFilter: KnowledgeFilter | null;
-  setConversationFilter: (filter: KnowledgeFilter | null) => void;
+  // responseId: undefined = use currentConversationId, null = don't save to localStorage
+  setConversationFilter: (filter: KnowledgeFilter | null, responseId?: string | null) => void;
 }
 
 const ChatContext = createContext<ChatContextType | undefined>(undefined);
@@ -112,6 +113,8 @@ export function ChatProvider({ children }: ChatProviderProps) {
   const refreshTimeoutRef = useRef<NodeJS.Timeout | null>(null);
 
   const refreshConversations = useCallback((force = false) => {
+    console.log("[REFRESH] refreshConversations called, force:", force);
+
     if (force) {
       // Immediate refresh for important updates like new conversations
       setRefreshTrigger((prev) => prev + 1);
@@ -145,22 +148,59 @@ export function ChatProvider({ children }: ChatProviderProps) {
   }, []);
 
   const loadConversation = useCallback(
-    (conversation: ConversationData) => {
+    async (conversation: ConversationData) => {
+      console.log("[CONVERSATION] Loading conversation:", {
+        conversationId: conversation.response_id,
+        title: conversation.title,
+        endpoint: conversation.endpoint,
+      });
+
       setCurrentConversationId(conversation.response_id);
       setEndpoint(conversation.endpoint);
       // Store the full conversation data for the chat page to use
       setConversationData(conversation);
+
       // Load the filter if one exists for this conversation
-      // Only update the filter if this is a different conversation (to preserve user's filter selection)
-      setConversationFilterState((currentFilter) => {
-        // If we're loading a different conversation, load its filter
-        // Otherwise keep the current filter (don't reset it when conversation refreshes)
-        const isDifferentConversation =
-          conversation.response_id !== conversationData?.response_id;
-        return isDifferentConversation
-          ? conversation.filter || null
-          : currentFilter;
-      });
+      // Always update the filter to match the conversation being loaded
+      const isDifferentConversation =
+        conversation.response_id !== conversationData?.response_id;
+
+      if (isDifferentConversation && typeof window !== "undefined") {
+        // Try to load the saved filter from localStorage
+        const savedFilterId = localStorage.getItem(`conversation_filter_${conversation.response_id}`);
+        console.log("[CONVERSATION] Looking for filter:", {
+          conversationId: conversation.response_id,
+          savedFilterId,
+        });
+
+        if (savedFilterId) {
+          // Import getFilterById dynamically to avoid circular dependency
+          const { getFilterById } = await import("@/app/api/queries/useGetFilterByIdQuery");
+          try {
+            const filter = await getFilterById(savedFilterId);
+
+            if (filter) {
+              console.log("[CONVERSATION] Loaded filter:", filter.name, filter.id);
+              setConversationFilterState(filter);
+              // Update conversation data with the loaded filter
+              setConversationData((prev) => {
+                if (!prev) return prev;
+                return { ...prev, filter };
+              });
+            }
+          } catch (error) {
+            console.error("[CONVERSATION] Failed to load filter:", error);
+            // Filter was deleted, clean up localStorage
+            localStorage.removeItem(`conversation_filter_${conversation.response_id}`);
+            setConversationFilterState(null);
+          }
+        } else {
+          // No saved filter in localStorage, clear the current filter
+          console.log("[CONVERSATION] No filter found for this conversation");
+          setConversationFilterState(null);
+        }
+      }
+
       // Clear placeholder when loading a real conversation
       setPlaceholderConversation(null);
       setConversationLoaded(true);
@@ -170,15 +210,48 @@ export function ChatProvider({ children }: ChatProviderProps) {
     [conversationData?.response_id],
   );
 
-  const startNewConversation = useCallback(() => {
+  const startNewConversation = useCallback(async () => {
+    console.log("[CONVERSATION] Starting new conversation");
+
     // Clear current conversation data and reset state
     setCurrentConversationId(null);
     setPreviousResponseIds({ chat: null, langflow: null });
     setConversationData(null);
     setConversationDocs([]);
     setConversationLoaded(false);
-    // Clear the filter when starting a new conversation
-    setConversationFilterState(null);
+
+    // Load default filter if available (and clear it after first use)
+    if (typeof window !== "undefined") {
+      const defaultFilterId = localStorage.getItem("default_conversation_filter_id");
+      console.log("[CONVERSATION] Default filter ID:", defaultFilterId);
+
+      if (defaultFilterId) {
+        // Clear the default filter now so it's only used once
+        localStorage.removeItem("default_conversation_filter_id");
+        console.log("[CONVERSATION] Cleared default filter (used once)");
+
+        try {
+          const { getFilterById } = await import("@/app/api/queries/useGetFilterByIdQuery");
+          const filter = await getFilterById(defaultFilterId);
+
+          if (filter) {
+            console.log("[CONVERSATION] Loaded default filter:", filter.name, filter.id);
+            setConversationFilterState(filter);
+          } else {
+            // Default filter was deleted
+            setConversationFilterState(null);
+          }
+        } catch (error) {
+          console.error("[CONVERSATION] Failed to load default filter:", error);
+          setConversationFilterState(null);
+        }
+      } else {
+        console.log("[CONVERSATION] No default filter set");
+        setConversationFilterState(null);
+      }
+    } else {
+      setConversationFilterState(null);
+    }
+
     // Create a temporary placeholder conversation to show in sidebar
     const placeholderConversation: ConversationData = {
@@ -230,7 +303,7 @@ export function ChatProvider({ children }: ChatProviderProps) {
   );
 
   const setConversationFilter = useCallback(
-    (filter: KnowledgeFilter | null) => {
+    (filter: KnowledgeFilter | null, responseId?: string | null) => {
       setConversationFilterState(filter);
       // Update the conversation data to include the filter
       setConversationData((prev) => {
@@ -240,8 +313,24 @@ export function ChatProvider({ children }: ChatProviderProps) {
           filter,
         };
       });
+
+      // Determine which conversation ID to use for saving
+      // - undefined: use currentConversationId (default behavior)
+      // - null: explicitly skip saving to localStorage
+      // - string: use the provided responseId
+      const targetId = responseId === undefined ? currentConversationId : responseId;
+
+      // Save filter association for the target conversation
+      if (typeof window !== "undefined" && targetId) {
+        const key = `conversation_filter_${targetId}`;
+        if (filter) {
+          localStorage.setItem(key, filter.id);
+        } else {
+          localStorage.removeItem(key);
+        }
+      }
     },
-    [],
+    [currentConversationId],
   );
 
   const value = useMemo<ChatContextType>(
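Note (sketch, not part of the diff): the widened setConversationFilter signature uses responseId as a three-way switch, per the comment added to the interface above. The call forms, with an invented response ID:

setConversationFilter(filter);            // undefined: persist under currentConversationId
setConversationFilter(filter, "resp-42"); // string: persist under this response ID ("resp-42" is invented)
setConversationFilter(filter, null);      // null: update context state only, skip localStorage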
@@ -4,6 +4,7 @@ import type {
   Message,
   SelectedFilters,
 } from "@/app/chat/_types/types";
+import { useChat } from "@/contexts/chat-context";
 
 interface UseChatStreamingOptions {
   endpoint?: string;
@@ -15,6 +16,7 @@ interface SendMessageOptions {
   prompt: string;
   previousResponseId?: string;
   filters?: SelectedFilters;
+  filter_id?: string;
   limit?: number;
   scoreThreshold?: number;
 }
@@ -31,10 +33,13 @@ export function useChatStreaming({
   const streamAbortRef = useRef<AbortController | null>(null);
   const streamIdRef = useRef(0);
 
+  const { refreshConversations } = useChat();
+
   const sendMessage = async ({
     prompt,
     previousResponseId,
     filters,
+    filter_id,
     limit = 10,
     scoreThreshold = 0,
   }: SendMessageOptions) => {
@@ -73,6 +78,7 @@ export function useChatStreaming({
       stream: boolean;
       previous_response_id?: string;
       filters?: SelectedFilters;
+      filter_id?: string;
       limit?: number;
       scoreThreshold?: number;
     } = {
@@ -90,6 +96,12 @@ export function useChatStreaming({
       requestBody.filters = filters;
     }
 
+    if (filter_id) {
+      requestBody.filter_id = filter_id;
+    }
+
+    console.log("[useChatStreaming] Sending request:", { filter_id, requestBody });
+
     const response = await fetch(endpoint, {
       method: "POST",
       headers: {
@@ -489,6 +501,7 @@ export function useChatStreaming({
       // Clear streaming message and call onComplete with final message
       setStreamingMessage(null);
       onComplete?.(finalMessage, newResponseId);
+      refreshConversations(true);
       return finalMessage;
     }
@@ -45,6 +45,8 @@ export const ONBOARDING_ASSISTANT_MESSAGE_KEY = "onboarding_assistant_message";
 export const ONBOARDING_SELECTED_NUDGE_KEY = "onboarding_selected_nudge";
 export const ONBOARDING_CARD_STEPS_KEY = "onboarding_card_steps";
 export const ONBOARDING_UPLOAD_STEPS_KEY = "onboarding_upload_steps";
+export const ONBOARDING_OPENRAG_DOCS_FILTER_ID_KEY = "onboarding_openrag_docs_filter_id";
+export const ONBOARDING_USER_DOC_FILTER_ID_KEY = "onboarding_user_doc_filter_id";

 export const FILES_REGEX =
   /(?<=I'm uploading a document called ['"])[^'"]+\.[^.]+(?=['"]\. Here is its content:)/;
@@ -10,6 +10,8 @@ export interface UploadFileResult {
   deletion: unknown;
   unified: boolean;
   raw: unknown;
+  createFilter?: boolean;
+  filename?: string;
 }

 export async function duplicateCheck(
@@ -120,11 +122,15 @@ export async function uploadFileForContext(
 export async function uploadFile(
   file: File,
   replace = false,
+  createFilter = false,
 ): Promise<UploadFileResult> {
   try {
     const formData = new FormData();
     formData.append("file", file);
     formData.append("replace_duplicates", replace.toString());
+    if (createFilter) {
+      formData.append("create_filter", "true");
+    }

     const uploadResponse = await fetch("/api/router/upload_ingest", {
       method: "POST",
@@ -177,6 +183,11 @@ export async function uploadFile(
       );
     }

+    const shouldCreateFilter = (uploadIngestJson as { create_filter?: boolean })
+      .create_filter;
+    const filename = (uploadIngestJson as { filename?: string })
+      .filename;
+
     const result: UploadFileResult = {
       fileId,
       filePath,
@@ -184,6 +195,8 @@ export async function uploadFile(
       deletion: deletionJson,
       unified: true,
       raw: uploadIngestJson,
+      createFilter: shouldCreateFilter,
+      filename,
     };

     return result;
48 src/agent.py

@@ -1,3 +1,5 @@
+from http.client import HTTPException
+
 from utils.logging_config import get_logger

 logger = get_logger(__name__)
@@ -67,6 +69,7 @@ def store_conversation_thread(user_id: str, response_id: str, conversation_state
         "created_at": conversation_state.get("created_at"),
         "last_activity": conversation_state.get("last_activity"),
         "previous_response_id": conversation_state.get("previous_response_id"),
+        "filter_id": conversation_state.get("filter_id"),
         "total_messages": len(
             [msg for msg in messages if msg.get("role") in ["user", "assistant"]]
         ),
@@ -219,15 +222,26 @@ async def async_response(

        response = await client.responses.create(**request_params)

-       response_text = response.output_text
-       logger.info("Response generated", log_prefix=log_prefix, response=response_text)
+       # Check if response has output_text using getattr to avoid issues with special objects
+       output_text = getattr(response, "output_text", None)
+       if output_text is not None:
+           response_text = output_text
+           logger.info("Response generated", log_prefix=log_prefix, response=response_text)

            # Extract and store response_id if available
            response_id = getattr(response, "id", None) or getattr(
                response, "response_id", None
            )

            return response_text, response_id, response
+       else:
+           msg = "Nudge response missing output_text"
+           error = getattr(response, "error", None)
+           if error:
+               error_msg = getattr(error, "message", None)
+               if error_msg:
+                   msg = error_msg
+           raise ValueError(msg)
    except Exception as e:
        logger.error("Exception in non-streaming response", error=str(e))
        import traceback
@@ -314,6 +328,7 @@ async def async_chat(
     user_id: str,
     model: str = "gpt-4.1-mini",
     previous_response_id: str = None,
+    filter_id: str = None,
 ):
     logger.debug(
         "async_chat called", user_id=user_id, previous_response_id=previous_response_id
@@ -334,6 +349,10 @@ async def async_chat(
         "Added user message", message_count=len(conversation_state["messages"])
     )

+    # Store filter_id in conversation state if provided
+    if filter_id:
+        conversation_state["filter_id"] = filter_id
+
     response_text, response_id, response_obj = await async_response(
         async_client,
         prompt,
@@ -389,6 +408,7 @@ async def async_chat_stream(
     user_id: str,
     model: str = "gpt-4.1-mini",
     previous_response_id: str = None,
+    filter_id: str = None,
 ):
     # Get the specific conversation thread (or create new one)
     conversation_state = get_conversation_thread(user_id, previous_response_id)
@@ -399,6 +419,10 @@ async def async_chat_stream(
     user_message = {"role": "user", "content": prompt, "timestamp": datetime.now()}
     conversation_state["messages"].append(user_message)

+    # Store filter_id in conversation state if provided
+    if filter_id:
+        conversation_state["filter_id"] = filter_id
+
     full_response = ""
     response_id = None
     async for chunk in async_stream(
@@ -452,6 +476,7 @@ async def async_langflow_chat(
     extra_headers: dict = None,
     previous_response_id: str = None,
     store_conversation: bool = True,
+    filter_id: str = None,
 ):
     logger.debug(
         "async_langflow_chat called",
@@ -478,6 +503,10 @@ async def async_langflow_chat(
         message_count=len(conversation_state["messages"]),
     )

+    # Store filter_id in conversation state if provided
+    if filter_id:
+        conversation_state["filter_id"] = filter_id
+
     response_text, response_id, response_obj = await async_response(
         langflow_client,
         prompt,
@@ -562,6 +591,7 @@ async def async_langflow_chat_stream(
     user_id: str,
     extra_headers: dict = None,
     previous_response_id: str = None,
+    filter_id: str = None,
 ):
     logger.debug(
         "async_langflow_chat_stream called",
@@ -578,6 +608,10 @@ async def async_langflow_chat_stream(
     user_message = {"role": "user", "content": prompt, "timestamp": datetime.now()}
     conversation_state["messages"].append(user_message)

+    # Store filter_id in conversation state if provided
+    if filter_id:
+        conversation_state["filter_id"] = filter_id
+
     full_response = ""
     response_id = None
     collected_chunks = []  # Store all chunks for function call data
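For reference, the new output_text guard in async_response can be exercised in isolation. A minimal sketch, assuming a stand-in response object rather than the real SDK type:

class _Err:
    def __init__(self, message):
        self.message = message

class FakeResponse:
    # Illustrative stand-in: real responses may lack output_text entirely
    def __init__(self, output_text=None, error=None):
        if output_text is not None:
            self.output_text = output_text
        self.error = error

def extract_text(response):
    # getattr avoids AttributeError on proxy/stream objects without output_text
    output_text = getattr(response, "output_text", None)
    if output_text is not None:
        return output_text
    msg = "Nudge response missing output_text"
    error = getattr(response, "error", None)
    if error:
        error_msg = getattr(error, "message", None)
        if error_msg:
            msg = error_msg
    raise ValueError(msg)

print(extract_text(FakeResponse(output_text="hello")))     # hello
# extract_text(FakeResponse(error=_Err("rate limited")))   # ValueError: rate limited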
@@ -14,6 +14,7 @@ async def chat_endpoint(request: Request, chat_service, session_manager):
     filters = data.get("filters")
     limit = data.get("limit", 10)
     score_threshold = data.get("scoreThreshold", 0)
+    filter_id = data.get("filter_id")

     user = request.state.user
     user_id = user.user_id
@@ -42,6 +43,7 @@ async def chat_endpoint(request: Request, chat_service, session_manager):
                 jwt_token,
                 previous_response_id=previous_response_id,
                 stream=True,
+                filter_id=filter_id,
             ),
             media_type="text/event-stream",
             headers={
@@ -58,6 +60,7 @@ async def chat_endpoint(request: Request, chat_service, session_manager):
             jwt_token,
             previous_response_id=previous_response_id,
             stream=False,
+            filter_id=filter_id,
         )
         return JSONResponse(result)

@@ -71,6 +74,7 @@ async def langflow_endpoint(request: Request, chat_service, session_manager):
     filters = data.get("filters")
     limit = data.get("limit", 10)
     score_threshold = data.get("scoreThreshold", 0)
+    filter_id = data.get("filter_id")

     user = request.state.user
     user_id = user.user_id
@@ -100,6 +104,7 @@ async def langflow_endpoint(request: Request, chat_service, session_manager):
                 jwt_token,
                 previous_response_id=previous_response_id,
                 stream=True,
+                filter_id=filter_id,
             ),
             media_type="text/event-stream",
             headers={
@@ -116,6 +121,7 @@ async def langflow_endpoint(request: Request, chat_service, session_manager):
             jwt_token,
             previous_response_id=previous_response_id,
             stream=False,
+            filter_id=filter_id,
         )
         return JSONResponse(result)
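With filter_id now read from the request body and threaded through the service calls, a scoped chat request might look like the sketch below; the host, route path, and filter UUID are assumptions about a local deployment:

import requests

resp = requests.post(
    "http://localhost:8000/chat",  # assumed mount path for chat_endpoint
    json={
        "prompt": "What is OpenRAG?",
        "stream": False,
        "filter_id": "0b1e9c2a-0000-0000-0000-000000000000",  # hypothetical knowledge filter ID
    },
    timeout=30,
)
resp.raise_for_status()
print(resp.json()["response"])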
@@ -37,6 +37,7 @@ async def upload_ingest_router(
     # Route based on configuration
     if DISABLE_INGEST_WITH_LANGFLOW:
         # Route to traditional OpenRAG upload
+        # Note: onboarding filter creation is only supported in Langflow path
         logger.debug("Routing to traditional OpenRAG upload")
         return await traditional_upload(request, document_service, session_manager)
     else:
@@ -77,6 +78,7 @@ async def langflow_upload_ingest_task(
     tweaks_json = form.get("tweaks")
     delete_after_ingest = form.get("delete_after_ingest", "true").lower() == "true"
     replace_duplicates = form.get("replace_duplicates", "false").lower() == "true"
+    create_filter = form.get("create_filter", "false").lower() == "true"

     # Parse JSON fields if provided
     settings = None
@@ -177,14 +179,15 @@ async def langflow_upload_ingest_task(

         logger.debug("Langflow upload task created successfully", task_id=task_id)

-        return JSONResponse(
-            {
-                "task_id": task_id,
-                "message": f"Langflow upload task created for {len(upload_files)} file(s)",
-                "file_count": len(upload_files),
-            },
-            status_code=202,
-        )  # 202 Accepted for async processing
+        response_data = {
+            "task_id": task_id,
+            "message": f"Langflow upload task created for {len(upload_files)} file(s)",
+            "file_count": len(upload_files),
+            "create_filter": create_filter,  # Pass flag back to frontend
+            "filename": original_filenames[0] if len(original_filenames) == 1 else None,  # Pass filename for filter creation
+        }
+
+        return JSONResponse(response_data, status_code=202)  # 202 Accepted for async processing

     except Exception:
         # Clean up temp files on error
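A caller consuming the new 202 payload could look like the following sketch; the mount path and auth handling are assumptions, while the form fields and response keys match the handler above:

import requests

with open("report.pdf", "rb") as fh:
    resp = requests.post(
        "http://localhost:8000/router/upload_ingest",  # assumed mount path
        files={"file": ("report.pdf", fh, "application/pdf")},
        data={"replace_duplicates": "false", "create_filter": "true"},
        timeout=60,
    )

assert resp.status_code == 202  # task accepted for async processing
body = resp.json()
# create_filter and filename are echoed back so the frontend can build a
# knowledge filter once the ingest task finishes
if body.get("create_filter") and body.get("filename"):
    print(f"Create filter for {body['filename']} after task {body['task_id']} completes")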
@@ -558,7 +558,7 @@ async def update_settings(request, session_manager):
     # Update provider-specific settings
     provider_updated = False
     if "openai_api_key" in body and body["openai_api_key"].strip():
-        current_config.providers.openai.api_key = body["openai_api_key"]
+        current_config.providers.openai.api_key = body["openai_api_key"].strip()
         current_config.providers.openai.configured = True
         config_updated = True
         provider_updated = True
@@ -617,6 +617,9 @@ async def update_settings(request, session_manager):
         "watsonx_api_key", "watsonx_endpoint", "watsonx_project_id",
         "ollama_endpoint"
     ]
+
+    await clients.refresh_patched_client()
+
     if any(key in body for key in provider_fields_to_check):
         try:
             flows_service = _get_flows_service()
@@ -624,8 +627,11 @@ async def update_settings(request, session_manager):
             # Update global variables
             await _update_langflow_global_variables(current_config)

+            # Update LLM client credentials when embedding selection changes
             if "embedding_provider" in body or "embedding_model" in body:
-                await _update_mcp_servers_with_provider_credentials(current_config)
+                await _update_mcp_servers_with_provider_credentials(
+                    current_config, session_manager
+                )

             # Update model values if provider or model changed
             if "llm_provider" in body or "llm_model" in body or "embedding_provider" in body or "embedding_model" in body:
@@ -636,6 +642,7 @@ async def update_settings(request, session_manager):
             # Don't fail the entire settings update if Langflow update fails
             # The config was still saved

     logger.info(
         "Configuration updated successfully", updated_fields=list(body.keys())
     )
@@ -795,7 +802,7 @@ async def onboarding(request, flows_service, session_manager=None):

     # Update provider-specific credentials
     if "openai_api_key" in body and body["openai_api_key"].strip():
-        current_config.providers.openai.api_key = body["openai_api_key"]
+        current_config.providers.openai.api_key = body["openai_api_key"].strip()
         current_config.providers.openai.configured = True
         config_updated = True

@@ -1061,11 +1068,34 @@ async def onboarding(request, flows_service, session_manager=None):
             {"error": "Failed to save configuration"}, status_code=500
         )

+    # Refresh cached patched client so latest credentials take effect immediately
+    await clients.refresh_patched_client()
+
+    # Create OpenRAG Docs knowledge filter if sample data was ingested
+    # Only create on embedding step to avoid duplicates (both LLM and embedding cards submit with sample_data)
+    openrag_docs_filter_id = None
+    if should_ingest_sample_data and ("embedding_provider" in body or "embedding_model" in body):
+        try:
+            openrag_docs_filter_id = await _create_openrag_docs_filter(
+                request, session_manager
+            )
+            if openrag_docs_filter_id:
+                logger.info(
+                    "Created OpenRAG Docs knowledge filter",
+                    filter_id=openrag_docs_filter_id,
+                )
+        except Exception as e:
+            logger.error(
+                "Failed to create OpenRAG Docs knowledge filter", error=str(e)
+            )
+            # Don't fail onboarding if filter creation fails
+
     return JSONResponse(
         {
             "message": "Onboarding configuration updated successfully",
             "edited": True,  # Confirm that config is now marked as edited
             "sample_data_ingested": should_ingest_sample_data,
+            "openrag_docs_filter_id": openrag_docs_filter_id,
         }
     )
@@ -1081,6 +1111,73 @@ async def onboarding(request, flows_service, session_manager=None):
     )

+
+async def _create_openrag_docs_filter(request, session_manager):
+    """Create the OpenRAG Docs knowledge filter for onboarding"""
+    import uuid
+    import json
+    from datetime import datetime
+
+    # Get knowledge filter service from app state
+    app = request.scope.get("app")
+    if not app or not hasattr(app.state, "services"):
+        logger.error("Could not access services for knowledge filter creation")
+        return None
+
+    knowledge_filter_service = app.state.services.get("knowledge_filter_service")
+    if not knowledge_filter_service:
+        logger.error("Knowledge filter service not available")
+        return None
+
+    # Get user and JWT token from request
+    user = request.state.user
+    jwt_token = session_manager.get_effective_jwt_token(user.user_id, request.state.jwt_token)
+
+    # In no-auth mode, set owner to None so filter is visible to all users
+    # In auth mode, use the actual user as owner
+    if is_no_auth_mode():
+        owner_user_id = None
+    else:
+        owner_user_id = user.user_id
+
+    # Create the filter document
+    filter_id = str(uuid.uuid4())
+    query_data = json.dumps({
+        "query": "",
+        "filters": {
+            "data_sources": ["openrag-documentation.pdf"],
+            "document_types": ["*"],
+            "owners": ["*"],
+            "connector_types": ["*"],
+        },
+        "limit": 10,
+        "scoreThreshold": 0,
+        "color": "blue",
+        "icon": "book",
+    })
+
+    filter_doc = {
+        "id": filter_id,
+        "name": "OpenRAG Docs",
+        "description": "Filter for OpenRAG documentation",
+        "query_data": query_data,
+        "owner": owner_user_id,
+        "allowed_users": [],
+        "allowed_groups": [],
+        "created_at": datetime.utcnow().isoformat(),
+        "updated_at": datetime.utcnow().isoformat(),
+    }
+
+    result = await knowledge_filter_service.create_knowledge_filter(
+        filter_doc, user_id=user.user_id, jwt_token=jwt_token
+    )
+
+    if result.get("success"):
+        return filter_id
+    else:
+        logger.error("Failed to create OpenRAG Docs filter", error=result.get("error"))
+        return None
+
+
 def _get_flows_service():
     """Helper function to get flows service instance"""
     from services.flows_service import FlowsService
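The filter document stores query_data as a JSON string rather than a nested object. A quick sketch that round-trips the exact payload shape used by _create_openrag_docs_filter:

import json

query_data = json.dumps({
    "query": "",
    "filters": {
        "data_sources": ["openrag-documentation.pdf"],
        "document_types": ["*"],
        "owners": ["*"],
        "connector_types": ["*"],
    },
    "limit": 10,
    "scoreThreshold": 0,
    "color": "blue",
    "icon": "book",
})

# Consumers must json.loads the string before reading individual fields
parsed = json.loads(query_data)
assert parsed["filters"]["data_sources"] == ["openrag-documentation.pdf"]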
@@ -165,18 +165,36 @@ async def generate_langflow_api_key(modify: bool = False):
         if validation_response.status_code == 200:
             logger.debug("Cached API key is valid", key_prefix=LANGFLOW_KEY[:8])
             return LANGFLOW_KEY
-        else:
+        elif validation_response.status_code in (401, 403):
             logger.warning(
-                "Cached API key is invalid, generating fresh key",
+                "Cached API key is unauthorized, generating fresh key",
                 status_code=validation_response.status_code,
             )
             LANGFLOW_KEY = None  # Clear invalid key
-    except Exception as e:
+        else:
+            logger.warning(
+                "Cached API key validation returned non-access error; keeping existing key",
+                status_code=validation_response.status_code,
+            )
+            return LANGFLOW_KEY
+    except requests.exceptions.Timeout as e:
         logger.warning(
-            "Cached API key validation failed, generating fresh key",
+            "Cached API key validation timed out; keeping existing key",
             error=str(e),
         )
-        LANGFLOW_KEY = None  # Clear invalid key
+        return LANGFLOW_KEY
+    except requests.exceptions.RequestException as e:
+        logger.warning(
+            "Cached API key validation failed due to request error; keeping existing key",
+            error=str(e),
+        )
+        return LANGFLOW_KEY
+    except Exception as e:
+        logger.warning(
+            "Unexpected error during cached API key validation; keeping existing key",
+            error=str(e),
+        )
+        return LANGFLOW_KEY

     # Use default langflow/langflow credentials if auto-login is enabled and credentials not set
     username = LANGFLOW_SUPERUSER
@@ -279,7 +297,7 @@ class AppClients:
         self.opensearch = None
         self.langflow_client = None
         self.langflow_http_client = None
-        self._patched_async_client = None  # Private attribute
+        self._patched_async_client = None  # Private attribute - single client for all providers
         self._client_init_lock = __import__('threading').Lock()  # Lock for thread-safe initialization
         self.converter = None
@@ -364,6 +382,9 @@ class AppClients:
         Property that ensures OpenAI client is initialized on first access.
         This allows lazy initialization so the app can start without an API key.

+        The client is patched with LiteLLM support to handle multiple providers.
+        All provider credentials are loaded into environment for LiteLLM routing.
+
         Note: The client is a long-lived singleton that should be closed via cleanup().
         Thread-safe via lock to prevent concurrent initialization attempts.
         """
@@ -377,21 +398,40 @@ class AppClients:
         if self._patched_async_client is not None:
             return self._patched_async_client

-        # Try to initialize the client on-demand
-        # First check if OPENAI_API_KEY is in environment
-        openai_key = os.getenv("OPENAI_API_KEY")
-
-        if not openai_key:
-            # Try to get from config (in case it was set during onboarding)
-            try:
-                config = get_openrag_config()
-                if config and config.provider and config.provider.api_key:
-                    openai_key = config.provider.api_key
-                    # Set it in environment so AsyncOpenAI can pick it up
-                    os.environ["OPENAI_API_KEY"] = openai_key
-                    logger.info("Loaded OpenAI API key from config file")
-            except Exception as e:
-                logger.debug("Could not load OpenAI key from config", error=str(e))
+        # Load all provider credentials into environment for LiteLLM
+        # LiteLLM routes based on model name prefixes (openai/, ollama/, watsonx/, etc.)
+        try:
+            config = get_openrag_config()
+
+            # Set OpenAI credentials
+            if config.providers.openai.api_key:
+                os.environ["OPENAI_API_KEY"] = config.providers.openai.api_key
+                logger.debug("Loaded OpenAI API key from config")
+
+            # Set Anthropic credentials
+            if config.providers.anthropic.api_key:
+                os.environ["ANTHROPIC_API_KEY"] = config.providers.anthropic.api_key
+                logger.debug("Loaded Anthropic API key from config")
+
+            # Set WatsonX credentials
+            if config.providers.watsonx.api_key:
+                os.environ["WATSONX_API_KEY"] = config.providers.watsonx.api_key
+            if config.providers.watsonx.endpoint:
+                os.environ["WATSONX_ENDPOINT"] = config.providers.watsonx.endpoint
+                os.environ["WATSONX_API_BASE"] = config.providers.watsonx.endpoint  # LiteLLM expects this name
+            if config.providers.watsonx.project_id:
+                os.environ["WATSONX_PROJECT_ID"] = config.providers.watsonx.project_id
+            if config.providers.watsonx.api_key:
+                logger.debug("Loaded WatsonX credentials from config")
+
+            # Set Ollama endpoint
+            if config.providers.ollama.endpoint:
+                os.environ["OLLAMA_BASE_URL"] = config.providers.ollama.endpoint
+                os.environ["OLLAMA_ENDPOINT"] = config.providers.ollama.endpoint
+                logger.debug("Loaded Ollama endpoint from config")
+
+        except Exception as e:
+            logger.debug("Could not load provider credentials from config", error=str(e))

         # Try to initialize the client - AsyncOpenAI() will read from environment
         # We'll try HTTP/2 first with a probe, then fall back to HTTP/1.1 if it times out
@@ -455,6 +495,27 @@ class AppClients:

         return self._patched_async_client

+    @property
+    def patched_llm_client(self):
+        """Alias for patched_async_client - for backward compatibility with code expecting separate clients."""
+        return self.patched_async_client
+
+    @property
+    def patched_embedding_client(self):
+        """Alias for patched_async_client - for backward compatibility with code expecting separate clients."""
+        return self.patched_async_client
+
+    async def refresh_patched_client(self):
+        """Reset patched client so next use picks up updated provider credentials."""
+        if self._patched_async_client is not None:
+            try:
+                await self._patched_async_client.close()
+                logger.info("Closed patched client for refresh")
+            except Exception as e:
+                logger.warning("Failed to close patched client during refresh", error=str(e))
+            finally:
+                self._patched_async_client = None
+
     async def cleanup(self):
         """Cleanup resources - should be called on application shutdown"""
         # Close AsyncOpenAI client if it was created
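refresh_patched_client boils down to a close-and-null pattern: tear down the cached client so the next property access rebuilds it with fresh credentials. A self-contained sketch with a stand-in client class instead of AsyncOpenAI:

import asyncio

class FakeClient:
    async def close(self):
        print("client closed")

class Clients:
    def __init__(self):
        self._client = FakeClient()

    async def refresh(self):
        # Close the old client (best effort), then drop the reference so the
        # next access re-reads credentials and constructs a new client.
        if self._client is not None:
            try:
                await self._client.close()
            finally:
                self._client = None

asyncio.run(Clients().refresh())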
@@ -209,7 +209,7 @@ class TaskProcessor:
         embeddings = []

         for batch in text_batches:
-            resp = await clients.patched_async_client.embeddings.create(
+            resp = await clients.patched_embedding_client.embeddings.create(
                 model=embedding_model, input=batch
             )
             embeddings.extend([d.embedding for d in resp.data])
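The TaskProcessor loop above embeds texts batch by batch. The batch construction itself is not shown in this hunk, but the implied chunking scheme is simple; batch size 64 here is an assumption, not the project's actual setting:

def batches(texts, size=64):
    # Yield fixed-size slices so each embeddings.create call stays within
    # provider input limits.
    for i in range(0, len(texts), size):
        yield texts[i:i + size]

texts = [f"chunk {n}" for n in range(150)]
print([len(b) for b in batches(texts)])  # [64, 64, 22]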
@@ -15,6 +15,7 @@ class ChatService:
         jwt_token: str = None,
         previous_response_id: str = None,
         stream: bool = False,
+        filter_id: str = None,
     ):
         """Handle chat requests using the patched OpenAI client"""
         if not prompt:
@@ -26,17 +27,19 @@ class ChatService:

         if stream:
             return async_chat_stream(
-                clients.patched_async_client,
+                clients.patched_llm_client,
                 prompt,
                 user_id,
                 previous_response_id=previous_response_id,
+                filter_id=filter_id,
             )
         else:
             response_text, response_id = await async_chat(
-                clients.patched_async_client,
+                clients.patched_llm_client,
                 prompt,
                 user_id,
                 previous_response_id=previous_response_id,
+                filter_id=filter_id,
             )
             response_data = {"response": response_text}
             if response_id:
@@ -50,6 +53,7 @@ class ChatService:
         jwt_token: str = None,
         previous_response_id: str = None,
         stream: bool = False,
+        filter_id: str = None,
     ):
         """Handle Langflow chat requests"""
         if not prompt:
@@ -147,6 +151,7 @@ class ChatService:
                 user_id,
                 extra_headers=extra_headers,
                 previous_response_id=previous_response_id,
+                filter_id=filter_id,
             )
         else:
             from agent import async_langflow_chat
@@ -158,6 +163,7 @@ class ChatService:
                 user_id,
                 extra_headers=extra_headers,
                 previous_response_id=previous_response_id,
+                filter_id=filter_id,
             )
             response_data = {"response": response_text}
             if response_id:
@@ -344,7 +350,7 @@ class ChatService:
         if user_id and jwt_token:
             set_auth_context(user_id, jwt_token)
         response_text, response_id = await async_chat(
-            clients.patched_async_client,
+            clients.patched_llm_client,
             document_prompt,
             user_id,
             previous_response_id=previous_response_id,
@@ -429,6 +435,7 @@ class ChatService:
                 "previous_response_id": conversation_state.get(
                     "previous_response_id"
                 ),
+                "filter_id": conversation_state.get("filter_id"),
                 "total_messages": len(messages),
                 "source": "in_memory",
             }
@@ -447,6 +454,7 @@ class ChatService:
                 "created_at": metadata.get("created_at"),
                 "last_activity": metadata.get("last_activity"),
                 "previous_response_id": metadata.get("previous_response_id"),
+                "filter_id": metadata.get("filter_id"),
                 "total_messages": metadata.get("total_messages", 0),
                 "source": "metadata_only",
             }
@@ -545,6 +553,7 @@ class ChatService:
                     or conversation.get("created_at"),
                 "last_activity": metadata.get("last_activity")
                     or conversation.get("last_activity"),
+                "filter_id": metadata.get("filter_id"),
                 "total_messages": len(messages),
                 "source": "langflow_enhanced",
                 "langflow_session_id": session_id,
@@ -632,4 +641,3 @@ class ChatService:
         except Exception as e:
             logger.error(f"Error deleting session {session_id} from Langflow: {e}")
             return False
-
@@ -108,7 +108,7 @@ class ModelsService:
             else:
                 logger.error(f"Failed to fetch OpenAI models: {response.status_code}")
                 raise Exception(
-                    f"OpenAI API returned status code {response.status_code}"
+                    f"OpenAI API returned status code {response.status_code}, {response.text}"
                 )

         except Exception as e:
@@ -1,7 +1,7 @@
 import copy
 from typing import Any, Dict
 from agentd.tool_decorator import tool
-from config.settings import EMBED_MODEL, clients, INDEX_NAME, get_embedding_model
+from config.settings import EMBED_MODEL, clients, INDEX_NAME, get_embedding_model, WATSONX_EMBEDDING_DIMENSIONS
 from auth_context import get_auth_context
 from utils.logging_config import get_logger

@@ -147,13 +147,38 @@ class SearchService:
         attempts = 0
         last_exception = None

+        # Format model name for LiteLLM compatibility
+        # The patched client routes through LiteLLM for non-OpenAI providers
+        formatted_model = model_name
+
+        # Skip if already has a provider prefix
+        if not any(model_name.startswith(prefix + "/") for prefix in ["openai", "ollama", "watsonx", "anthropic"]):
+            # Detect provider from model name characteristics:
+            # - Ollama: contains ":" (e.g., "nomic-embed-text:latest")
+            # - WatsonX: check against known IBM embedding models
+            # - OpenAI: everything else (no prefix needed)
+
+            if ":" in model_name:
+                # Ollama models use tags with colons
+                formatted_model = f"ollama/{model_name}"
+                logger.debug(f"Formatted Ollama model: {model_name} -> {formatted_model}")
+            elif model_name in WATSONX_EMBEDDING_DIMENSIONS:
+                # WatsonX embedding models - use hardcoded list from settings
+                formatted_model = f"watsonx/{model_name}"
+                logger.debug(f"Formatted WatsonX model: {model_name} -> {formatted_model}")
+            # else: OpenAI models don't need a prefix
+
         while attempts < MAX_EMBED_RETRIES:
             attempts += 1
             try:
-                resp = await clients.patched_async_client.embeddings.create(
-                    model=model_name, input=[query]
+                resp = await clients.patched_embedding_client.embeddings.create(
+                    model=formatted_model, input=[query]
                 )
-                return model_name, resp.data[0].embedding
+                # Try to get embedding - some providers return .embedding, others return ['embedding']
+                embedding = getattr(resp.data[0], 'embedding', None)
+                if embedding is None:
+                    embedding = resp.data[0]['embedding']
+                return model_name, embedding
             except Exception as e:
                 last_exception = e
                 if attempts >= MAX_EMBED_RETRIES:
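The prefix-detection logic above restates cleanly as a pure function for quick testing; WATSONX_MODELS below is a stand-in for the real WATSONX_EMBEDDING_DIMENSIONS keys:

WATSONX_MODELS = {"ibm/slate-125m-english-rtrvr"}  # assumed contents

def format_model(model_name: str) -> str:
    # Already carries an explicit provider prefix: pass through unchanged
    if any(model_name.startswith(p + "/") for p in ("openai", "ollama", "watsonx", "anthropic")):
        return model_name
    if ":" in model_name:              # Ollama tags use colons
        return f"ollama/{model_name}"
    if model_name in WATSONX_MODELS:   # known IBM embedding models
        return f"watsonx/{model_name}"
    return model_name                  # OpenAI models need no prefix

assert format_model("nomic-embed-text:latest") == "ollama/nomic-embed-text:latest"
assert format_model("text-embedding-3-small") == "text-embedding-3-small"
assert format_model("ibm/slate-125m-english-rtrvr") == "watsonx/ibm/slate-125m-english-rtrvr"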