Merge pull request #55 from langflow-ai/lfx-openrag-update-flows

Commit 1628f84ef5: 25 changed files with 6,633 additions and 4,966 deletions
.DS_Store (vendored): binary file not shown
@@ -7,7 +7,7 @@ ENV RUSTFLAGS="--cfg reqwest_unstable"
 # Accept build arguments for git repository and branch
 ARG GIT_REPO=https://github.com/langflow-ai/langflow.git
-ARG GIT_BRANCH=load_flows_autologin_false
+ARG GIT_BRANCH=test-openai-responses

 WORKDIR /app
@@ -40,9 +40,9 @@ services:
   openrag-backend:
     image: phact/openrag-backend:${OPENRAG_VERSION:-latest}
-    #build:
-    #context: .
-    #dockerfile: Dockerfile.backend
+    # build:
+    # context: .
+    # dockerfile: Dockerfile.backend
     container_name: openrag-backend
     depends_on:
       - langflow
@@ -77,9 +77,10 @@ services:
   openrag-frontend:
     image: phact/openrag-frontend:${OPENRAG_VERSION:-latest}
-    #build:
-    #context: .
-    #dockerfile: Dockerfile.frontend
+    # build:
+    # context: .
+    # dockerfile: Dockerfile.frontend
+    # #dockerfile: Dockerfile.frontend
     container_name: openrag-frontend
     depends_on:
      - openrag-backend
@@ -92,6 +93,9 @@ services:
     volumes:
       - ./flows:/app/flows:z
     image: phact/openrag-langflow:${LANGFLOW_VERSION:-latest}
+    # build:
+    # context: .
+    # dockerfile: Dockerfile.langflow
     container_name: langflow
     ports:
      - "7860:7860"
@@ -99,7 +103,7 @@ services:
      - OPENAI_API_KEY=${OPENAI_API_KEY}
      - LANGFLOW_LOAD_FLOWS_PATH=/app/flows
      - LANGFLOW_SECRET_KEY=${LANGFLOW_SECRET_KEY}
-     - JWT="dummy"
+     - JWT=None
      - OWNER=None
      - OWNER_NAME=None
      - OWNER_EMAIL=None
@@ -1,28 +1,114 @@
{
"data": {
"id": "OllamaEmbeddings-4ah5Q",
"node": {
"base_classes": [
"Embeddings"
],
"beta": false,
"conditional_paths": [],
"custom_fields": {},
"template": {
"_type": "Component",
"base_url": {
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"load_from_db": true,
"list": false,
"list_add_label": "Add More",
"required": true,
"placeholder": "",
"show": true,
"name": "base_url",
"value": "OLLAMA_BASE_URL",
"display_name": "Ollama Base URL",
"advanced": false,
"input_types": ["Message"],
"dynamic": false,
"info": "",
"title_case": false,
"type": "str",
"_input_type": "MessageTextInput"
},
"code": {
"type": "code",
"required": true,
"placeholder": "",
"list": false,
"show": true,
"multiline": true,
"value": "from typing import Any\nfrom urllib.parse import urljoin\n\nimport httpx\nfrom langchain_ollama import OllamaEmbeddings\n\nfrom lfx.base.models.model import LCModelComponent\nfrom lfx.base.models.ollama_constants import OLLAMA_EMBEDDING_MODELS, URL_LIST\nfrom lfx.field_typing import Embeddings\nfrom lfx.io import DropdownInput, MessageTextInput, Output\n\nHTTP_STATUS_OK = 200\n\n\nclass OllamaEmbeddingsComponent(LCModelComponent):\n display_name: str = \"Ollama Embeddings\"\n description: str = \"Generate embeddings using Ollama models.\"\n documentation = \"https://python.langchain.com/docs/integrations/text_embedding/ollama\"\n icon = \"Ollama\"\n name = \"OllamaEmbeddings\"\n\n inputs = [\n DropdownInput(\n name=\"model_name\",\n display_name=\"Ollama Model\",\n value=\"\",\n options=[],\n real_time_refresh=True,\n refresh_button=True,\n combobox=True,\n required=True,\n ),\n MessageTextInput(\n name=\"base_url\",\n display_name=\"Ollama Base URL\",\n value=\"\",\n required=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Embeddings\", name=\"embeddings\", method=\"build_embeddings\"),\n ]\n\n def build_embeddings(self) -> Embeddings:\n try:\n output = OllamaEmbeddings(model=self.model_name, base_url=self.base_url)\n except Exception as e:\n msg = (\n \"Unable to connect to the Ollama API. \",\n \"Please verify the base URL, ensure the relevant Ollama model is pulled, and try again.\",\n )\n raise ValueError(msg) from e\n return output\n\n async def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None):\n if field_name in {\"base_url\", \"model_name\"} and not await self.is_valid_ollama_url(field_value):\n # Check if any URL in the list is valid\n valid_url = \"\"\n for url in URL_LIST:\n if await self.is_valid_ollama_url(url):\n valid_url = url\n break\n build_config[\"base_url\"][\"value\"] = valid_url\n if field_name in {\"model_name\", \"base_url\", \"tool_model_enabled\"}:\n if await self.is_valid_ollama_url(self.base_url):\n build_config[\"model_name\"][\"options\"] = await self.get_model(self.base_url)\n elif await self.is_valid_ollama_url(build_config[\"base_url\"].get(\"value\", \"\")):\n build_config[\"model_name\"][\"options\"] = await self.get_model(build_config[\"base_url\"].get(\"value\", \"\"))\n else:\n build_config[\"model_name\"][\"options\"] = []\n\n return build_config\n\n async def get_model(self, base_url_value: str) -> list[str]:\n \"\"\"Get the model names from Ollama.\"\"\"\n model_ids = []\n try:\n url = urljoin(base_url_value, \"/api/tags\")\n async with httpx.AsyncClient() as client:\n response = await client.get(url)\n response.raise_for_status()\n data = response.json()\n\n model_ids = [model[\"name\"] for model in data.get(\"models\", [])]\n # this to ensure that not embedding models are included.\n # not even the base models since models can have 1b 2b etc\n # handles cases when embeddings models have tags like :latest - etc.\n model_ids = [\n model\n for model in model_ids\n if any(model.startswith(f\"{embedding_model}\") for embedding_model in OLLAMA_EMBEDDING_MODELS)\n ]\n\n except (ImportError, ValueError, httpx.RequestError) as e:\n msg = \"Could not get model names from Ollama.\"\n raise ValueError(msg) from e\n\n return model_ids\n\n async def is_valid_ollama_url(self, url: str) -> bool:\n try:\n async with httpx.AsyncClient() as client:\n return (await client.get(f\"{url}/api/tags\")).status_code == HTTP_STATUS_OK\n except httpx.RequestError:\n return False\n",
"fileTypes": [],
"file_path": "",
"password": false,
"name": "code",
"advanced": true,
"dynamic": true,
"info": "",
"load_from_db": false,
"title_case": false
},
"model_name": {
"tool_mode": false,
"trace_as_metadata": true,
"options": ["nomic-embed-text:latest", "all-minilm:latest"],
"options_metadata": [],
"combobox": true,
"dialog_inputs": {},
"toggle": false,
"required": true,
"placeholder": "",
"show": true,
"name": "model_name",
"value": "",
"display_name": "Ollama Model",
"advanced": false,
"dynamic": false,
"info": "",
"real_time_refresh": true,
"refresh_button": true,
"title_case": false,
"external_options": {},
"type": "str",
"_input_type": "DropdownInput"
}
},
"description": "Generate embeddings using Ollama models.",
"icon": "Ollama",
"base_classes": ["Embeddings"],
"display_name": "Ollama Embeddings",
"documentation": "https://python.langchain.com/docs/integrations/text_embedding/ollama",
"edited": false,
"field_order": [
"model_name",
"base_url"
],
"minimized": false,
"custom_fields": {},
"output_types": [],
"pinned": false,
"conditional_paths": [],
"frozen": false,
"icon": "Ollama",
"last_updated": "2025-09-22T20:18:27.128Z",
"outputs": [
{
"types": ["Embeddings"],
"selected": "Embeddings",
"name": "embeddings",
"display_name": "Embeddings",
"method": "build_embeddings",
"value": "__UNDEFINED__",
"cache": true,
"required_inputs": null,
"allows_loop": false,
"group_outputs": false,
"options": null,
"tool_mode": true
}
],
"field_order": ["model_name", "base_url"],
"beta": false,
"legacy": false,
"edited": false,
"metadata": {
"code_hash": "0db0f99e91e9",
"keywords": [
"model",
"llm",
"language model",
"large language model"
],
"module": "lfx.components.ollama.ollama_embeddings.OllamaEmbeddingsComponent",
"code_hash": "c41821735548",
"dependencies": {
"total_dependencies": 3,
"dependencies": [
{
"name": "httpx",
@@ -33,125 +119,24 @@
"version": "0.2.1"
},
{
"name": "langflow",
"name": "lfx",
"version": null
}
],
"total_dependencies": 3
},
"keywords": [
"model",
"llm",
"language model",
"large language model"
],
"module": "langflow.components.ollama.ollama_embeddings.OllamaEmbeddingsComponent"
},
"minimized": false,
"output_types": [],
"outputs": [
{
"allows_loop": false,
"cache": true,
"display_name": "Embeddings",
"group_outputs": false,
"method": "build_embeddings",
"name": "embeddings",
"options": null,
"required_inputs": null,
"selected": "Embeddings",
"tool_mode": true,
"types": [
"Embeddings"
],
"value": "__UNDEFINED__"
}
],
"pinned": false,
"template": {
"_type": "Component",
"base_url": {
"_input_type": "MessageTextInput",
"advanced": false,
"display_name": "Ollama Base URL",
"dynamic": false,
"info": "",
"input_types": [
"Message"
],
"list": false,
"list_add_label": "Add More",
"load_from_db": true,
"name": "base_url",
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"tool_mode": false,
"trace_as_input": true,
"trace_as_metadata": true,
"type": "str",
"value": "OLLAMA_BASE_URL"
},
"code": {
"advanced": true,
"dynamic": true,
"fileTypes": [],
"file_path": "",
"info": "",
"list": false,
"load_from_db": false,
"multiline": true,
"name": "code",
"password": false,
"placeholder": "",
"required": true,
"show": true,
"title_case": false,
"type": "code",
"value": "from typing import Any\nfrom urllib.parse import urljoin\n\nimport httpx\nfrom langchain_ollama import OllamaEmbeddings\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.ollama_constants import OLLAMA_EMBEDDING_MODELS, URL_LIST\nfrom langflow.field_typing import Embeddings\nfrom langflow.io import DropdownInput, MessageTextInput, Output\n\nHTTP_STATUS_OK = 200\n\n\nclass OllamaEmbeddingsComponent(LCModelComponent):\n display_name: str = \"Ollama Embeddings\"\n description: str = \"Generate embeddings using Ollama models.\"\n documentation = \"https://python.langchain.com/docs/integrations/text_embedding/ollama\"\n icon = \"Ollama\"\n name = \"OllamaEmbeddings\"\n\n inputs = [\n DropdownInput(\n name=\"model_name\",\n display_name=\"Ollama Model\",\n value=\"\",\n options=[],\n real_time_refresh=True,\n refresh_button=True,\n combobox=True,\n required=True,\n ),\n MessageTextInput(\n name=\"base_url\",\n display_name=\"Ollama Base URL\",\n value=\"\",\n required=True,\n ),\n ]\n\n outputs = [\n Output(display_name=\"Embeddings\", name=\"embeddings\", method=\"build_embeddings\"),\n ]\n\n def build_embeddings(self) -> Embeddings:\n try:\n output = OllamaEmbeddings(model=self.model_name, base_url=self.base_url)\n except Exception as e:\n msg = (\n \"Unable to connect to the Ollama API. \",\n \"Please verify the base URL, ensure the relevant Ollama model is pulled, and try again.\",\n )\n raise ValueError(msg) from e\n return output\n\n async def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None):\n if field_name in {\"base_url\", \"model_name\"} and not await self.is_valid_ollama_url(field_value):\n # Check if any URL in the list is valid\n valid_url = \"\"\n for url in URL_LIST:\n if await self.is_valid_ollama_url(url):\n valid_url = url\n break\n build_config[\"base_url\"][\"value\"] = valid_url\n if field_name in {\"model_name\", \"base_url\", \"tool_model_enabled\"}:\n if await self.is_valid_ollama_url(self.base_url):\n build_config[\"model_name\"][\"options\"] = await self.get_model(self.base_url)\n elif await self.is_valid_ollama_url(build_config[\"base_url\"].get(\"value\", \"\")):\n build_config[\"model_name\"][\"options\"] = await self.get_model(build_config[\"base_url\"].get(\"value\", \"\"))\n else:\n build_config[\"model_name\"][\"options\"] = []\n\n return build_config\n\n async def get_model(self, base_url_value: str) -> list[str]:\n \"\"\"Get the model names from Ollama.\"\"\"\n model_ids = []\n try:\n url = urljoin(base_url_value, \"/api/tags\")\n async with httpx.AsyncClient() as client:\n response = await client.get(url)\n response.raise_for_status()\n data = response.json()\n\n model_ids = [model[\"name\"] for model in data.get(\"models\", [])]\n # this to ensure that not embedding models are included.\n # not even the base models since models can have 1b 2b etc\n # handles cases when embeddings models have tags like :latest - etc.\n model_ids = [\n model\n for model in model_ids\n if any(model.startswith(f\"{embedding_model}\") for embedding_model in OLLAMA_EMBEDDING_MODELS)\n ]\n\n except (ImportError, ValueError, httpx.RequestError) as e:\n msg = \"Could not get model names from Ollama.\"\n raise ValueError(msg) from e\n\n return model_ids\n\n async def is_valid_ollama_url(self, url: str) -> bool:\n try:\n async with httpx.AsyncClient() as client:\n return (await client.get(f\"{url}/api/tags\")).status_code == HTTP_STATUS_OK\n except httpx.RequestError:\n return False\n"
},
"model_name": {
"_input_type": "DropdownInput",
"advanced": false,
"combobox": true,
"dialog_inputs": {},
"display_name": "Ollama Model",
"dynamic": false,
"info": "",
"name": "model_name",
"options": [
"all-minilm:latest"
],
"options_metadata": [],
"placeholder": "",
"real_time_refresh": true,
"refresh_button": true,
"required": true,
"show": true,
"title_case": false,
"toggle": false,
"tool_mode": false,
"trace_as_metadata": true,
"type": "str",
"value": "all-minilm:latest"
]
}
},
"tool_mode": false
"tool_mode": false,
"last_updated": "2025-09-29T18:40:10.242Z",
"official": false
},
"showNode": true,
"type": "OllamaEmbeddings"
},
"dragging": false,
"id": "OllamaEmbeddings-4ah5Q",
"measured": {
"height": 286,
"width": 320
"type": "OllamaEmbeddings",
"id": "OllamaEmbeddings-vnNn8"
},
"id": "OllamaEmbeddings-vnNn8",
"position": {
"x": 282.29416840859585,
"y": 279.4218065717267
"x": 0,
"y": 0
},
"selected": false,
"type": "genericNode"
}
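The component code embedded in this flow (the "code" template field above) probes Ollama by requesting /api/tags and treating HTTP 200 as a live server, then keeps only models whose name starts with a known embedding-model prefix, so tagged variants such as nomic-embed-text:latest still match. A standalone sketch of that probe; the EMBEDDING_PREFIXES list is an illustrative subset of the real OLLAMA_EMBEDDING_MODELS constant, and the default port is an assumption:

import asyncio

import httpx

# Illustrative subset; the real list lives in lfx.base.models.ollama_constants
EMBEDDING_PREFIXES = ["nomic-embed-text", "all-minilm", "mxbai-embed-large"]

async def list_embedding_models(base_url: str = "http://localhost:11434") -> list[str]:
    # A 200 from /api/tags means the Ollama server is reachable
    async with httpx.AsyncClient() as client:
        resp = await client.get(f"{base_url}/api/tags")
        resp.raise_for_status()
        names = [m["name"] for m in resp.json().get("models", [])]
    # Keep only embedding models, tolerating tag suffixes such as ":latest"
    return [n for n in names if any(n.startswith(p) for p in EMBEDDING_PREFIXES)]

if __name__ == "__main__":
    print(asyncio.run(list_embedding_models()))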
File diff suppressed because one or more lines are too long (9 files)
@@ -1,29 +1,29 @@
-"use client"
+"use client";

-import * as React from "react"
-import * as SwitchPrimitives from "@radix-ui/react-switch"
+import * as SwitchPrimitives from "@radix-ui/react-switch";
+import * as React from "react";

-import { cn } from "@/lib/utils"
+import { cn } from "@/lib/utils";

 const Switch = React.forwardRef<
   React.ElementRef<typeof SwitchPrimitives.Root>,
   React.ComponentPropsWithoutRef<typeof SwitchPrimitives.Root>
 >(({ className, ...props }, ref) => (
   <SwitchPrimitives.Root
     className={cn(
       "peer inline-flex h-6 w-11 shrink-0 cursor-pointer items-center rounded-full border-2 border-transparent transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 focus-visible:ring-offset-background disabled:cursor-not-allowed disabled:opacity-50 data-[state=checked]:bg-primary data-[state=unchecked]:bg-muted",
-      className
+      className,
     )}
     {...props}
     ref={ref}
   >
     <SwitchPrimitives.Thumb
       className={cn(
-        "pointer-events-none block h-5 w-5 rounded-full bg-background shadow-lg ring-0 transition-transform data-[state=checked]:translate-x-5 data-[state=unchecked]:translate-x-0 data-[state=unchecked]:bg-primary"
+        "pointer-events-none block h-5 w-5 rounded-full bg-background shadow-lg ring-0 transition-transform data-[state=checked]:translate-x-5 data-[state=unchecked]:translate-x-0 data-[state=unchecked]:bg-primary",
       )}
     />
   </SwitchPrimitives.Root>
-))
-Switch.displayName = SwitchPrimitives.Root.displayName
+));
+Switch.displayName = SwitchPrimitives.Root.displayName;

-export { Switch }
+export { Switch };
@@ -1,87 +1,97 @@
-import OpenAILogo from "@/components/logo/openai-logo";
-import OllamaLogo from "@/components/logo/ollama-logo";
 import IBMLogo from "@/components/logo/ibm-logo";
+import OllamaLogo from "@/components/logo/ollama-logo";
+import OpenAILogo from "@/components/logo/openai-logo";

-export type ModelProvider = 'openai' | 'ollama' | 'ibm';
+export type ModelProvider = "openai" | "ollama" | "watsonx";

 export interface ModelOption {
   value: string;
   label: string;
 }

 // Helper function to get model logo based on provider or model name
 export function getModelLogo(modelValue: string, provider?: ModelProvider) {
   // First check by provider
-  if (provider === 'openai') {
-    return <OpenAILogo className="w-4 h-4" />;
-  } else if (provider === 'ollama') {
-    return <OllamaLogo className="w-4 h-4" />;
-  } else if (provider === 'ibm') {
-    return <IBMLogo className="w-4 h-4" />;
-  }
+  if (provider === "openai") {
+    return <OpenAILogo className="w-4 h-4" />;
+  } else if (provider === "ollama") {
+    return <OllamaLogo className="w-4 h-4" />;
+  } else if (provider === "watsonx") {
+    return <IBMLogo className="w-4 h-4" />;
+  }

   // Fallback to model name analysis
-  if (modelValue.includes('gpt') || modelValue.includes('text-embedding')) {
-    return <OpenAILogo className="w-4 h-4" />;
-  } else if (modelValue.includes('llama') || modelValue.includes('ollama')) {
-    return <OllamaLogo className="w-4 h-4" />;
-  } else if (modelValue.includes('granite') || modelValue.includes('slate') || modelValue.includes('ibm')) {
-    return <IBMLogo className="w-4 h-4" />;
-  }
+  if (modelValue.includes("gpt") || modelValue.includes("text-embedding")) {
+    return <OpenAILogo className="w-4 h-4" />;
+  } else if (modelValue.includes("llama") || modelValue.includes("ollama")) {
+    return <OllamaLogo className="w-4 h-4" />;
+  } else if (
+    modelValue.includes("granite") ||
+    modelValue.includes("slate") ||
+    modelValue.includes("ibm")
+  ) {
+    return <IBMLogo className="w-4 h-4" />;
+  }

   return <OpenAILogo className="w-4 h-4" />; // Default to OpenAI logo
 }

 // Helper function to get fallback models by provider
 export function getFallbackModels(provider: ModelProvider) {
   switch (provider) {
-    case 'openai':
-      return {
-        language: [
-          { value: 'gpt-4', label: 'GPT-4' },
-          { value: 'gpt-4-turbo', label: 'GPT-4 Turbo' },
-          { value: 'gpt-3.5-turbo', label: 'GPT-3.5 Turbo' },
-        ],
-        embedding: [
-          { value: 'text-embedding-ada-002', label: 'text-embedding-ada-002' },
-          { value: 'text-embedding-3-small', label: 'text-embedding-3-small' },
-          { value: 'text-embedding-3-large', label: 'text-embedding-3-large' },
-        ],
-      };
-    case 'ollama':
-      return {
-        language: [
-          { value: 'llama2', label: 'Llama 2' },
-          { value: 'llama2:13b', label: 'Llama 2 13B' },
-          { value: 'codellama', label: 'Code Llama' },
-        ],
-        embedding: [
-          { value: 'mxbai-embed-large', label: 'MxBai Embed Large' },
-          { value: 'nomic-embed-text', label: 'Nomic Embed Text' },
-        ],
-      };
-    case 'ibm':
-      return {
-        language: [
-          { value: 'meta-llama/llama-3-1-70b-instruct', label: 'Llama 3.1 70B Instruct' },
-          { value: 'ibm/granite-13b-chat-v2', label: 'Granite 13B Chat v2' },
-        ],
-        embedding: [
-          { value: 'ibm/slate-125m-english-rtrvr', label: 'Slate 125M English Retriever' },
-        ],
-      };
-    default:
-      return {
-        language: [
-          { value: 'gpt-4', label: 'GPT-4' },
-          { value: 'gpt-4-turbo', label: 'GPT-4 Turbo' },
-          { value: 'gpt-3.5-turbo', label: 'GPT-3.5 Turbo' },
-        ],
-        embedding: [
-          { value: 'text-embedding-ada-002', label: 'text-embedding-ada-002' },
-          { value: 'text-embedding-3-small', label: 'text-embedding-3-small' },
-          { value: 'text-embedding-3-large', label: 'text-embedding-3-large' },
-        ],
-      };
-  }
-}
+    case "openai":
+      return {
+        language: [
+          { value: "gpt-4", label: "GPT-4" },
+          { value: "gpt-4-turbo", label: "GPT-4 Turbo" },
+          { value: "gpt-3.5-turbo", label: "GPT-3.5 Turbo" },
+        ],
+        embedding: [
+          { value: "text-embedding-ada-002", label: "text-embedding-ada-002" },
+          { value: "text-embedding-3-small", label: "text-embedding-3-small" },
+          { value: "text-embedding-3-large", label: "text-embedding-3-large" },
+        ],
+      };
+    case "ollama":
+      return {
+        language: [
+          { value: "llama2", label: "Llama 2" },
+          { value: "llama2:13b", label: "Llama 2 13B" },
+          { value: "codellama", label: "Code Llama" },
+        ],
+        embedding: [
+          { value: "mxbai-embed-large", label: "MxBai Embed Large" },
+          { value: "nomic-embed-text", label: "Nomic Embed Text" },
+        ],
+      };
+    case "watsonx":
+      return {
+        language: [
+          {
+            value: "meta-llama/llama-3-1-70b-instruct",
+            label: "Llama 3.1 70B Instruct",
+          },
+          { value: "ibm/granite-13b-chat-v2", label: "Granite 13B Chat v2" },
+        ],
+        embedding: [
+          {
+            value: "ibm/slate-125m-english-rtrvr",
+            label: "Slate 125M English Retriever",
+          },
+        ],
+      };
+    default:
+      return {
+        language: [
+          { value: "gpt-4", label: "GPT-4" },
+          { value: "gpt-4-turbo", label: "GPT-4 Turbo" },
+          { value: "gpt-3.5-turbo", label: "GPT-3.5 Turbo" },
+        ],
+        embedding: [
+          { value: "text-embedding-ada-002", label: "text-embedding-ada-002" },
+          { value: "text-embedding-3-small", label: "text-embedding-3-small" },
+          { value: "text-embedding-3-large", label: "text-embedding-3-large" },
+        ],
+      };
+  }
+}
File diff suppressed because it is too large
@@ -70,22 +70,22 @@ async def run_ingestion(
     if settings:
         logger.debug("Applying ingestion settings", settings=settings)

-        # Split Text component tweaks (SplitText-QIKhg)
+        # Split Text component tweaks (SplitText-PC36h)
         if (
             settings.get("chunkSize")
             or settings.get("chunkOverlap")
             or settings.get("separator")
         ):
-            if "SplitText-QIKhg" not in tweaks:
-                tweaks["SplitText-QIKhg"] = {}
+            if "SplitText-PC36h" not in tweaks:
+                tweaks["SplitText-PC36h"] = {}
             if settings.get("chunkSize"):
-                tweaks["SplitText-QIKhg"]["chunk_size"] = settings["chunkSize"]
+                tweaks["SplitText-PC36h"]["chunk_size"] = settings["chunkSize"]
             if settings.get("chunkOverlap"):
-                tweaks["SplitText-QIKhg"]["chunk_overlap"] = settings[
+                tweaks["SplitText-PC36h"]["chunk_overlap"] = settings[
                     "chunkOverlap"
                 ]
             if settings.get("separator"):
-                tweaks["SplitText-QIKhg"]["separator"] = settings["separator"]
+                tweaks["SplitText-PC36h"]["separator"] = settings["separator"]

         # OpenAI Embeddings component tweaks (OpenAIEmbeddings-joRJ6)
         if settings.get("embeddingModel"):
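Note that the tweak keys above are raw component IDs, so regenerating the flow (SplitText-QIKhg becoming SplitText-PC36h) forces exactly this kind of code change. A minimal sketch of the payload this block builds, with illustrative settings values:

# Hypothetical example of the tweaks mapping sent along with a Langflow run
settings = {"chunkSize": 1000, "chunkOverlap": 200, "separator": "\n"}
tweaks: dict[str, dict] = {}

component_id = "SplitText-PC36h"  # must match the node ID in the deployed flow
if any(settings.get(k) for k in ("chunkSize", "chunkOverlap", "separator")):
    node = tweaks.setdefault(component_id, {})
    if settings.get("chunkSize"):
        node["chunk_size"] = settings["chunkSize"]
    if settings.get("chunkOverlap"):
        node["chunk_overlap"] = settings["chunkOverlap"]
    if settings.get("separator"):
        node["separator"] = settings["separator"]

# tweaks == {"SplitText-PC36h": {"chunk_size": 1000, "chunk_overlap": 200, "separator": "\n"}}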
@@ -275,4 +275,4 @@ async def delete_user_files(
             status_code=status,
         )
     except Exception as e:
-        return JSONResponse({"error": str(e)}, status_code=500)
\ No newline at end of file
+        return JSONResponse({"error": str(e)}, status_code=500)
@@ -140,6 +140,14 @@ async def langflow_upload_ingest_task(
     )

     # Create langflow upload task
+    print(f"tweaks: {tweaks}")
+    print(f"settings: {settings}")
+    print(f"jwt_token: {jwt_token}")
+    print(f"user_name: {user_name}")
+    print(f"user_email: {user_email}")
+    print(f"session_id: {session_id}")
+    print(f"delete_after_ingest: {delete_after_ingest}")
+    print(f"temp_file_paths: {temp_file_paths}")
    task_id = await task_service.create_langflow_upload_task(
        user_id=user_id,
        file_paths=temp_file_paths,
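The added print statements dump request context to stdout; the structured logger already used in this module (structlog-style keyword arguments, as in the logger.debug calls elsewhere in this diff) would carry the same information. A sketch, with the JWT deliberately left out:

import structlog

logger = structlog.get_logger()

def log_upload_context(tweaks, settings, user_name, user_email, session_id,
                       delete_after_ingest, temp_file_paths):
    # jwt_token intentionally omitted: avoid writing credentials to logs
    logger.debug(
        "Creating langflow upload task",
        tweaks=tweaks,
        settings=settings,
        user_name=user_name,
        user_email=user_email,
        session_id=session_id,
        delete_after_ingest=delete_after_ingest,
        temp_file_paths=temp_file_paths,
    )

log_upload_context({}, {}, "anon", "anon@example.com", "sess-1", True, [])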
@@ -9,7 +9,6 @@ from config.settings import (
     LANGFLOW_CHAT_FLOW_ID,
     LANGFLOW_INGEST_FLOW_ID,
     LANGFLOW_PUBLIC_URL,
-    DOCLING_COMPONENT_ID,
     LOCALHOST_URL,
     clients,
     get_openrag_config,
@@ -206,7 +205,7 @@ async def update_settings(request, session_manager):
             # Also update the chat flow with the new model
             try:
                 flows_service = _get_flows_service()
-                await flows_service.update_chat_flow_model(body["llm_model"])
+                await flows_service.update_chat_flow_model(body["llm_model"], current_config.provider.model_provider.lower())
                 logger.info(
                     f"Successfully updated chat flow model to '{body['llm_model']}'"
                 )
@@ -223,7 +222,8 @@ async def update_settings(request, session_manager):
             try:
                 flows_service = _get_flows_service()
                 await flows_service.update_chat_flow_system_prompt(
-                    body["system_prompt"]
+                    body["system_prompt"],
+                    current_config.provider.model_provider.lower()
                 )
                 logger.info(f"Successfully updated chat flow system prompt")
             except Exception as e:
@@ -248,7 +248,8 @@ async def update_settings(request, session_manager):
             try:
                 flows_service = _get_flows_service()
                 await flows_service.update_ingest_flow_embedding_model(
-                    body["embedding_model"].strip()
+                    body["embedding_model"].strip(),
+                    current_config.provider.model_provider.lower()
                 )
                 logger.info(
                     f"Successfully updated ingest flow embedding model to '{body['embedding_model'].strip()}'"
@@ -543,38 +543,28 @@ OLLAMA_EMBEDDING_COMPONENT_PATH = os.getenv(

 # Component IDs in flows

-OPENAI_EMBEDDING_COMPONENT_ID = os.getenv(
-    "OPENAI_EMBEDDING_COMPONENT_ID", "EmbeddingModel-eZ6bT"
+OPENAI_EMBEDDING_COMPONENT_DISPLAY_NAME = os.getenv(
+    "OPENAI_EMBEDDING_COMPONENT_DISPLAY_NAME", "Embedding Model"
 )
-OPENAI_LLM_COMPONENT_ID = os.getenv(
-    "OPENAI_LLM_COMPONENT_ID", "LanguageModelComponent-0YME7"
-)
-OPENAI_LLM_TEXT_COMPONENT_ID = os.getenv(
-    "OPENAI_LLM_TEXT_COMPONENT_ID", "LanguageModelComponent-NSTA6"
+OPENAI_LLM_COMPONENT_DISPLAY_NAME = os.getenv(
+    "OPENAI_LLM_COMPONENT_DISPLAY_NAME", "Language Model"
 )

 # Provider-specific component IDs
-WATSONX_EMBEDDING_COMPONENT_ID = os.getenv(
-    "WATSONX_EMBEDDING_COMPONENT_ID", "WatsonxEmbeddingsComponent-pJfXI"
+WATSONX_EMBEDDING_COMPONENT_DISPLAY_NAME = os.getenv(
+    "WATSONX_EMBEDDING_COMPONENT_DISPLAY_NAME", "IBM watsonx.ai Embeddings"
 )
-WATSONX_LLM_COMPONENT_ID = os.getenv(
-    "WATSONX_LLM_COMPONENT_ID", "IBMwatsonxModel-jA4Nw"
-)
-WATSONX_LLM_TEXT_COMPONENT_ID = os.getenv(
-    "WATSONX_LLM_TEXT_COMPONENT_ID", "IBMwatsonxModel-18kmA"
+WATSONX_LLM_COMPONENT_DISPLAY_NAME = os.getenv(
+    "WATSONX_LLM_COMPONENT_DISPLAY_NAME", "IBM watsonx.ai"
 )


-OLLAMA_EMBEDDING_COMPONENT_ID = os.getenv(
-    "OLLAMA_EMBEDDING_COMPONENT_ID", "OllamaEmbeddings-4ah5Q"
-)
-OLLAMA_LLM_COMPONENT_ID = os.getenv("OLLAMA_LLM_COMPONENT_ID", "OllamaModel-eCsJx")
-OLLAMA_LLM_TEXT_COMPONENT_ID = os.getenv(
-    "OLLAMA_LLM_TEXT_COMPONENT_ID", "OllamaModel-XDGqZ"
+OLLAMA_EMBEDDING_COMPONENT_DISPLAY_NAME = os.getenv(
+    "OLLAMA_EMBEDDING_COMPONENT_DISPLAY_NAME", "Ollama Model"
 )
+OLLAMA_LLM_COMPONENT_DISPLAY_NAME = os.getenv("OLLAMA_LLM_COMPONENT_DISPLAY_NAME", "Ollama")

 # Docling component ID for ingest flow
-DOCLING_COMPONENT_ID = os.getenv("DOCLING_COMPONENT_ID", "DoclingRemote-78KoX")
+DOCLING_COMPONENT_DISPLAY_NAME = os.getenv("DOCLING_COMPONENT_DISPLAY_NAME", "Docling Serve")

 LOCALHOST_URL = get_container_host() or "localhost"
@@ -382,6 +382,10 @@ async def _ingest_default_documents_langflow(services, file_paths):
             settings=None,  # Use default ingestion settings
             jwt_token=effective_jwt,  # Use JWT token (anonymous if needed)
             delete_after_ingest=True,  # Clean up after ingestion
+            owner=None,
+            owner_name=anonymous_user.name,
+            owner_email=anonymous_user.email,
+            connector_type="system_default",
         )

         logger.info(
@@ -1189,4 +1193,4 @@ if __name__ == "__main__":
         host="0.0.0.0",
         port=8000,
         reload=False,  # Disable reload since we're running from main
-    )
\ No newline at end of file
+    )
@@ -625,7 +625,12 @@ class LangflowFileProcessor(TaskProcessor):
                 tweaks=final_tweaks,
                 settings=self.settings,
                 jwt_token=effective_jwt,
-                delete_after_ingest=self.delete_after_ingest
+                delete_after_ingest=self.delete_after_ingest,
+                owner=self.owner_user_id,
+                owner_name=self.owner_name,
+                owner_email=self.owner_email,
+                connector_type="local",
             )

             # Update task with success
@@ -640,4 +645,4 @@ class LangflowFileProcessor(TaskProcessor):
             file_task.error_message = str(e)
             file_task.updated_at = time.time()
             upload_task.failed_files += 1
-            raise
\ No newline at end of file
+            raise
@@ -110,15 +110,14 @@ class ChatService:
             filter_expression["score_threshold"] = score_threshold

         # Pass the complete filter expression as a single header to Langflow (only if we have something to send)
-        if filter_expression:
-            logger.info(
-                "Sending OpenRAG query filter to Langflow",
-                filter_expression=filter_expression,
-            )
-            extra_headers["X-LANGFLOW-GLOBAL-VAR-OPENRAG-QUERY-FILTER"] = json.dumps(
-                filter_expression
-            )
-
+        logger.info(
+            "Sending OpenRAG query filter to Langflow",
+            filter_expression=filter_expression,
+        )
+        extra_headers["X-LANGFLOW-GLOBAL-VAR-OPENRAG-QUERY-FILTER"] = json.dumps(
+            filter_expression
+        )
+        logger.info(f"[LF] Extra headers {extra_headers}")
         # Ensure the Langflow client exists; try lazy init if needed
         langflow_client = await clients.ensure_langflow_client()
         if not langflow_client:
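After this change the filter always travels to Langflow as one JSON-encoded global-variable header; the removed if guard previously skipped it when empty. A small sketch of the header that results (filter values illustrative):

import json

# Hypothetical filter; with the guard gone, an empty dict would now be sent too
filter_expression = {"owner": "user-123", "score_threshold": 0.2}

extra_headers = {
    "X-LANGFLOW-GLOBAL-VAR-OPENRAG-QUERY-FILTER": json.dumps(filter_expression)
}
# -> {'X-LANGFLOW-GLOBAL-VAR-OPENRAG-QUERY-FILTER': '{"owner": "user-123", "score_threshold": 0.2}'}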
@@ -1,26 +1,22 @@
 import asyncio
 from config.settings import (
     DISABLE_INGEST_WITH_LANGFLOW,
     NUDGES_FLOW_ID,
     LANGFLOW_URL,
     LANGFLOW_CHAT_FLOW_ID,
     LANGFLOW_INGEST_FLOW_ID,
-    OLLAMA_LLM_TEXT_COMPONENT_ID,
     OLLAMA_LLM_TEXT_COMPONENT_PATH,
-    OPENAI_EMBEDDING_COMPONENT_ID,
-    OPENAI_LLM_COMPONENT_ID,
-    OPENAI_LLM_TEXT_COMPONENT_ID,
-    WATSONX_LLM_TEXT_COMPONENT_ID,
+    OPENAI_EMBEDDING_COMPONENT_DISPLAY_NAME,
+    OPENAI_LLM_COMPONENT_DISPLAY_NAME,
     WATSONX_LLM_TEXT_COMPONENT_PATH,
     clients,
     WATSONX_LLM_COMPONENT_PATH,
     WATSONX_EMBEDDING_COMPONENT_PATH,
     OLLAMA_LLM_COMPONENT_PATH,
     OLLAMA_EMBEDDING_COMPONENT_PATH,
-    WATSONX_EMBEDDING_COMPONENT_ID,
-    WATSONX_LLM_COMPONENT_ID,
-    OLLAMA_EMBEDDING_COMPONENT_ID,
-    OLLAMA_LLM_COMPONENT_ID,
+    WATSONX_EMBEDDING_COMPONENT_DISPLAY_NAME,
+    WATSONX_LLM_COMPONENT_DISPLAY_NAME,
+    OLLAMA_EMBEDDING_COMPONENT_DISPLAY_NAME,
+    OLLAMA_LLM_COMPONENT_DISPLAY_NAME,
     get_openrag_config,
 )
 import json
@@ -277,23 +273,23 @@ class FlowsService:
             {
                 "name": "nudges",
                 "flow_id": NUDGES_FLOW_ID,
-                "embedding_id": OPENAI_EMBEDDING_COMPONENT_ID,
-                "llm_id": OPENAI_LLM_COMPONENT_ID,
-                "llm_text_id": OPENAI_LLM_TEXT_COMPONENT_ID,
+                "embedding_name": OPENAI_EMBEDDING_COMPONENT_DISPLAY_NAME,
+                "llm_text_name": OPENAI_LLM_COMPONENT_DISPLAY_NAME,
+                "llm_name": None,
             },
             {
                 "name": "retrieval",
                 "flow_id": LANGFLOW_CHAT_FLOW_ID,
-                "embedding_id": OPENAI_EMBEDDING_COMPONENT_ID,
-                "llm_id": OPENAI_LLM_COMPONENT_ID,
-                "llm_text_id": None,
+                "embedding_name": OPENAI_EMBEDDING_COMPONENT_DISPLAY_NAME,
+                "llm_name": OPENAI_LLM_COMPONENT_DISPLAY_NAME,
+                "llm_text_name": None,
             },
             {
                 "name": "ingest",
                 "flow_id": LANGFLOW_INGEST_FLOW_ID,
-                "embedding_id": OPENAI_EMBEDDING_COMPONENT_ID,
-                "llm_id": None,  # Ingestion flow might not have LLM
-                "llm_text_id": None,  # Ingestion flow might not have LLM Text
+                "embedding_name": OPENAI_EMBEDDING_COMPONENT_DISPLAY_NAME,
+                "llm_name": None,  # Ingestion flow might not have LLM
+                "llm_text_name": None,
             },
         ]
@@ -389,9 +385,9 @@ class FlowsService:
         """Update components in a specific flow"""
         flow_name = config["name"]
         flow_id = config["flow_id"]
-        old_embedding_id = config["embedding_id"]
-        old_llm_id = config["llm_id"]
-        old_llm_text_id = config["llm_text_id"]
+        old_embedding_name = config["embedding_name"]
+        old_llm_name = config["llm_name"]
+        old_llm_text_name = config["llm_text_name"]
         # Extract IDs from templates
         new_llm_id = llm_template["data"]["id"]
         new_embedding_id = embedding_template["data"]["id"]
@@ -411,7 +407,7 @@ class FlowsService:

         # Replace embedding component
         if not DISABLE_INGEST_WITH_LANGFLOW:
-            embedding_node = self._find_node_by_id(flow_data, old_embedding_id)
+            embedding_node, _ = self._find_node_in_flow(flow_data, display_name=old_embedding_name)
             if embedding_node:
                 # Preserve position
                 original_position = embedding_node.get("position", {})
@@ -421,16 +417,14 @@ class FlowsService:
                 new_embedding_node["position"] = original_position

                 # Replace in flow
-                self._replace_node_in_flow(
-                    flow_data, old_embedding_id, new_embedding_node
-                )
+                self._replace_node_in_flow(flow_data, old_embedding_name, new_embedding_node)
                 components_updated.append(
-                    f"embedding: {old_embedding_id} -> {new_embedding_id}"
+                    f"embedding: {old_embedding_name} -> {new_embedding_id}"
                 )

         # Replace LLM component (if exists in this flow)
-        if old_llm_id:
-            llm_node = self._find_node_by_id(flow_data, old_llm_id)
+        if old_llm_name:
+            llm_node, _ = self._find_node_in_flow(flow_data, display_name=old_llm_name)
             if llm_node:
                 # Preserve position
                 original_position = llm_node.get("position", {})
@@ -440,12 +434,12 @@ class FlowsService:
                 new_llm_node["position"] = original_position

                 # Replace in flow
-                self._replace_node_in_flow(flow_data, old_llm_id, new_llm_node)
-                components_updated.append(f"llm: {old_llm_id} -> {new_llm_id}")
+                self._replace_node_in_flow(flow_data, old_llm_name, new_llm_node)
+                components_updated.append(f"llm: {old_llm_name} -> {new_llm_id}")

         # Replace LLM component (if exists in this flow)
-        if old_llm_text_id:
-            llm_text_node = self._find_node_by_id(flow_data, old_llm_text_id)
+        if old_llm_text_name:
+            llm_text_node, _ = self._find_node_in_flow(flow_data, display_name=old_llm_text_name)
             if llm_text_node:
                 # Preserve position
                 original_position = llm_text_node.get("position", {})
@@ -455,12 +449,18 @@ class FlowsService:
                 new_llm_text_node["position"] = original_position

                 # Replace in flow
-                self._replace_node_in_flow(
-                    flow_data, old_llm_text_id, new_llm_text_node
-                )
-                components_updated.append(
-                    f"llm: {old_llm_text_id} -> {new_llm_text_id}"
-                )
+                self._replace_node_in_flow(flow_data, old_llm_text_name, new_llm_text_node)
+                components_updated.append(f"llm: {old_llm_text_name} -> {new_llm_text_id}")

+        old_embedding_id = None
+        old_llm_id = None
+        old_llm_text_id = None
+        if embedding_node:
+            old_embedding_id = embedding_node.get("data", {}).get("id")
+        if old_llm_name and llm_node:
+            old_llm_id = llm_node.get("data", {}).get("id")
+        if old_llm_text_name and llm_text_node:
+            old_llm_text_id = llm_text_node.get("data", {}).get("id")

         # Update all edge references using regex replacement
         flow_json_str = json.dumps(flow_data)
@@ -478,17 +478,27 @@ class FlowsService:

         # Replace LLM ID references (if applicable)
         if old_llm_id:
-            flow_json_str = re.sub(re.escape(old_llm_id), new_llm_id, flow_json_str)
-        if old_llm_text_id:
-            flow_json_str = re.sub(
-                re.escape(old_llm_text_id), new_llm_text_id, flow_json_str
-            )
+            flow_json_str = re.sub(
+                re.escape(old_llm_id), new_llm_id, flow_json_str
+            )
+
+            flow_json_str = re.sub(
+                re.escape(old_llm_id.split("-")[0]),
+                new_llm_id.split("-")[0],
+                flow_json_str,
+            )
+
+        # Replace text LLM ID references (if applicable)
+        if old_llm_text_id:
+            flow_json_str = re.sub(
+                re.escape(old_llm_text_id), new_llm_text_id, flow_json_str
+            )
+
+            flow_json_str = re.sub(
+                re.escape(old_llm_text_id.split("-")[0]),
+                new_llm_text_id.split("-")[0],
+                flow_json_str,
+            )

         # Convert back to JSON
         flow_data = json.loads(flow_json_str)
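The new second re.sub pass also rewrites the bare component-type prefix (old_llm_id.split("-")[0]) throughout the serialized flow, which updates stray references that use the type name alone but can equally touch prose or code strings containing that token. An illustration with hypothetical IDs:

import json
import re

# Hypothetical IDs: an OpenAI language-model node being swapped for an Ollama one
old_llm_id, new_llm_id = "LanguageModelComponent-0YME7", "OllamaModel-eCsJx"

flow_json_str = json.dumps({
    "edges": [{"source": "LanguageModelComponent-0YME7"}],
    "note": "LanguageModelComponent output feeds the agent",
})

# Pass 1: rewrite exact ID references (edges, node ids)
flow_json_str = re.sub(re.escape(old_llm_id), new_llm_id, flow_json_str)
# Pass 2: rewrite the bare type prefix; this matches *any* occurrence of the
# token, including prose and code strings, which is the sharp edge here
flow_json_str = re.sub(
    re.escape(old_llm_id.split("-")[0]), new_llm_id.split("-")[0], flow_json_str
)

print(json.loads(flow_json_str))
# {'edges': [{'source': 'OllamaModel-eCsJx'}], 'note': 'OllamaModel output feeds the agent'}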
@@ -510,14 +520,6 @@ class FlowsService:
             "flow_id": flow_id,
         }

-    def _find_node_by_id(self, flow_data, node_id):
-        """Find a node by ID in the flow data"""
-        nodes = flow_data.get("data", {}).get("nodes", [])
-        for node in nodes:
-            if node.get("id") == node_id:
-                return node
-        return None
-
     def _find_node_in_flow(self, flow_data, node_id=None, display_name=None):
         """
         Helper function to find a node in flow data by ID or display name.
@@ -539,14 +541,7 @@ class FlowsService:

         return None, None

-    async def _update_flow_field(
-        self,
-        flow_id: str,
-        field_name: str,
-        field_value: str,
-        node_display_name: str = None,
-        node_id: str = None,
-    ):
+    async def _update_flow_field(self, flow_id: str, field_name: str, field_value: str, node_display_name: str = None):
         """
         Generic helper function to update any field in any Langflow component.
@@ -577,23 +572,18 @@ class FlowsService:
             flow_data, display_name=node_display_name
         )

-        if target_node is None and node_id:
-            target_node, target_node_index = self._find_node_in_flow(
-                flow_data, node_id=node_id
-            )
-
         if target_node is None:
-            identifier = node_display_name or node_id
+            identifier = node_display_name
             raise Exception(f"Component '{identifier}' not found in flow {flow_id}")

         # Update the field value directly in the existing node
         template = target_node.get("data", {}).get("node", {}).get("template", {})
         if template.get(field_name):
-            flow_data["data"]["nodes"][target_node_index]["data"]["node"]["template"][
-                field_name
-            ]["value"] = field_value
+            flow_data["data"]["nodes"][target_node_index]["data"]["node"]["template"][field_name]["value"] = field_value
+            if "options" in flow_data["data"]["nodes"][target_node_index]["data"]["node"]["template"][field_name] and field_value not in flow_data["data"]["nodes"][target_node_index]["data"]["node"]["template"][field_name]["options"]:
+                flow_data["data"]["nodes"][target_node_index]["data"]["node"]["template"][field_name]["options"].append(field_value)
         else:
-            identifier = node_display_name or node_id
+            identifier = node_display_name
             raise Exception(f"{field_name} field not found in {identifier} component")

         # Update the flow via PATCH request
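The added options handling keeps dropdown fields consistent: when the field carries an options list and the new value is not in it, the value is appended so the UI never shows an out-of-list selection. The same logic condensed, with an abbreviated template entry:

# Hypothetical template entry for a dropdown field such as model_name
field = {"value": "", "options": ["nomic-embed-text:latest"]}

def set_field(field: dict, field_value: str) -> None:
    field["value"] = field_value
    # Keep dropdowns consistent: append unseen values to the options list
    if "options" in field and field_value not in field["options"]:
        field["options"].append(field_value)

set_field(field, "all-minilm:latest")
print(field)  # {'value': 'all-minilm:latest', 'options': ['nomic-embed-text:latest', 'all-minilm:latest']}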
@@ -606,41 +596,36 @@ class FlowsService:
                 f"Failed to update flow: HTTP {patch_response.status_code} - {patch_response.text}"
             )

-    async def update_chat_flow_model(self, model_name: str):
+    async def update_chat_flow_model(self, model_name: str, provider: str):
         """Helper function to update the model in the chat flow"""
         if not LANGFLOW_CHAT_FLOW_ID:
             raise ValueError("LANGFLOW_CHAT_FLOW_ID is not configured")
-        await self._update_flow_field(
-            LANGFLOW_CHAT_FLOW_ID,
-            "model_name",
-            model_name,
-            node_display_name="Language Model",
-        )

-    async def update_chat_flow_system_prompt(self, system_prompt: str):
+        # Determine target component IDs based on provider
+        target_llm_id = self._get_provider_component_ids(provider)[1]
+
+        await self._update_flow_field(LANGFLOW_CHAT_FLOW_ID, "model_name", model_name,
+                                      node_display_name=target_llm_id)
+
+    async def update_chat_flow_system_prompt(self, system_prompt: str, provider: str):
         """Helper function to update the system prompt in the chat flow"""
         if not LANGFLOW_CHAT_FLOW_ID:
             raise ValueError("LANGFLOW_CHAT_FLOW_ID is not configured")
-        await self._update_flow_field(
-            LANGFLOW_CHAT_FLOW_ID,
-            "system_prompt",
-            system_prompt,
-            node_display_name="Agent",
-        )

+        # Determine target component IDs based on provider
+        target_agent_id = self._get_provider_component_ids(provider)[1]
+
+        await self._update_flow_field(LANGFLOW_CHAT_FLOW_ID, "system_prompt", system_prompt,
+                                      node_display_name=target_agent_id)
+
     async def update_flow_docling_preset(self, preset: str, preset_config: dict):
         """Helper function to update docling preset in the ingest flow"""
         if not LANGFLOW_INGEST_FLOW_ID:
             raise ValueError("LANGFLOW_INGEST_FLOW_ID is not configured")

-        from config.settings import DOCLING_COMPONENT_ID
-
-        await self._update_flow_field(
-            LANGFLOW_INGEST_FLOW_ID,
-            "docling_serve_opts",
-            preset_config,
-            node_id=DOCLING_COMPONENT_ID,
-        )
+        from config.settings import DOCLING_COMPONENT_DISPLAY_NAME
+        await self._update_flow_field(LANGFLOW_INGEST_FLOW_ID, "docling_serve_opts", preset_config,
+                                      node_display_name=DOCLING_COMPONENT_DISPLAY_NAME)

     async def update_ingest_flow_chunk_size(self, chunk_size: int):
         """Helper function to update chunk size in the ingest flow"""
@@ -664,22 +649,22 @@ class FlowsService:
             node_display_name="Split Text",
         )

-    async def update_ingest_flow_embedding_model(self, embedding_model: str):
+    async def update_ingest_flow_embedding_model(self, embedding_model: str, provider: str):
         """Helper function to update embedding model in the ingest flow"""
         if not LANGFLOW_INGEST_FLOW_ID:
             raise ValueError("LANGFLOW_INGEST_FLOW_ID is not configured")
-        await self._update_flow_field(
-            LANGFLOW_INGEST_FLOW_ID,
-            "model",
-            embedding_model,
-            node_display_name="Embedding Model",
-        )

-    def _replace_node_in_flow(self, flow_data, old_id, new_node):
+        # Determine target component IDs based on provider
+        target_embedding_id = self._get_provider_component_ids(provider)[0]
+
+        await self._update_flow_field(LANGFLOW_INGEST_FLOW_ID, "model", embedding_model,
+                                      node_display_name=target_embedding_id)
+
+    def _replace_node_in_flow(self, flow_data, old_display_name, new_node):
         """Replace a node in the flow data"""
         nodes = flow_data.get("data", {}).get("nodes", [])
         for i, node in enumerate(nodes):
-            if node.get("id") == old_id:
+            if node.get("data", {}).get("node", {}).get("display_name") == old_display_name:
                 nodes[i] = new_node
                 return True
         return False
@@ -734,8 +719,8 @@ class FlowsService:
         ]

         # Determine target component IDs based on provider
-        target_embedding_id, target_llm_id, target_llm_text_id = (
-            self._get_provider_component_ids(provider)
+        target_embedding_name, target_llm_name = self._get_provider_component_ids(
+            provider
         )

         results = []
@@ -746,9 +731,8 @@ class FlowsService:
                 result = await self._update_provider_components(
                     config,
                     provider,
-                    target_embedding_id,
-                    target_llm_id,
-                    target_llm_text_id,
+                    target_embedding_name,
+                    target_llm_name,
                     embedding_model,
                     llm_model,
                     endpoint,
@@ -791,24 +775,12 @@ class FlowsService:
     def _get_provider_component_ids(self, provider: str):
         """Get the component IDs for a specific provider"""
         if provider == "watsonx":
-            return (
-                WATSONX_EMBEDDING_COMPONENT_ID,
-                WATSONX_LLM_COMPONENT_ID,
-                WATSONX_LLM_TEXT_COMPONENT_ID,
-            )
+            return WATSONX_EMBEDDING_COMPONENT_DISPLAY_NAME, WATSONX_LLM_COMPONENT_DISPLAY_NAME
         elif provider == "ollama":
-            return (
-                OLLAMA_EMBEDDING_COMPONENT_ID,
-                OLLAMA_LLM_COMPONENT_ID,
-                OLLAMA_LLM_TEXT_COMPONENT_ID,
-            )
+            return OLLAMA_EMBEDDING_COMPONENT_DISPLAY_NAME, OLLAMA_LLM_COMPONENT_DISPLAY_NAME
         elif provider == "openai":
             # OpenAI components are the default ones
-            return (
-                OPENAI_EMBEDDING_COMPONENT_ID,
-                OPENAI_LLM_COMPONENT_ID,
-                OPENAI_LLM_TEXT_COMPONENT_ID,
-            )
+            return OPENAI_EMBEDDING_COMPONENT_DISPLAY_NAME, OPENAI_LLM_COMPONENT_DISPLAY_NAME
         else:
             raise ValueError(f"Unsupported provider: {provider}")
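After this change the lookup returns a two-tuple of display names rather than a three-tuple of node IDs; callers take index 0 for the embedding component and index 1 for the LLM. A standalone sketch of the same dispatch, using the default display names from the settings hunk above:

PROVIDER_COMPONENT_NAMES = {
    "watsonx": ("IBM watsonx.ai Embeddings", "IBM watsonx.ai"),
    "ollama": ("Ollama Model", "Ollama"),
    "openai": ("Embedding Model", "Language Model"),
}

def get_provider_component_names(provider: str) -> tuple[str, str]:
    # (embedding display name, llm display name); raises on unknown providers
    try:
        return PROVIDER_COMPONENT_NAMES[provider]
    except KeyError as e:
        raise ValueError(f"Unsupported provider: {provider}") from e

embedding_name, llm_name = get_provider_component_names("ollama")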
@@ -816,9 +788,8 @@ class FlowsService:
         self,
         config,
         provider: str,
-        target_embedding_id: str,
-        target_llm_id: str,
-        target_llm_text_id: str,
+        target_embedding_name: str,
+        target_llm_name: str,
         embedding_model: str,
         llm_model: str,
         endpoint: str = None,
@@ -841,7 +812,7 @@ class FlowsService:

         # Update embedding component
         if not DISABLE_INGEST_WITH_LANGFLOW:
-            embedding_node = self._find_node_by_id(flow_data, target_embedding_id)
+            embedding_node, _ = self._find_node_in_flow(flow_data, display_name=target_embedding_name)
             if embedding_node:
                 if self._update_component_fields(
                     embedding_node, provider, embedding_model, endpoint
@@ -849,22 +820,14 @@ class FlowsService:
                     updates_made.append(f"embedding model: {embedding_model}")

         # Update LLM component (if exists in this flow)
-        if target_llm_id:
-            llm_node = self._find_node_by_id(flow_data, target_llm_id)
+        if target_llm_name:
+            llm_node, _ = self._find_node_in_flow(flow_data, display_name=target_llm_name)
             if llm_node:
                 if self._update_component_fields(
                     llm_node, provider, llm_model, endpoint
                 ):
                     updates_made.append(f"llm model: {llm_model}")

-        if target_llm_text_id:
-            llm_text_node = self._find_node_by_id(flow_data, target_llm_text_id)
-            if llm_text_node:
-                if self._update_component_fields(
-                    llm_text_node, provider, llm_model, endpoint
-                ):
-                    updates_made.append(f"llm model: {llm_model}")
-
         # If no updates were made, return skip message
         if not updates_made:
             return {
@@ -98,7 +98,7 @@ class LangflowFileService:

         # Pass metadata via tweaks to OpenSearch component
         metadata_tweaks = []
-        if owner:
+        if owner or owner is None:
             metadata_tweaks.append({"key": "owner", "value": owner})
         if owner_name:
             metadata_tweaks.append({"key": "owner_name", "value": owner_name})
@@ -106,17 +106,18 @@ class LangflowFileService:
             metadata_tweaks.append({"key": "owner_email", "value": owner_email})
         if connector_type:
             metadata_tweaks.append({"key": "connector_type", "value": connector_type})

-        if metadata_tweaks:
-            # Initialize the OpenSearch component tweaks if not already present
-            if "OpenSearchHybrid-Ve6bS" not in tweaks:
-                tweaks["OpenSearchHybrid-Ve6bS"] = {}
-            tweaks["OpenSearchHybrid-Ve6bS"]["docs_metadata"] = metadata_tweaks
-            logger.debug(
-                "[LF] Added metadata to tweaks", metadata_count=len(metadata_tweaks)
-            )
+        logger.info(f"[LF] Metadata tweaks {metadata_tweaks}")
+        # if metadata_tweaks:
+        #     # Initialize the OpenSearch component tweaks if not already present
+        #     if "OpenSearchHybrid-Ve6bS" not in tweaks:
+        #         tweaks["OpenSearchHybrid-Ve6bS"] = {}
+        #     tweaks["OpenSearchHybrid-Ve6bS"]["docs_metadata"] = metadata_tweaks
+        #     logger.debug(
+        #         "[LF] Added metadata to tweaks", metadata_count=len(metadata_tweaks)
+        #     )
         if tweaks:
             payload["tweaks"] = tweaks
             logger.debug(f"[LF] Tweaks {tweaks}")
         if session_id:
             payload["session_id"] = session_id
@@ -137,9 +138,13 @@ class LangflowFileService:
             "X-Langflow-Global-Var-OWNER_EMAIL": str(owner_email),
             "X-Langflow-Global-Var-CONNECTOR_TYPE": str(connector_type),
         }

+        logger.info(f"[LF] Headers {headers}")
+        logger.info(f"[LF] Payload {payload}")
         resp = await clients.langflow_request(
-            "POST", f"/api/v1/run/{self.flow_id_ingest}", json=payload, headers=headers
+            "POST",
+            f"/api/v1/run/{self.flow_id_ingest}",
+            json=payload,
+            headers=headers,
         )
         logger.debug(
             "[LF] Run response", status_code=resp.status_code, reason=resp.reason_phrase
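With the tweaks path commented out in the previous hunk, ownership metadata now reaches the flow only through the X-Langflow-Global-Var-* headers above, which Langflow's naming suggests are surfaced to flows as global variables. A sketch of assembling such a request; the client call is hypothetical:

import httpx

def build_ownership_headers(owner, owner_name, owner_email, connector_type) -> dict[str, str]:
    # Header names copied from the diff; values are stringified as the code does
    return {
        "X-Langflow-Global-Var-OWNER": str(owner),
        "X-Langflow-Global-Var-OWNER_NAME": str(owner_name),
        "X-Langflow-Global-Var-OWNER_EMAIL": str(owner_email),
        "X-Langflow-Global-Var-CONNECTOR_TYPE": str(connector_type),
    }

# Hypothetical usage against a local Langflow; flow_id and payload are illustrative
headers = build_ownership_headers(None, "Anonymous", "anon@example.com", "local")
# httpx.post(f"http://localhost:7860/api/v1/run/{flow_id}", json=payload, headers=headers)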
@@ -160,6 +165,7 @@ class LangflowFileService:
                 body=resp.text[:1000],
                 error=str(e),
             )
+
             raise
         return resp_json
@@ -171,6 +177,10 @@ class LangflowFileService:
         settings: Optional[Dict[str, Any]] = None,
         jwt_token: Optional[str] = None,
         delete_after_ingest: bool = True,
+        owner: Optional[str] = None,
+        owner_name: Optional[str] = None,
+        owner_email: Optional[str] = None,
+        connector_type: Optional[str] = None,
     ) -> Dict[str, Any]:
         """
         Combined upload, ingest, and delete operation.
@@ -257,6 +267,10 @@ class LangflowFileService:
                 session_id=session_id,
                 tweaks=final_tweaks,
                 jwt_token=jwt_token,
+                owner=owner,
+                owner_name=owner_name,
+                owner_email=owner_email,
+                connector_type=connector_type,
             )
             logger.debug("[LF] Ingestion completed successfully")
         except Exception as e: