diff --git a/frontend/src/components/AgGrid/agGridStyles.css b/frontend/src/components/AgGrid/agGridStyles.css
index 590046c2..e307f617 100644
--- a/frontend/src/components/AgGrid/agGridStyles.css
+++ b/frontend/src/components/AgGrid/agGridStyles.css
@@ -10,6 +10,9 @@ body {
--ag-row-hover-color: hsl(var(--muted));
--ag-wrapper-border: none;
--ag-font-family: var(--font-sans);
+ --ag-selected-row-background-color: hsl(var(--accent));
+ --ag-focus-shadow: none;
+ --ag-range-selection-border-color: hsl(var(--primary));
/* Checkbox styling */
--ag-checkbox-background-color: hsl(var(--background));
diff --git a/frontend/src/components/layout-wrapper.tsx b/frontend/src/components/layout-wrapper.tsx
index 79417654..dbd04fea 100644
--- a/frontend/src/components/layout-wrapper.tsx
+++ b/frontend/src/components/layout-wrapper.tsx
@@ -12,12 +12,10 @@ import { KnowledgeFilterPanel } from "@/components/knowledge-filter-panel";
import Logo from "@/components/logo/logo";
import { Navigation } from "@/components/navigation";
import { TaskNotificationMenu } from "@/components/task-notification-menu";
-import { Button } from "@/components/ui/button";
import { UserNav } from "@/components/user-nav";
import { useAuth } from "@/contexts/auth-context";
import { useChat } from "@/contexts/chat-context";
import { useKnowledgeFilter } from "@/contexts/knowledge-filter-context";
-import { LayoutProvider } from "@/contexts/layout-context";
// import { GitHubStarButton } from "@/components/github-star-button"
// import { DiscordLink } from "@/components/discord-link"
import { useTask } from "@/contexts/task-context";
@@ -35,7 +33,7 @@ export function LayoutWrapper({ children }: { children: React.ReactNode }) {
refreshConversations,
startNewConversation,
} = useChat();
- const { isLoading: isSettingsLoading, data: settings } = useGetSettingsQuery({
+ const { isLoading: isSettingsLoading } = useGetSettingsQuery({
enabled: isAuthenticated || isNoAuthMode,
});
const {
@@ -59,6 +57,7 @@ export function LayoutWrapper({ children }: { children: React.ReactNode }) {
// List of paths that should not show navigation
const authPaths = ["/login", "/auth/callback", "/onboarding"];
const isAuthPage = authPaths.includes(pathname);
+ const isOnKnowledgePage = pathname.startsWith("/knowledge");
// List of paths with smaller max-width
const smallWidthPaths = ["/settings", "/settings/connector/new"];
@@ -66,7 +65,7 @@ export function LayoutWrapper({ children }: { children: React.ReactNode }) {
// Calculate active tasks for the bell icon
const activeTasks = tasks.filter(
- task =>
+ (task) =>
task.status === "pending" ||
task.status === "running" ||
task.status === "processing"
@@ -75,14 +74,6 @@ export function LayoutWrapper({ children }: { children: React.ReactNode }) {
const isUnhealthy = health?.status === "unhealthy" || isError;
const isBannerVisible = !isHealthLoading && isUnhealthy;
- // Dynamic height calculations based on banner visibility
- const headerHeight = 53;
- const bannerHeight = 52; // Approximate banner height
- const totalTopOffset = isBannerVisible
- ? headerHeight + bannerHeight
- : headerHeight;
- const mainContentHeight = `calc(100vh - ${totalTopOffset}px)`;
-
// Show loading state when backend isn't ready
if (isLoading || isSettingsLoading) {
return (
@@ -102,9 +93,18 @@ export function LayoutWrapper({ children }: { children: React.ReactNode }) {
// For all other pages, render with Langflow-styled navigation and task menu
return (
-
-
-
+
+
+
+
+
-
+
+ {/* Sidebar Navigation */}
+
-
-
+
+ {/* Main Content */}
+
+
-
- {children}
-
-
+ {children}
+
-
-
+
+ {/* Task Notifications Panel */}
+
+
+ {/* Knowledge Filter Panel */}
+
);
}
diff --git a/frontend/src/components/task-notification-menu.tsx b/frontend/src/components/task-notification-menu.tsx
index fed7e6f1..6cb968e8 100644
--- a/frontend/src/components/task-notification-menu.tsx
+++ b/frontend/src/components/task-notification-menu.tsx
@@ -143,10 +143,10 @@ export function TaskNotificationMenu() {
}
return (
-
+
{/* Header */}
-
+
diff --git a/frontend/src/components/ui/animated-processing-icon.tsx b/frontend/src/components/ui/animated-processing-icon.tsx
index 51815414..431202c1 100644
--- a/frontend/src/components/ui/animated-processing-icon.tsx
+++ b/frontend/src/components/ui/animated-processing-icon.tsx
@@ -1,37 +1,70 @@
-import type { SVGProps } from "react";
+import { cn } from "@/lib/utils";
+import { motion, easeInOut } from "framer-motion";
-export const AnimatedProcessingIcon = (props: SVGProps
) => {
- return (
-
- );
+export const AnimatedProcessingIcon = ({
+ className,
+}: {
+ className?: string;
+}) => {
+ const createAnimationFrames = (delay: number) => ({
+ opacity: [1, 1, 0.5, 0], // Opacity Steps
+ transition: {
+ delay,
+ duration: 1,
+ ease: easeInOut,
+ repeat: Infinity,
+ times: [0, 0.33, 0.66, 1], // Duration Percentages that Correspond to opacity Array
+ },
+ });
+
+ return (
+
+ );
};
diff --git a/frontend/src/components/ui/status-badge.tsx b/frontend/src/components/ui/status-badge.tsx
index e57ad3b5..19270284 100644
--- a/frontend/src/components/ui/status-badge.tsx
+++ b/frontend/src/components/ui/status-badge.tsx
@@ -50,7 +50,7 @@ export const StatusBadge = ({ status, className }: StatusBadgeProps) => {
}`}
>
{status === "processing" && (
-
+
)}
{config.label}
diff --git a/frontend/src/contexts/knowledge-filter-context.tsx b/frontend/src/contexts/knowledge-filter-context.tsx
index 043f6fae..eebf355a 100644
--- a/frontend/src/contexts/knowledge-filter-context.tsx
+++ b/frontend/src/contexts/knowledge-filter-context.tsx
@@ -5,6 +5,7 @@ import React, {
createContext,
type ReactNode,
useContext,
+ useEffect,
useState,
} from "react";
@@ -44,6 +45,8 @@ interface KnowledgeFilterContextType {
createMode: boolean;
startCreateMode: () => void;
endCreateMode: () => void;
+ queryOverride: string;
+ setQueryOverride: (query: string) => void;
}
const KnowledgeFilterContext = createContext<
@@ -73,6 +76,7 @@ export function KnowledgeFilterProvider({
useState
(null);
const [isPanelOpen, setIsPanelOpen] = useState(false);
const [createMode, setCreateMode] = useState(false);
+ const [queryOverride, setQueryOverride] = useState("");
const setSelectedFilter = (filter: KnowledgeFilter | null) => {
setSelectedFilterState(filter);
@@ -136,6 +140,11 @@ export function KnowledgeFilterProvider({
setCreateMode(false);
};
+ // Clear the search override when we change filters
+ useEffect(() => {
+ setQueryOverride("");
+ }, [selectedFilter]);
+
const value: KnowledgeFilterContextType = {
selectedFilter,
parsedFilterData,
@@ -148,6 +157,8 @@ export function KnowledgeFilterProvider({
createMode,
startCreateMode,
endCreateMode,
+ queryOverride,
+ setQueryOverride,
};
return (
diff --git a/frontend/src/contexts/layout-context.tsx b/frontend/src/contexts/layout-context.tsx
deleted file mode 100644
index f40ea28c..00000000
--- a/frontend/src/contexts/layout-context.tsx
+++ /dev/null
@@ -1,34 +0,0 @@
-"use client";
-
-import { createContext, useContext } from "react";
-
-interface LayoutContextType {
- headerHeight: number;
- totalTopOffset: number;
-}
-
-const LayoutContext = createContext(undefined);
-
-export function useLayout() {
- const context = useContext(LayoutContext);
- if (context === undefined) {
- throw new Error("useLayout must be used within a LayoutProvider");
- }
- return context;
-}
-
-export function LayoutProvider({
- children,
- headerHeight,
- totalTopOffset
-}: {
- children: React.ReactNode;
- headerHeight: number;
- totalTopOffset: number;
-}) {
- return (
-
- {children}
-
- );
-}
\ No newline at end of file
diff --git a/src/api/langflow_files.py b/src/api/langflow_files.py
index 0226d4d5..ba4c9877 100644
--- a/src/api/langflow_files.py
+++ b/src/api/langflow_files.py
@@ -56,6 +56,7 @@ async def run_ingestion(
payload = await request.json()
file_ids = payload.get("file_ids")
file_paths = payload.get("file_paths") or []
+        file_metadata = payload.get("file_metadata") or []  # List of {filename, mimetype, size}; NOTE(review): size is currently unused — FILESIZE header will be 0 for pre-uploaded files (content is b"")
session_id = payload.get("session_id")
tweaks = payload.get("tweaks") or {}
settings = payload.get("settings", {})
@@ -66,6 +67,21 @@ async def run_ingestion(
{"error": "Provide file_paths or file_ids"}, status_code=400
)
+    # Build file_tuples from file_metadata if provided, otherwise use empty strings
+    import os
+    file_tuples = []
+    for i, file_path in enumerate(file_paths):
+        if i < len(file_metadata):
+            meta = file_metadata[i]
+            filename = meta.get("filename", "")
+            mimetype = meta.get("mimetype", "application/octet-stream")
+            # For files already uploaded, we don't have content, so use empty bytes
+            file_tuples.append((filename, b"", mimetype))
+        else:
+            # No metadata provided; derive the filename from the path
+            filename = os.path.basename(file_path)
+            file_tuples.append((filename, b"", "application/octet-stream"))
+
# Convert UI settings to component tweaks using exact component IDs
if settings:
logger.debug("Applying ingestion settings", settings=settings)
@@ -114,6 +130,7 @@ async def run_ingestion(
result = await langflow_file_service.run_ingestion_flow(
file_paths=file_paths or [],
+ file_tuples=file_tuples,
jwt_token=jwt_token,
session_id=session_id,
tweaks=tweaks,
diff --git a/src/connectors/langflow_connector_service.py b/src/connectors/langflow_connector_service.py
index f79a43d9..b33994e5 100644
--- a/src/connectors/langflow_connector_service.py
+++ b/src/connectors/langflow_connector_service.py
@@ -94,6 +94,7 @@ class LangflowConnectorService:
ingestion_result = await self.langflow_service.run_ingestion_flow(
file_paths=[langflow_file_path],
+ file_tuples=[file_tuple],
jwt_token=jwt_token,
tweaks=tweaks,
owner=owner_user_id,
diff --git a/src/main.py b/src/main.py
index bf6da342..a09d2488 100644
--- a/src/main.py
+++ b/src/main.py
@@ -31,6 +31,7 @@ from api import (
auth,
chat,
connectors,
+ docling,
documents,
flows,
knowledge_filter,
@@ -1111,6 +1112,12 @@ async def create_app():
),
methods=["POST"],
),
+ # Docling service proxy
+ Route(
+ "/docling/health",
+ partial(docling.health),
+ methods=["GET"],
+ ),
]
app = Starlette(debug=True, routes=routes)
diff --git a/src/services/langflow_file_service.py b/src/services/langflow_file_service.py
index 1bce86f0..017431bf 100644
--- a/src/services/langflow_file_service.py
+++ b/src/services/langflow_file_service.py
@@ -60,6 +60,7 @@ class LangflowFileService:
async def run_ingestion_flow(
self,
file_paths: List[str],
+        file_tuples: list[tuple[str, bytes, str]],
jwt_token: str,
session_id: Optional[str] = None,
tweaks: Optional[Dict[str, Any]] = None,
@@ -67,7 +68,6 @@ class LangflowFileService:
owner_name: Optional[str] = None,
owner_email: Optional[str] = None,
connector_type: Optional[str] = None,
- file_tuples: Optional[list[tuple[str, str, str]]] = None,
) -> Dict[str, Any]:
"""
Trigger the ingestion flow with provided file paths.
@@ -135,14 +135,19 @@ class LangflowFileService:
# To compute the file size in bytes, use len() on the file content (which should be bytes)
file_size_bytes = len(file_tuples[0][1]) if file_tuples and len(file_tuples[0]) > 1 else 0
# Avoid logging full payload to prevent leaking sensitive data (e.g., JWT)
+
+ # Extract file metadata if file_tuples is provided
+        filename = str(file_tuples[0][0]) if file_tuples else ""
+        mimetype = str(file_tuples[0][2]) if file_tuples and len(file_tuples[0]) > 2 else ""
+
headers={
"X-Langflow-Global-Var-JWT": str(jwt_token),
"X-Langflow-Global-Var-OWNER": str(owner),
"X-Langflow-Global-Var-OWNER_NAME": str(owner_name),
"X-Langflow-Global-Var-OWNER_EMAIL": str(owner_email),
"X-Langflow-Global-Var-CONNECTOR_TYPE": str(connector_type),
- "X-Langflow-Global-Var-FILENAME": str(file_tuples[0][0]),
- "X-Langflow-Global-Var-MIMETYPE": str(file_tuples[0][2]),
+ "X-Langflow-Global-Var-FILENAME": filename,
+ "X-Langflow-Global-Var-MIMETYPE": mimetype,
"X-Langflow-Global-Var-FILESIZE": str(file_size_bytes),
}
logger.info(f"[LF] Headers {headers}")
@@ -271,14 +276,14 @@ class LangflowFileService:
try:
ingest_result = await self.run_ingestion_flow(
file_paths=[file_path],
+ file_tuples=[file_tuple],
+ jwt_token=jwt_token,
session_id=session_id,
tweaks=final_tweaks,
- jwt_token=jwt_token,
owner=owner,
owner_name=owner_name,
owner_email=owner_email,
connector_type=connector_type,
- file_tuples=[file_tuple],
)
logger.debug("[LF] Ingestion completed successfully")
except Exception as e:
diff --git a/src/tui/managers/docling_manager.py b/src/tui/managers/docling_manager.py
index 6fecfff9..7cb5d1e8 100644
--- a/src/tui/managers/docling_manager.py
+++ b/src/tui/managers/docling_manager.py
@@ -8,6 +8,7 @@ import threading
import time
from typing import Optional, Tuple, Dict, Any, List, AsyncIterator
from utils.logging_config import get_logger
+from utils.container_utils import guess_host_ip_for_containers
logger = get_logger(__name__)
@@ -31,7 +32,7 @@ class DoclingManager:
self._process: Optional[subprocess.Popen] = None
self._port = 5001
- self._host = self._get_host_for_containers() # Get appropriate host IP based on runtime
+ self._host = guess_host_ip_for_containers(logger=logger) # Get appropriate host IP based on runtime
self._running = False
self._external_process = False
@@ -49,136 +50,6 @@ class DoclingManager:
# Try to recover existing process from PID file
self._recover_from_pid_file()
- def _get_host_for_containers(self) -> str:
- """
- Return a host IP that containers can reach (a bridge/CNI gateway).
- Prefers Docker/Podman network gateways; falls back to bridge interfaces.
- """
- import subprocess, json, shutil, re, logging
- logger = logging.getLogger(__name__)
-
- def run(cmd, timeout=2, text=True):
- return subprocess.run(cmd, capture_output=True, text=text, timeout=timeout)
-
- gateways = []
- compose_gateways = [] # Highest priority - compose project networks
- active_gateways = [] # Medium priority - networks with containers
-
- # ---- Docker: enumerate networks and collect gateways
- if shutil.which("docker"):
- try:
- ls = run(["docker", "network", "ls", "--format", "{{.Name}}"])
- if ls.returncode == 0:
- for name in filter(None, ls.stdout.splitlines()):
- try:
- insp = run(["docker", "network", "inspect", name, "--format", "{{json .}}"])
- if insp.returncode == 0 and insp.stdout.strip():
- nw = json.loads(insp.stdout)[0] if insp.stdout.strip().startswith("[") else json.loads(insp.stdout)
- ipam = nw.get("IPAM", {})
- containers = nw.get("Containers", {})
- for cfg in ipam.get("Config", []) or []:
- gw = cfg.get("Gateway")
- if gw:
- # Highest priority: compose networks (ending in _default)
- if name.endswith("_default"):
- compose_gateways.append(gw)
- # Medium priority: networks with active containers
- elif len(containers) > 0:
- active_gateways.append(gw)
- # Low priority: empty networks
- else:
- gateways.append(gw)
- except Exception:
- pass
- except Exception:
- pass
-
- # ---- Podman: enumerate networks and collect gateways (netavark)
- if shutil.which("podman"):
- try:
- # modern podman supports JSON format
- ls = run(["podman", "network", "ls", "--format", "json"])
- if ls.returncode == 0 and ls.stdout.strip():
- for net in json.loads(ls.stdout):
- name = net.get("name") or net.get("Name")
- if not name:
- continue
- try:
- insp = run(["podman", "network", "inspect", name, "--format", "json"])
- if insp.returncode == 0 and insp.stdout.strip():
- arr = json.loads(insp.stdout)
- for item in (arr if isinstance(arr, list) else [arr]):
- for sn in item.get("subnets", []) or []:
- gw = sn.get("gateway")
- if gw:
- # Prioritize compose/project networks
- if name.endswith("_default") or "_" in name:
- compose_gateways.append(gw)
- else:
- gateways.append(gw)
- except Exception:
- pass
- except Exception:
- pass
-
- # ---- Fallback: parse host interfaces for common bridges
- if not gateways:
- try:
- if shutil.which("ip"):
- show = run(["ip", "-o", "-4", "addr", "show"])
- if show.returncode == 0:
- for line in show.stdout.splitlines():
- # e.g. "12: br-3f0f... inet 172.18.0.1/16 ..."
- m = re.search(r"^\d+:\s+([a-zA-Z0-9_.:-]+)\s+.*\binet\s+(\d+\.\d+\.\d+\.\d+)/", line)
- if not m:
- continue
- ifname, ip = m.group(1), m.group(2)
- if ifname == "docker0" or ifname.startswith(("br-", "cni")):
- gateways.append(ip)
- else:
- # As a last resort, try net-tools ifconfig output
- if shutil.which("ifconfig"):
- show = run(["ifconfig"])
- for block in show.stdout.split("\n\n"):
- if any(block.strip().startswith(n) for n in ("docker0", "cni", "br-")):
- m = re.search(r"inet (?:addr:)?(\d+\.\d+\.\d+\.\d+)", block)
- if m:
- gateways.append(m.group(1))
- except Exception:
- pass
-
- # Dedup, prioritizing: 1) compose networks, 2) active networks, 3) all others
- seen, uniq = set(), []
- # First: compose project networks (_default suffix)
- for ip in compose_gateways:
- if ip not in seen:
- uniq.append(ip)
- seen.add(ip)
- # Second: networks with active containers
- for ip in active_gateways:
- if ip not in seen:
- uniq.append(ip)
- seen.add(ip)
- # Third: all other gateways
- for ip in gateways:
- if ip not in seen:
- uniq.append(ip)
- seen.add(ip)
-
- if uniq:
- if len(uniq) > 1:
- logger.info("Container-reachable host IP candidates: %s", ", ".join(uniq))
- else:
- logger.info("Container-reachable host IP: %s", uniq[0])
- return uniq[0]
-
- # Nothing found: warn clearly
- logger.warning(
- "No container bridge IP found. If using rootless Podman (slirp4netns), there is no host bridge; publish ports or use 10.0.2.2 from the container."
- )
- # Returning localhost is honest only for same-namespace; keep it explicit:
- return "127.0.0.1"
-
def cleanup(self):
"""Cleanup resources but keep docling-serve running across sessions."""
# Don't stop the process on exit - let it persist
diff --git a/src/utils/container_utils.py b/src/utils/container_utils.py
index a18d9f1c..14222c84 100644
--- a/src/utils/container_utils.py
+++ b/src/utils/container_utils.py
@@ -136,3 +136,138 @@ def transform_localhost_url(url: str) -> str:
return url.replace(pattern, container_host)
return url
+
+
+def guess_host_ip_for_containers(logger=None) -> str:
+ """Best-effort detection of a host IP reachable from container networks.
+
+ The logic mirrors what the TUI uses when launching docling-serve so that
+ both CLI and API use consistent addresses. Preference order:
+ 1. Docker/Podman compose networks (ended with ``_default``)
+ 2. Networks with active containers
+ 3. Any discovered bridge or CNI gateway interfaces
+
+ Args:
+ logger: Optional logger to emit diagnostics; falls back to module logger.
+
+ Returns:
+ The most appropriate host IP address if discovered, otherwise ``"127.0.0.1"``.
+ """
+ import json
+ import logging
+ import re
+ import shutil
+ import subprocess
+
+ log = logger or logging.getLogger(__name__)
+
+ def run(cmd, timeout=2, text=True):
+ return subprocess.run(cmd, capture_output=True, text=text, timeout=timeout)
+
+ gateways: list[str] = []
+ compose_gateways: list[str] = []
+ active_gateways: list[str] = []
+
+ # ---- Docker networks
+ if shutil.which("docker"):
+ try:
+ ls = run(["docker", "network", "ls", "--format", "{{.Name}}"])
+ if ls.returncode == 0:
+ for name in filter(None, ls.stdout.splitlines()):
+ try:
+ insp = run(["docker", "network", "inspect", name, "--format", "{{json .}}"])
+ if insp.returncode == 0 and insp.stdout.strip():
+ payload = insp.stdout.strip()
+ nw = json.loads(payload)[0] if payload.startswith("[") else json.loads(payload)
+ ipam = nw.get("IPAM", {})
+ containers = nw.get("Containers", {})
+ for cfg in ipam.get("Config", []) or []:
+ gw = cfg.get("Gateway")
+ if not gw:
+ continue
+ if name.endswith("_default"):
+ compose_gateways.append(gw)
+ elif len(containers) > 0:
+ active_gateways.append(gw)
+ else:
+ gateways.append(gw)
+ except Exception:
+ continue
+ except Exception:
+ pass
+
+ # ---- Podman networks
+ if shutil.which("podman"):
+ try:
+ ls = run(["podman", "network", "ls", "--format", "json"])
+ if ls.returncode == 0 and ls.stdout.strip():
+ for net in json.loads(ls.stdout):
+ name = net.get("name") or net.get("Name")
+ if not name:
+ continue
+ try:
+ insp = run(["podman", "network", "inspect", name, "--format", "json"])
+ if insp.returncode == 0 and insp.stdout.strip():
+ arr = json.loads(insp.stdout)
+ for item in (arr if isinstance(arr, list) else [arr]):
+ for sn in item.get("subnets", []) or []:
+ gw = sn.get("gateway")
+ if not gw:
+ continue
+ if name.endswith("_default") or "_" in name:
+ compose_gateways.append(gw)
+ else:
+ gateways.append(gw)
+ except Exception:
+ continue
+ except Exception:
+ pass
+
+ # ---- Host bridge interfaces
+ if not gateways and not compose_gateways and not active_gateways:
+ try:
+ if shutil.which("ip"):
+ show = run(["ip", "-o", "-4", "addr", "show"])
+ if show.returncode == 0:
+ for line in show.stdout.splitlines():
+ match = re.search(r"^\d+:\s+([\w_.:-]+)\s+.*\binet\s+(\d+\.\d+\.\d+\.\d+)/", line)
+ if not match:
+ continue
+ ifname, ip_addr = match.group(1), match.group(2)
+ if ifname == "docker0" or ifname.startswith(("br-", "cni")):
+ gateways.append(ip_addr)
+ elif shutil.which("ifconfig"):
+ show = run(["ifconfig"])
+ for block in show.stdout.split("\n\n"):
+ if any(block.strip().startswith(n) for n in ("docker0", "cni", "br-")):
+ match = re.search(r"inet (?:addr:)?(\d+\.\d+\.\d+\.\d+)", block)
+ if match:
+ gateways.append(match.group(1))
+ except Exception:
+ pass
+
+ seen: set[str] = set()
+ ordered_candidates: list[str] = []
+
+ for collection in (compose_gateways, active_gateways, gateways):
+ for ip_addr in collection:
+ if ip_addr not in seen:
+ ordered_candidates.append(ip_addr)
+ seen.add(ip_addr)
+
+ if ordered_candidates:
+ if len(ordered_candidates) > 1:
+ log.info(
+ "Container-reachable host IP candidates: %s",
+ ", ".join(ordered_candidates),
+ )
+ else:
+ log.info("Container-reachable host IP: %s", ordered_candidates[0])
+
+ return ordered_candidates[0]
+
+ log.warning(
+ "No container bridge IP found. For rootless Podman (slirp4netns) there may be no host bridge; publish ports or use 10.0.2.2 from the container."
+ )
+
+ return "127.0.0.1"