Merge branch 'main' into remove-dotenv

yangdx 2025-10-17 15:06:34 +08:00
commit 06ed2d06a9
22 changed files with 4706 additions and 444 deletions


@@ -1,14 +1,20 @@
-name: Build Offline Docker Image
+name: Build Lite Docker Image
 on:
   workflow_dispatch:
+    inputs:
+      _notes_:
+        description: '⚠️ Create lite Docker images only after non-trivial version releases.'
+        required: false
+        type: boolean
+        default: false
 permissions:
   contents: read
   packages: write
 jobs:
-  build-and-push-offline:
+  build-and-push-lite:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
@@ -28,12 +34,12 @@ jobs:
          fi
          echo "tag=$LATEST_TAG" >> $GITHUB_OUTPUT
-      - name: Prepare offline tag
-        id: offline_tag
+      - name: Prepare lite tag
+        id: lite_tag
        run: |
-          OFFLINE_TAG="${{ steps.get_tag.outputs.tag }}-offline"
-          echo "Offline image tag: $OFFLINE_TAG"
-          echo "offline_tag=$OFFLINE_TAG" >> $GITHUB_OUTPUT
+          LITE_TAG="${{ steps.get_tag.outputs.tag }}-lite"
+          echo "Lite image tag: $LITE_TAG"
+          echo "lite_tag=$LITE_TAG" >> $GITHUB_OUTPUT
       - name: Update version in __init__.py
        run: |
@@ -56,23 +62,23 @@ jobs:
        with:
          images: ghcr.io/${{ github.repository }}
          tags: |
-            type=raw,value=${{ steps.offline_tag.outputs.offline_tag }}
-            type=raw,value=offline
+            type=raw,value=${{ steps.lite_tag.outputs.lite_tag }}
+            type=raw,value=lite
-      - name: Build and push offline Docker image
+      - name: Build and push lite Docker image
        uses: docker/build-push-action@v5
        with:
          context: .
-          file: ./Dockerfile.offline
+          file: ./Dockerfile.lite
          platforms: linux/amd64,linux/arm64
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
-          cache-to: type=gha,mode=max
+          cache-to: type=gha,mode=min
       - name: Output image details
        run: |
-          echo "Offline Docker image built and pushed successfully!"
-          echo "Image tag: ghcr.io/${{ github.repository }}:${{ steps.offline_tag.outputs.offline_tag }}"
+          echo "Lite Docker image built and pushed successfully!"
+          echo "Image tag: ghcr.io/${{ github.repository }}:${{ steps.lite_tag.outputs.lite_tag }}"
          echo "Base Git tag used: ${{ steps.get_tag.outputs.tag }}"


@@ -2,6 +2,12 @@ name: Build Test Docker Image manually
 on:
   workflow_dispatch:
+    inputs:
+      _notes_:
+        description: '⚠️ Please create a new git tag before building the docker image.'
+        required: false
+        type: boolean
+        default: false
 permissions:
   contents: read
@@ -58,6 +64,7 @@ jobs:
        uses: docker/build-push-action@v5
        with:
          context: .
+          file: ./Dockerfile
          platforms: linux/amd64,linux/arm64
          push: true
          tags: ${{ steps.meta.outputs.tags }}


@@ -66,6 +66,7 @@ jobs:
        uses: docker/build-push-action@v5
        with:
          context: .
+          file: ./Dockerfile
          platforms: linux/amd64,linux/arm64
          push: true
          tags: ${{ steps.meta.outputs.tags }}
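Both manual-build workflows now pin the Dockerfile explicitly instead of relying on the default. A rough local equivalent of the updated build step (the tag is a placeholder):

```bash
docker buildx build \
    --file ./Dockerfile \
    --platform linux/amd64,linux/arm64 \
    --tag ghcr.io/hkuds/lightrag:test \
    --push .
```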


@@ -6,74 +6,96 @@ WORKDIR /app
 # Copy frontend source code
 COPY lightrag_webui/ ./lightrag_webui/
-# Build frontend
-RUN cd lightrag_webui && \
-    bun install --frozen-lockfile && \
-    bun run build
+# Build frontend assets for inclusion in the API package
+RUN cd lightrag_webui \
+    && bun install --frozen-lockfile \
+    && bun run build
-# Python build stage
-FROM python:3.12-slim AS builder
+# Python build stage - using uv for faster package installation
+FROM ghcr.io/astral-sh/uv:python3.12-bookworm-slim AS builder
+ENV DEBIAN_FRONTEND=noninteractive
+ENV UV_SYSTEM_PYTHON=1
+ENV UV_COMPILE_BYTECODE=1
 WORKDIR /app
-# Upgrade pip、setuptools and wheel to the latest version
-RUN pip install --upgrade pip setuptools wheel
-# Install Rust and required build dependencies
-RUN apt-get update && apt-get install -y \
-    curl \
-    build-essential \
-    pkg-config \
-    && rm -rf /var/lib/apt/lists/* \
-    && curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y \
-    && . $HOME/.cargo/env
-# Copy pyproject.toml and source code for dependency installation
+# Install system deps (Rust is required by some wheels)
+RUN apt-get update \
+    && apt-get install -y --no-install-recommends \
+    curl \
+    build-essential \
+    pkg-config \
+    && rm -rf /var/lib/apt/lists/* \
+    && curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
+ENV PATH="/root/.cargo/bin:/root/.local/bin:${PATH}"
+# Ensure shared data directory exists for uv caches
+RUN mkdir -p /root/.local/share/uv
+# Copy project metadata and sources
 COPY pyproject.toml .
 COPY setup.py .
+COPY uv.lock .
+# Install base, API, and offline extras without the project to improve caching
+RUN uv sync --frozen --no-dev --extra api --extra offline --no-install-project --no-editable
+# Copy project sources after dependency layer
 COPY lightrag/ ./lightrag/
-# Copy frontend build output from frontend-builder stage
+# Include pre-built frontend assets from the previous stage
 COPY --from=frontend-builder /app/lightrag/api/webui ./lightrag/api/webui
-# Install dependencies
-ENV PATH="/root/.cargo/bin:${PATH}"
-RUN pip install --user --no-cache-dir --use-pep517 .
-RUN pip install --user --no-cache-dir --use-pep517 .[api]
-# Install depndencies for default storage
-RUN pip install --user --no-cache-dir nano-vectordb networkx
-# Install depndencies for default LLM
-RUN pip install --user --no-cache-dir openai ollama tiktoken
-# Install depndencies for default document loader
-RUN pip install --user --no-cache-dir pypdf2 python-docx python-pptx openpyxl
+# Sync project in non-editable mode and ensure pip is available for runtime installs
+RUN uv sync --frozen --no-dev --extra api --extra offline --no-editable \
+    && /app/.venv/bin/python -m ensurepip --upgrade
+# Prepare offline cache directory and pre-populate tiktoken data
+# Use uv run to execute commands from the virtual environment
+RUN mkdir -p /app/data/tiktoken \
+    && uv run lightrag-download-cache --cache-dir /app/data/tiktoken || status=$?; \
+    if [ -n "${status:-}" ] && [ "$status" -ne 0 ] && [ "$status" -ne 2 ]; then exit "$status"; fi
 # Final stage
 FROM python:3.12-slim
 WORKDIR /app
-# Upgrade pip and setuptools
-RUN pip install --upgrade pip setuptools wheel
-# Copy only necessary files from builder
+# Install uv for package management
+COPY --from=ghcr.io/astral-sh/uv:latest /uv /usr/local/bin/uv
+ENV UV_SYSTEM_PYTHON=1
+# Copy installed packages and application code
 COPY --from=builder /root/.local /root/.local
+COPY --from=builder /app/.venv /app/.venv
 COPY --from=builder /app/lightrag ./lightrag
 COPY pyproject.toml .
 COPY setup.py .
+COPY uv.lock .
-RUN pip install --use-pep517 ".[api]"
-# Make sure scripts in .local are usable
-ENV PATH=/root/.local/bin:$PATH
-# Create necessary directories
-RUN mkdir -p /app/data/rag_storage /app/data/inputs
-# Docker data directories
+# Ensure the installed scripts are on PATH
+ENV PATH=/app/.venv/bin:/root/.local/bin:$PATH
+# Install dependencies with uv sync (uses locked versions from uv.lock)
+# And ensure pip is available for runtime installs
+RUN uv sync --frozen --no-dev --extra api --extra offline --no-editable \
+    && /app/.venv/bin/python -m ensurepip --upgrade
+# Create persistent data directories AFTER package installation
+RUN mkdir -p /app/data/rag_storage /app/data/inputs /app/data/tiktoken
+# Copy offline cache into the newly created directory
+COPY --from=builder /app/data/tiktoken /app/data/tiktoken
+# Point to the prepared cache
+ENV TIKTOKEN_CACHE_DIR=/app/data/tiktoken
 ENV WORKING_DIR=/app/data/rag_storage
 ENV INPUT_DIR=/app/data/inputs
-# Expose the default port
+# Expose API port
 EXPOSE 9621
-# Set entrypoint
 ENTRYPOINT ["python", "-m", "lightrag.api.lightrag_server"]


@@ -11,16 +11,17 @@ RUN cd lightrag_webui \
     && bun install --frozen-lockfile \
     && bun run build
-# Python build stage
-FROM python:3.12-slim AS builder
+# Python build stage - using uv for package installation
+FROM ghcr.io/astral-sh/uv:python3.12-bookworm-slim AS builder
 ENV DEBIAN_FRONTEND=noninteractive
+ENV UV_SYSTEM_PYTHON=1
+ENV UV_COMPILE_BYTECODE=1
 WORKDIR /app
-# Upgrade packaging tools and install system deps (Rust is required by some wheels)
-RUN pip install --upgrade pip setuptools wheel \
-    && apt-get update \
+# Install system dependencies required by some wheels
+RUN apt-get update \
     && apt-get install -y --no-install-recommends \
     curl \
     build-essential \
@@ -30,27 +31,31 @@ RUN pip install --upgrade pip setuptools wheel \
 ENV PATH="/root/.cargo/bin:/root/.local/bin:${PATH}"
+# Ensure shared data directory exists for uv caches
+RUN mkdir -p /root/.local/share/uv
 # Copy project metadata and sources
 COPY pyproject.toml .
 COPY setup.py .
-COPY requirements-offline*.txt ./
-COPY constraints-offline.txt .
+COPY uv.lock .
+# Install project dependencies (base + API extras) without the project to improve caching
+RUN uv sync --frozen --no-dev --extra api --no-install-project --no-editable
+# Copy project sources after dependency layer
 COPY lightrag/ ./lightrag/
 # Include pre-built frontend assets from the previous stage
 COPY --from=frontend-builder /app/lightrag/api/webui ./lightrag/api/webui
-# Install LightRAG with API extras and all offline dependencies in a single step
-# This prevents version conflicts from multiple installation passes
-# Use constraints file for reproducible builds with exact versions
-RUN pip install --user --no-cache-dir --use-pep517 \
-    --upgrade-strategy=only-if-needed \
-    --constraint constraints-offline.txt \
-    .[api] -r requirements-offline.txt
+# Sync project in non-editable mode and ensure pip is available for runtime installs
+RUN uv sync --frozen --no-dev --extra api --no-editable \
+    && /app/.venv/bin/python -m ensurepip --upgrade
-# Prepare offline cache directory and pre-populate tiktoken data
+# Prepare tiktoken cache directory and pre-populate tokenizer data
+# Ignore exit code 2 which indicates assets already cached
 RUN mkdir -p /app/data/tiktoken \
-    && lightrag-download-cache --cache-dir /app/data/tiktoken || status=$?; \
+    && uv run lightrag-download-cache --cache-dir /app/data/tiktoken || status=$?; \
     if [ -n "${status:-}" ] && [ "$status" -ne 0 ] && [ "$status" -ne 2 ]; then exit "$status"; fi
 # Final stage
@@ -58,34 +63,34 @@ FROM python:3.12-slim
 WORKDIR /app
-RUN pip install --upgrade pip setuptools wheel
+# Install uv for package management
+COPY --from=ghcr.io/astral-sh/uv:latest /uv /usr/local/bin/uv
+ENV UV_SYSTEM_PYTHON=1
 # Copy installed packages and application code
 COPY --from=builder /root/.local /root/.local
+COPY --from=builder /app/.venv /app/.venv
 COPY --from=builder /app/lightrag ./lightrag
 COPY pyproject.toml .
 COPY setup.py .
-COPY requirements-offline*.txt ./
-COPY constraints-offline.txt .
+COPY uv.lock .
 # Ensure the installed scripts are on PATH
-ENV PATH=/root/.local/bin:$PATH
+ENV PATH=/app/.venv/bin:/root/.local/bin:$PATH
-# Install editable package for runtime (re-using cached wheels) and verify extras
-# IMPORTANT: Must be done BEFORE creating data/ directory to avoid setuptools error
-# Use single installation to prevent version conflicts with exact version constraints
-RUN pip install --no-cache-dir --use-pep517 \
-    --upgrade-strategy=only-if-needed \
-    --constraint constraints-offline.txt \
-    ".[api]" -r requirements-offline.txt
+# Sync dependencies inside the final image using uv
+# And ensure pip is available for runtime installs
+RUN uv sync --frozen --no-dev --extra api --no-editable \
+    && /app/.venv/bin/python -m ensurepip --upgrade
-# Create persistent data directories AFTER package installation
+# Create persistent data directories
 RUN mkdir -p /app/data/rag_storage /app/data/inputs /app/data/tiktoken
-# Copy offline cache into the newly created directory
+# Copy cached tokenizer assets prepared in the builder stage
 COPY --from=builder /app/data/tiktoken /app/data/tiktoken
-# Point to the prepared cache
+# Docker data directories
 ENV TIKTOKEN_CACHE_DIR=/app/data/tiktoken
 ENV WORKING_DIR=/app/data/rag_storage
 ENV INPUT_DIR=/app/data/inputs
@@ -93,4 +98,5 @@ ENV INPUT_DIR=/app/data/inputs
 # Expose API port
 EXPOSE 9621
+# Set entrypoint
 ENTRYPOINT ["python", "-m", "lightrag.api.lightrag_server"]


@@ -1,160 +0,0 @@
# Exact version constraints based on successful local installation with uv pip install ".[offline]"
# Generated: 2025-10-15
# Use with: pip install --constraint constraints-offline.txt -r requirements-offline.txt
accelerate==1.10.1
# AWS/Boto packages
aioboto3==15.2.0
aiobotocore==2.24.2
# Utility packages
aioitertools==0.12.0
aiolimiter==1.2.1
aiosqlite==0.21.0
anthropic==0.69.0
antlr4-python3-runtime==4.9.3
asyncpg
beautifulsoup4==4.14.2
boto3==1.40.18
botocore==1.40.18
cachetools==6.2.1
colorama==0.4.6
colorlog==6.9.0
dataclasses-json==0.6.7
defusedxml==0.7.1
deprecated==1.2.18
dill==0.4.0
dirtyjson==1.0.8
dnspython==2.8.0
# Document processing packages
docling==2.57.0
docling-core==2.48.4
docling-ibm-models==3.9.1
docling-parse==4.5.0
docstring-parser==0.17.0
faker==37.11.0
filetype==1.2.0
fsspec==2025.9.0
greenlet==3.2.4
griffe==1.14.0
grpcio==1.75.1
h2==4.3.0
hpack==4.1.0
huggingface-hub==0.35.3
hyperframe==6.1.0
jinja2==3.1.6
jmespath==1.0.1
joblib==1.5.2
jsonlines==3.1.0
jsonpatch==1.33
jsonpointer==3.0.0
jsonref==1.1.0
jsonschema==4.25.1
jsonschema-specifications==2025.9.1
langchain-core==0.3.79
langchain-text-splitters==0.3.11
langsmith==0.4.35
latex2mathml==3.78.1
llama-cloud==0.1.35
llama-cloud-services==0.6.54
# LlamaIndex packages
llama-index==0.14.4
llama-index-cli==0.5.3
llama-index-core==0.14.4
llama-index-embeddings-openai==0.5.1
llama-index-indices-managed-llama-cloud==0.9.4
llama-index-instrumentation==0.4.2
llama-index-llms-openai==0.6.4
llama-index-readers-file==0.5.4
llama-index-readers-llama-parse==0.5.1
llama-index-workflows==2.8.0
llama-parse==0.6.54
lxml==5.4.0
markdown-it-py==4.0.0
marko==2.2.1
markupsafe==3.0.3
marshmallow==3.26.1
mdurl==0.1.2
mpire==2.10.2
mpmath==1.3.0
multiprocess==0.70.18
mypy-extensions==1.1.1
neo4j==6.0.2
nest-asyncio==1.6.0
# NLP packages
nltk==3.9.2
numpy
ollama==0.6.0
omegaconf==2.3.0
# LLM Provider packages
openai==1.109.1
# Computer vision
opencv-python==4.11.0.86
opencv-python-headless==4.11.0.86
openpyxl==3.1.5
orjson==3.11.3
# Data processing
pandas==2.2.3
pillow==11.3.0
pluggy==1.6.0
polyfactory==2.22.2
portalocker==3.2.0
protobuf==6.32.1
pyclipper==1.3.0.post6
pydantic-settings==2.11.0
pygments==2.19.2
# Authentication
pyjwt==2.8.0
pylatexenc==2.10
pymilvus==2.6.2
pymongo==4.15.3
pypdf==6.1.1
pypdfium2==4.30.0
python-docx==1.2.0
python-pptx==1.0.2
qdrant-client==1.15.1
rapidocr==3.4.2
# Storage backends
redis==6.4.0
referencing==0.37.0
requests-toolbelt==1.0.0
rich==14.2.0
rpds-py==0.27.1
rtree==1.4.1
s3transfer==0.13.1
safetensors==0.6.2
scipy==1.16.2
semchunk==2.2.2
shapely==2.1.2
shellingham==1.5.4
soupsieve==2.8
sqlalchemy==2.0.44
striprtf==0.0.26
sympy==1.14.0
tabulate==0.9.0
tokenizers==0.22.1
# Core ML/AI packages
torch==2.2.2
torchvision==0.17.2
transformers==4.57.1
typer==0.19.2
typing-inspect==0.9.0
ujson==5.11.0
voyageai==0.3.5
wrapt==1.17.3
zhipuai==2.1.5.20250825
zstandard==0.25.0
# Special packages (platform-specific)
# Note: These may not be available on all platforms
# ocrmac==1.0.0 # macOS only
# pyobjc-* packages are macOS only
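With this constraints file deleted, exact pins now live in `uv.lock`. If a plain-pip environment still needs a requirements-style snapshot, uv can likely export one (a sketch, assuming a uv release with the `export` subcommand):

```bash
# Export locked versions (base + api + offline extras) in requirements format
uv export --frozen --no-dev --extra api --extra offline \
    --format requirements-txt > requirements-offline.lock.txt
```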

docker-build-push.sh Executable file

@@ -0,0 +1,77 @@
#!/bin/bash
set -e
# Configuration
IMAGE_NAME="ghcr.io/hkuds/lightrag"
DOCKERFILE="Dockerfile"
TAG="latest"
# Get version from git tags
VERSION=$(git describe --tags --abbrev=0 2>/dev/null || echo "dev")
echo "=================================="
echo " Multi-Architecture Docker Build"
echo "=================================="
echo "Image: ${IMAGE_NAME}:${TAG}"
echo "Version: ${VERSION}"
echo "Platforms: linux/amd64, linux/arm64"
echo "=================================="
echo ""
# Check Docker login status (skip if CR_PAT is set for CI/CD)
if [ -z "$CR_PAT" ]; then
if ! docker info 2>/dev/null | grep -q "Username"; then
echo "⚠️ Warning: Not logged in to Docker registry"
echo "Please login first: docker login ghcr.io"
echo "Or set CR_PAT environment variable for automated login"
echo ""
read -p "Continue anyway? (y/n) " -n 1 -r
echo
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
exit 1
fi
fi
else
echo "Using CR_PAT environment variable for authentication"
fi
# Check if buildx builder exists, create if not
if ! docker buildx ls | grep -q "desktop-linux"; then
echo "Creating buildx builder..."
docker buildx create --name desktop-linux --use
docker buildx inspect --bootstrap
else
echo "Using existing buildx builder: desktop-linux"
docker buildx use desktop-linux
fi
echo ""
echo "Building and pushing multi-architecture image..."
echo ""
# Build and push
docker buildx build \
--platform linux/amd64,linux/arm64 \
--file ${DOCKERFILE} \
--tag ${IMAGE_NAME}:${TAG} \
--tag ${IMAGE_NAME}:${VERSION} \
--push \
.
echo ""
echo "✓ Build and push complete!"
echo ""
echo "Images pushed:"
echo " - ${IMAGE_NAME}:${TAG}"
echo " - ${IMAGE_NAME}:${VERSION}"
echo ""
echo "Verifying multi-architecture manifest..."
echo ""
# Verify
docker buildx imagetools inspect ${IMAGE_NAME}:${TAG}
echo ""
echo "✓ Verification complete!"
echo ""
echo "Pull with: docker pull ${IMAGE_NAME}:${TAG}"


@@ -14,11 +14,8 @@ services:
       - ./data/inputs:/app/data/inputs
       - ./config.ini:/app/config.ini
       - ./.env:/app/.env
+      # - ./data/tiktoken:/app/data/tiktoken
     env_file:
       - .env
-    environment:
-      - TIKTOKEN_CACHE_DIR=/app/data/tiktoken
     restart: unless-stopped
     extra_hosts:
       - "host.docker.internal:host-gateway"


@@ -1,17 +1,11 @@
-# LightRAG
+# LightRAG Docker Deployment
 A lightweight Knowledge Graph Retrieval-Augmented Generation system with multiple LLM backend support.
-## 🚀 Installation
-### Prerequisites
-- Python 3.10+
-- Git
-- Docker (optional for Docker deployment)
-### Native Installation
-1. Clone the repository:
+## 🚀 Preparation
+### Clone the repository:
 ```bash
 # Linux/MacOS
 git clone https://github.com/HKUDS/LightRAG.git
@@ -23,7 +17,8 @@ git clone https://github.com/HKUDS/LightRAG.git
 cd LightRAG
 ```
-2. Configure your environment:
+### Configure your environment:
 ```bash
 # Linux/MacOS
 cp .env.example .env
@@ -35,141 +30,92 @@
 # Edit .env with your preferred configuration
 ```
-3. Create and activate virtual environment:
-```bash
-# Linux/MacOS
-python -m venv venv
-source venv/bin/activate
-```
-```powershell
-# Windows PowerShell
-python -m venv venv
-.\venv\Scripts\Activate
-```
-4. Install dependencies:
-```bash
-# Both platforms
-pip install -r requirements.txt
-```
+LightRAG can be configured using environment variables in the `.env` file:
+**Server Configuration**
+- `HOST`: Server host (default: 0.0.0.0)
+- `PORT`: Server port (default: 9621)
+**LLM Configuration**
+- `LLM_BINDING`: LLM backend to use (lollms/ollama/openai)
+- `LLM_BINDING_HOST`: LLM server host URL
+- `LLM_MODEL`: Model name to use
+**Embedding Configuration**
+- `EMBEDDING_BINDING`: Embedding backend (lollms/ollama/openai)
+- `EMBEDDING_BINDING_HOST`: Embedding server host URL
+- `EMBEDDING_MODEL`: Embedding model name
+**RAG Configuration**
+- `MAX_ASYNC`: Maximum async operations
+- `MAX_TOKENS`: Maximum token size
+- `EMBEDDING_DIM`: Embedding dimensions
 ## 🐳 Docker Deployment
 Docker instructions work the same on all platforms with Docker Desktop installed.
-1. Build and start the container:
+### Start LightRAG server:
 ```bash
 docker-compose up -d
 ```
-### Configuration Options
-LightRAG can be configured using environment variables in the `.env` file:
-#### Server Configuration
-- `HOST`: Server host (default: 0.0.0.0)
-- `PORT`: Server port (default: 9621)
-#### LLM Configuration
-- `LLM_BINDING`: LLM backend to use (lollms/ollama/openai)
-- `LLM_BINDING_HOST`: LLM server host URL
-- `LLM_MODEL`: Model name to use
-#### Embedding Configuration
-- `EMBEDDING_BINDING`: Embedding backend (lollms/ollama/openai)
-- `EMBEDDING_BINDING_HOST`: Embedding server host URL
-- `EMBEDDING_MODEL`: Embedding model name
-#### RAG Configuration
-- `MAX_ASYNC`: Maximum async operations
-- `MAX_TOKENS`: Maximum token size
-- `EMBEDDING_DIM`: Embedding dimensions
-#### Security
-- `LIGHTRAG_API_KEY`: API key for authentication
-### Data Storage Paths
-The system uses the following paths for data storage:
+LightRAG Server uses the following paths for data storage:
 ```
 data/
 ├── rag_storage/    # RAG data persistence
 └── inputs/         # Input documents
 ```
-### Example Deployments
-1. Using with Ollama:
-```env
-LLM_BINDING=ollama
-LLM_BINDING_HOST=http://host.docker.internal:11434
-LLM_MODEL=mistral
-EMBEDDING_BINDING=ollama
-EMBEDDING_BINDING_HOST=http://host.docker.internal:11434
-EMBEDDING_MODEL=bge-m3
-```
-you can't just use localhost from docker, that's why you need to use host.docker.internal which is defined in the docker compose file and should allow you to access the localhost services.
-2. Using with OpenAI:
-```env
-LLM_BINDING=openai
-LLM_MODEL=gpt-3.5-turbo
-EMBEDDING_BINDING=openai
-EMBEDDING_MODEL=text-embedding-ada-002
-OPENAI_API_KEY=your-api-key
-```
-### API Usage
-Once deployed, you can interact with the API at `http://localhost:9621`
-Example query using PowerShell:
-```powershell
-$headers = @{
-    "X-API-Key" = "your-api-key"
-    "Content-Type" = "application/json"
-}
-$body = @{
-    query = "your question here"
-} | ConvertTo-Json
-Invoke-RestMethod -Uri "http://localhost:9621/query" -Method Post -Headers $headers -Body $body
-```
-Example query using curl:
-```bash
-curl -X POST "http://localhost:9621/query" \
-    -H "X-API-Key: your-api-key" \
-    -H "Content-Type: application/json" \
-    -d '{"query": "your question here"}'
-```
-## 🔒 Security
-Remember to:
-1. Set a strong API key in production
-2. Use SSL in production environments
-3. Configure proper network security
-## 📦 Updates
+### Updates
 To update the Docker container:
 ```bash
 docker-compose pull
-docker-compose up -d --build
+docker-compose down
+docker-compose up
 ```
-To update native installation:
+### Offline deployment
+Software packages requiring `transformers`, `torch`, or `cuda` are not preinstalled in the Docker images. Consequently, document extraction tools such as Docling, as well as local LLM stacks like Hugging Face and LMDeploy, cannot be used in an offline environment. These high-compute-resource-demanding services should not be integrated into LightRAG. Docling will be decoupled and deployed as a standalone service.
+## 📦 Build Docker Images
+### For local development and testing
 ```bash
-# Linux/MacOS
-git pull
-source venv/bin/activate
-pip install -r requirements.txt
+# Build and run with docker-compose
+docker compose up --build
 ```
-```powershell
-# Windows PowerShell
-git pull
-.\venv\Scripts\Activate
-pip install -r requirements.txt
+### For production release
+**multi-architecture build and push**:
+```bash
+# Use the provided build script
+./docker-build-push.sh
 ```
+**The build script will**:
+- Check Docker registry login status
+- Create/use buildx builder automatically
+- Build for both AMD64 and ARM64 architectures
+- Push to GitHub Container Registry (ghcr.io)
+- Verify the multi-architecture manifest
+**Prerequisites**:
+Before building multi-architecture images, ensure you have:
+- Docker 20.10+ with Buildx support
+- Sufficient disk space (20GB+ recommended for offline image)
+- Registry access credentials (if pushing images)
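A minimal `.env` sketch wiring the variables listed above to a local Ollama instance; the values are illustrative (they mirror the example removed from the old README), and `host.docker.internal` is how the container reaches services on the host:

```bash
# .env - illustrative values for an Ollama-backed deployment
HOST=0.0.0.0
PORT=9621
LLM_BINDING=ollama
LLM_BINDING_HOST=http://host.docker.internal:11434
LLM_MODEL=mistral
EMBEDDING_BINDING=ollama
EMBEDDING_BINDING_HOST=http://host.docker.internal:11434
EMBEDDING_MODEL=bge-m3
```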


@ -2,6 +2,10 @@
This guide provides comprehensive instructions for deploying LightRAG in offline environments where internet access is limited or unavailable. This guide provides comprehensive instructions for deploying LightRAG in offline environments where internet access is limited or unavailable.
If you deploy LightRAG using Docker, there is no need to refer to this document, as the LightRAG Docker image is pre-configured for offline operation.
> Software packages requiring `transformers`, `torch`, or `cuda` will not be included in the offline dependency group. Consequently, document extraction tools such as Docling, as well as local LLM models like Hugging Face and LMDeploy, are outside the scope of offline installation support. These high-compute-resource-demanding services should not be integrated into LightRAG. Docling will be decoupled and deployed as a standalone service.
## Table of Contents ## Table of Contents
- [Overview](#overview) - [Overview](#overview)
@ -76,6 +80,8 @@ LightRAG provides flexible dependency groups for different use cases:
| `offline-llm` | LLM providers | OpenAI, Anthropic, Ollama, etc. | | `offline-llm` | LLM providers | OpenAI, Anthropic, Ollama, etc. |
| `offline` | All of the above | Complete offline deployment | | `offline` | All of the above | Complete offline deployment |
> Software packages requiring `transformers`, `torch`, or `cuda` will not be included in the offline dependency group.
### Installation Examples ### Installation Examples
```bash ```bash
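For self-built offline environments, the tokenizer cache can be prepared on a connected machine and shipped alongside the installation; a sketch using the `lightrag-download-cache` entry point referenced in the Dockerfiles (paths are arbitrary):

```bash
# On a machine with internet access: pre-populate the tiktoken cache
lightrag-download-cache --cache-dir ./tiktoken-cache
# Copy ./tiktoken-cache to the offline host, then point LightRAG at it
export TIKTOKEN_CACHE_DIR=/path/to/tiktoken-cache
```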

docs/UV_LOCK_GUIDE.md Normal file

@@ -0,0 +1,170 @@
# uv.lock Update Guide
## What is uv.lock?
`uv.lock` is uv's lock file. It captures the exact version of every dependency, including transitive ones, much like:
- Node.js `package-lock.json`
- Rust `Cargo.lock`
- Python Poetry `poetry.lock`
Keeping `uv.lock` in version control guarantees that everyone installs the same dependency set.
## When does uv.lock change?
### Situations where it does *not* change automatically
- Running `uv sync --frozen`
- Building Docker images that call `uv sync --frozen`
- Editing source code without touching dependency metadata
### Situations where it will change
1. **`uv lock` or `uv lock --upgrade`**
```bash
uv lock # Resolve according to current constraints
uv lock --upgrade # Re-resolve and upgrade to the newest compatible releases
```
Use these commands after modifying `pyproject.toml`, when you want fresh dependency versions, or if the lock file was deleted or corrupted.
2. **`uv add`**
```bash
uv add requests # Adds the dependency and updates both files
uv add --dev pytest # Adds a dev dependency
```
`uv add` edits `pyproject.toml` and refreshes `uv.lock` in one step.
3. **`uv remove`**
```bash
uv remove requests
```
This removes the dependency from `pyproject.toml` and rewrites `uv.lock`.
4. **`uv sync` without `--frozen`**
```bash
uv sync
```
Normally this only installs what is already locked. However, if `pyproject.toml` and `uv.lock` disagree or the lock file is missing, uv will regenerate and update `uv.lock`. In CI and production builds you should prefer `uv sync --frozen` to prevent unintended updates.
## Example workflows
### Scenario 1: Add a new dependency
```bash
# Recommended: let uv handle both files
uv add fastapi
git add pyproject.toml uv.lock
git commit -m "Add fastapi dependency"
# Manual alternative
# 1. Edit pyproject.toml
# 2. Regenerate the lock file
uv lock
git add pyproject.toml uv.lock
git commit -m "Add fastapi dependency"
```
### Scenario 2: Relax or tighten a version constraint
```bash
# 1. Edit the requirement in pyproject.toml,
# e.g. openai>=1.0.0,<2.0.0 -> openai>=1.5.0,<2.0.0
# 2. Re-resolve the lock file
uv lock
# 3. Commit both files
git add pyproject.toml uv.lock
git commit -m "Update openai to >=1.5.0"
```
### Scenario 3: Upgrade everything to the newest compatible versions
```bash
uv lock --upgrade
git diff uv.lock
git add uv.lock
git commit -m "Upgrade dependencies to latest compatible versions"
```
### Scenario 4: Teammate syncing the project
```bash
git pull # Fetch latest code and lock file
uv sync --frozen # Install exactly what uv.lock specifies
```
## Using uv.lock in Docker
```dockerfile
RUN uv sync --frozen --no-dev --extra api
```
`--frozen` guarantees reproducible builds because uv will refuse to deviate from the locked versions.
`--extra api` installs the API server extras on top of the locked base set.
## Generating a lock file that includes offline dependencies
If you need `uv.lock` to capture the optional offline stacks, regenerate it with the relevant extras enabled:
```bash
uv lock --extra api --extra offline
```
This command resolves the base project requirements plus both the `api` and `offline` optional dependency sets, ensuring downstream `uv sync --frozen --extra api --extra offline` installs work without further resolution.
## Frequently asked questions
- **`uv.lock` is almost 1MB. Does that matter?**
  No. The file is only read during dependency resolution.
- **Should we commit `uv.lock`?**
Yes. Commit it so collaborators and CI jobs share the same dependency graph.
- **Deleted the lock file by accident?**
Run `uv lock` to regenerate it from `pyproject.toml`.
- **Can `uv.lock` and `requirements.txt` coexist?**
They can, but maintaining both is redundant. Prefer relying on `uv.lock` alone whenever possible.
- **How do I inspect locked versions?**
```bash
uv tree
grep -A5 'name = "openai"' uv.lock
```
## Best practices
### Recommended
1. Commit `uv.lock` alongside `pyproject.toml`.
2. Use `uv sync --frozen` in CI, Docker, and other reproducible environments.
3. Use plain `uv sync` during local development if you want uv to reconcile the lock for you.
4. Run `uv lock --upgrade` periodically to pick up the latest compatible releases.
5. Regenerate the lock file immediately after changing dependency constraints.
### Avoid
1. Running `uv sync` without `--frozen` in CI or production pipelines.
2. Editing `uv.lock` by hand—uv will overwrite manual edits.
3. Ignoring lock file diffs in code reviews—unexpected dependency changes can break builds.
## Summary
| Command | Updates `uv.lock` | Typical use |
|-----------------------|-------------------|-------------------------------------------|
| `uv lock` | ✅ Yes | After editing constraints |
| `uv lock --upgrade` | ✅ Yes | Upgrade to the newest compatible versions |
| `uv add <pkg>` | ✅ Yes | Add a dependency |
| `uv remove <pkg>` | ✅ Yes | Remove a dependency |
| `uv sync` | ⚠️ Maybe | Local development; can regenerate the lock |
| `uv sync --frozen` | ❌ No | CI/CD, Docker, reproducible builds |
Remember: `uv.lock` only changes when you run a command that tells it to. Keep it in sync with your project and commit it whenever it changes.


@@ -23,7 +23,7 @@ WEBUI_DESCRIPTION="Simple and Fast Graph Based RAG System"
 # WORKING_DIR=<absolute_path_for_working_dir>
 ### Tiktoken cache directory (Store cached files in this folder for offline deployment)
-# TIKTOKEN_CACHE_DIR=./temp/tiktoken
+# TIKTOKEN_CACHE_DIR=/app/data/tiktoken
 ### Ollama Emulating Model and Tag
 # OLLAMA_EMULATING_MODEL_NAME=lightrag


@@ -1,5 +1,5 @@
 from .lightrag import LightRAG as LightRAG, QueryParam as QueryParam
-__version__ = "1.4.9.3"
+__version__ = "1.4.9.4"
 __author__ = "Zirui Guo"
 __url__ = "https://github.com/HKUDS/LightRAG"


@@ -118,36 +118,10 @@ lightrag-gunicorn --workers 4
 ### Launching LightRAG Server with Docker
-* Configure the .env file:
-Create a personalized .env file by copying the sample file [`env.example`](env.example), and set the LLM and embedding parameters according to your actual needs.
-* Create a file named docker-compose.yml:
+Using Docker Compose is the most convenient way to deploy and run the LightRAG Server.
+- Create a project directory.
+- Copy the `docker-compose.yml` file from the LightRAG repository into your project directory.
+- Prepare the `.env` file: copy the sample file [`env.example`](env.example) to create a customized `.env` file, and configure the LLM and embedding parameters according to your specific requirements.
-```yaml
-services:
-  lightrag:
-    container_name: lightrag
-    image: ghcr.io/hkuds/lightrag:latest
-    build:
-      context: .
-      dockerfile: Dockerfile
-      tags:
-        - ghcr.io/hkuds/lightrag:latest
-    ports:
-      - "${PORT:-9621}:9621"
-    volumes:
-      - ./data/rag_storage:/app/data/rag_storage
-      - ./data/inputs:/app/data/inputs
-      - ./data/tiktoken:/app/data/tiktoken
-      - ./config.ini:/app/config.ini
-      - ./.env:/app/.env
-    env_file:
-      - .env
-    environment:
-      - TIKTOKEN_CACHE_DIR=/app/data/tiktoken
-    restart: unless-stopped
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
-```
 * Start the LightRAG Server with the following command:
@@ -155,11 +129,11 @@ services:
 docker compose up
 # If you want the program to run in the background after startup, add the -d parameter at the end of the command.
 ```
-> The official docker compose file can be obtained here: [docker-compose.yml](https://raw.githubusercontent.com/HKUDS/LightRAG/refs/heads/main/docker-compose.yml). For historical versions of LightRAG docker images, visit: [LightRAG Docker Images](https://github.com/HKUDS/LightRAG/pkgs/container/lightrag)
+> The official docker compose file can be obtained here: [docker-compose.yml](https://raw.githubusercontent.com/HKUDS/LightRAG/refs/heads/main/docker-compose.yml). For historical versions of LightRAG docker images, visit: [LightRAG Docker Images](https://github.com/HKUDS/LightRAG/pkgs/container/lightrag). For more information on docker deployment, see [DockerDeployment.md](./../../docs/DockerDeployment.md).
 ### Offline Deployment
-For offline or air-gapped environments, see the [Offline Deployment Guide](./../../docs/OfflineDeployment.md) for instructions on pre-installing all dependencies and cache files.
+Official LightRAG Docker images are fully compatible with offline or air-gapped network environments. To build your own offline deployment environment, refer to the [Offline Deployment Guide](./../../docs/OfflineDeployment.md).
 ### Starting Multiple LightRAG Instances


@@ -119,37 +119,13 @@ During startup, configurations in the `.env` file can be overridden by command-line arguments.
 ### Launching LightRAG Server with Docker
-* Prepare the .env file:
-Create a personalized .env file by copying the sample file [`env.example`](env.example). Configure the LLM and embedding parameters according to your requirements.
-* Create a file named `docker-compose.yml`:
+Using Docker Compose is the most convenient way to deploy and run the LightRAG Server.
+* Create a project directory.
+* Copy the `docker-compose.yml` file from the LightRAG repository into your project directory.
+* Prepare the `.env` file: duplicate the sample file [`env.example`](env.example) to create a customized `.env` file, and configure the LLM and embedding parameters according to your specific requirements.
-```yaml
-services:
-  lightrag:
-    container_name: lightrag
-    image: ghcr.io/hkuds/lightrag:latest
-    build:
-      context: .
-      dockerfile: Dockerfile
-      tags:
-        - ghcr.io/hkuds/lightrag:latest
-    ports:
-      - "${PORT:-9621}:9621"
-    volumes:
-      - ./data/rag_storage:/app/data/rag_storage
-      - ./data/inputs:/app/data/inputs
-      - ./data/tiktoken:/app/data/tiktoken
-      - ./config.ini:/app/config.ini
-      - ./.env:/app/.env
-    env_file:
-      - .env
-    environment:
-      - TIKTOKEN_CACHE_DIR=/app/data/tiktoken
-    restart: unless-stopped
-    extra_hosts:
-      - "host.docker.internal:host-gateway"
-```
 * Start the LightRAG Server with the following command:
@@ -158,11 +134,11 @@ docker compose up
 # If you want the program to run in the background after startup, add the -d parameter at the end of the command.
 ```
-> You can get the official docker compose file from here: [docker-compose.yml](https://raw.githubusercontent.com/HKUDS/LightRAG/refs/heads/main/docker-compose.yml). For historical versions of LightRAG docker images, visit this link: [LightRAG Docker Images](https://github.com/HKUDS/LightRAG/pkgs/container/lightrag)
+You can get the official docker compose file from here: [docker-compose.yml](https://raw.githubusercontent.com/HKUDS/LightRAG/refs/heads/main/docker-compose.yml). For historical versions of LightRAG docker images, visit this link: [LightRAG Docker Images](https://github.com/HKUDS/LightRAG/pkgs/container/lightrag). For more details about docker deployment, please refer to [DockerDeployment.md](./../../docs/DockerDeployment.md).
 ### Offline Deployment
-For offline or air-gapped environments, see the [Offline Deployment Guide](./../../docs/OfflineDeployment.md) for instructions on pre-installing all dependencies and cache files.
+Official LightRAG Docker images are fully compatible with offline or air-gapped environments. If you want to build your own offline environment, please refer to the [Offline Deployment Guide](./../../docs/OfflineDeployment.md).
 ### Starting Multiple LightRAG Instances


@@ -908,7 +908,9 @@ def create_app(args):
         async def get_response(self, path: str, scope):
             response = await super().get_response(path, scope)
-            if path.endswith(".html"):
+            is_html = path.endswith(".html") or response.media_type == "text/html"
+            if is_html:
                 response.headers["Cache-Control"] = (
                     "no-cache, no-store, must-revalidate"
                 )


@@ -3332,7 +3332,11 @@ async def _build_query_context(
         query_embedding=search_result["query_embedding"],
     )
-    if not merged_chunks:
+    if (
+        not merged_chunks
+        and not truncation_result["entities_context"]
+        and not truncation_result["relations_context"]
+    ):
         return None
     # Stage 4: Build final LLM context with dynamic token processing


@@ -80,7 +80,6 @@ api = [
 # Offline deployment dependencies (layered design for flexibility)
 offline-docs = [
     # Document processing dependencies
-    "docling>=1.0.0,<3.0.0",
     "pypdf2>=3.0.0",
     "python-docx>=0.8.11,<2.0.0",
     "python-pptx>=0.6.21,<2.0.0",
@@ -106,8 +105,6 @@ offline-llm = [
     "aioboto3>=12.0.0,<16.0.0",
     "voyageai>=0.2.0,<1.0.0",
     "llama-index>=0.9.0,<1.0.0",
-    "transformers>=4.30.0,<5.0.0",
-    "torch>=2.0.0,<2.3.0",
 ]
 offline = [
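Because Docling, `torch`, and `transformers` are dropped from the offline extras here, environments that still want them must install them explicitly; a sketch reusing the version bounds that were previously declared in pyproject.toml:

```bash
# Optional: restore the heavyweight stacks outside the offline extras
pip install "docling>=1.0.0,<3.0.0" "torch>=2.0.0,<2.3.0" "transformers>=4.30.0,<5.0.0"
```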


@@ -8,7 +8,6 @@
 # Or use constraints: pip install --constraint constraints-offline.txt -r requirements-offline-docs.txt
 # Document processing dependencies (with version constraints matching pyproject.toml)
-docling>=1.0.0,<3.0.0
 openpyxl>=3.0.0,<4.0.0
 pypdf2>=3.0.0
 python-docx>=0.8.11,<2.0.0


@@ -13,7 +13,5 @@ anthropic>=0.18.0,<1.0.0
 llama-index>=0.9.0,<1.0.0
 ollama>=0.1.0,<1.0.0
 openai>=1.0.0,<2.0.0
-torch>=2.0.0,<2.3.0
-transformers>=4.30.0,<5.0.0
 voyageai>=0.2.0,<1.0.0
 zhipuai>=2.0.0,<3.0.0


@@ -15,7 +15,6 @@ anthropic>=0.18.0,<1.0.0
 asyncpg>=0.29.0,<1.0.0
 # Document processing dependencies
-docling>=1.0.0,<3.0.0
 llama-index>=0.9.0,<1.0.0
 neo4j>=5.0.0,<7.0.0
 ollama>=0.1.0,<1.0.0
@@ -28,7 +27,5 @@ python-docx>=0.8.11,<2.0.0
 python-pptx>=0.6.21,<2.0.0
 qdrant-client>=1.7.0,<2.0.0
 redis>=5.0.0,<7.0.0
-torch>=2.0.0,<2.3.0
-transformers>=4.30.0,<5.0.0
 voyageai>=0.2.0,<1.0.0
 zhipuai>=2.0.0,<3.0.0

uv.lock generated Normal file

File diff suppressed because it is too large.