Merge branch 'main' into cohere-rerank

Commit 8e50eef58b

29 changed files with 786 additions and 466 deletions

.github/dependabot.yml (new file, 206 lines)

@@ -0,0 +1,206 @@
+# Keep GitHub Actions up to date with GitHub's Dependabot...
+# https://docs.github.com/en/code-security/dependabot/working-with-dependabot/keeping-your-actions-up-to-date-with-dependabot
+# https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#package-ecosystem
+version: 2
+updates:
+  # ============================================================
+  # GitHub Actions
+  # PR Strategy:
+  # - All updates (major/minor/patch): Grouped into a single PR
+  # ============================================================
+  - package-ecosystem: github-actions
+    directory: /
+    groups:
+      github-actions:
+        patterns:
+          - "*" # Group all Actions updates into a single larger pull request
+    schedule:
+      interval: weekly
+      day: monday
+      time: "02:00"
+      timezone: "Asia/Shanghai"
+    labels:
+      - "dependencies"
+      - "github-actions"
+    open-pull-requests-limit: 2
+
+  # ============================================================
+  # Python (pip) Dependencies
+  # PR Strategy:
+  # - Major updates: Individual PR per package (except numpy which is ignored)
+  # - Minor updates: Grouped by category (llm-providers, storage, etc.)
+  # - Patch updates: Grouped by category
+  # ============================================================
+  - package-ecosystem: "pip"
+    directory: "/"
+    schedule:
+      interval: "weekly"
+      day: "wednesday"
+      time: "02:00"
+      timezone: "Asia/Shanghai"
+    cooldown:
+      default-days: 5
+      semver-major-days: 30
+      semver-minor-days: 7
+      semver-patch-days: 3
+    groups:
+      # Core dependencies - LLM providers and embeddings
+      llm-providers:
+        patterns:
+          - "openai"
+          - "anthropic"
+          - "google-*"
+          - "boto3"
+          - "botocore"
+          - "ollama"
+        update-types:
+          - "minor"
+          - "patch"
+      # Storage backends
+      storage:
+        patterns:
+          - "neo4j"
+          - "pymongo"
+          - "redis"
+          - "psycopg*"
+          - "asyncpg"
+          - "milvus*"
+          - "qdrant*"
+        update-types:
+          - "minor"
+          - "patch"
+      # Data processing and ML
+      data-processing:
+        patterns:
+          - "numpy"
+          - "scipy"
+          - "pandas"
+          - "tiktoken"
+          - "transformers"
+          - "torch*"
+        update-types:
+          - "minor"
+          - "patch"
+      # Web framework and API
+      web-framework:
+        patterns:
+          - "fastapi"
+          - "uvicorn"
+          - "gunicorn"
+          - "starlette"
+          - "pydantic*"
+        update-types:
+          - "minor"
+          - "patch"
+      # Development and testing tools
+      dev-tools:
+        patterns:
+          - "pytest*"
+          - "ruff"
+          - "pre-commit"
+          - "black"
+          - "mypy"
+        update-types:
+          - "minor"
+          - "patch"
+      # Minor and patch updates for everything else
+      python-minor-patch:
+        patterns:
+          - "*"
+        update-types:
+          - "minor"
+          - "patch"
+    ignore:
+      - dependency-name: "numpy"
+        update-types:
+          - "version-update:semver-major"
+    labels:
+      - "dependencies"
+      - "python"
+    open-pull-requests-limit: 5
+
+  # ============================================================
+  # Frontend (bun) Dependencies
+  # PR Strategy:
+  # - Major updates: Individual PR per package
+  # - Minor updates: Grouped by category (react, ui-components, etc.)
+  # - Patch updates: Grouped by category
+  # ============================================================
+  - package-ecosystem: "bun"
+    directory: "/lightrag_webui"
+    schedule:
+      interval: "weekly"
+      day: "friday"
+      time: "02:00"
+      timezone: "Asia/Shanghai"
+    cooldown:
+      default-days: 5
+      semver-major-days: 30
+      semver-minor-days: 7
+      semver-patch-days: 3
+    groups:
+      # React ecosystem
+      react:
+        patterns:
+          - "react"
+          - "react-dom"
+          - "react-router*"
+          - "@types/react*"
+        update-types:
+          - "minor"
+          - "patch"
+      # UI components and styling
+      ui-components:
+        patterns:
+          - "@radix-ui/*"
+          - "tailwind*"
+          - "@tailwindcss/*"
+          - "lucide-react"
+          - "class-variance-authority"
+          - "clsx"
+        update-types:
+          - "minor"
+          - "patch"
+      # Graph visualization
+      graph-viz:
+        patterns:
+          - "sigma"
+          - "@sigma/*"
+          - "graphology*"
+        update-types:
+          - "minor"
+          - "patch"
+      # Build tools and dev dependencies
+      build-tools:
+        patterns:
+          - "vite"
+          - "@vitejs/*"
+          - "typescript"
+          - "eslint*"
+          - "@eslint/*"
+          - "typescript-eslint"
+          - "prettier"
+          - "prettier-*"
+          - "@types/bun"
+        update-types:
+          - "minor"
+          - "patch"
+      # Content rendering libraries (math, diagrams, etc.)
+      content-rendering:
+        patterns:
+          - "katex"
+          - "mermaid"
+        update-types:
+          - "minor"
+          - "patch"
+      # All other minor and patch updates
+      frontend-minor-patch:
+        patterns:
+          - "*"
+        update-types:
+          - "minor"
+          - "patch"
+    labels:
+      - "dependencies"
+      - "frontend"
+    open-pull-requests-limit: 5

.github/workflows/copilot-setup-steps.yml (2 additions, 2 deletions)

@@ -23,10 +23,10 @@ jobs:
       # If you do not check out your code, Copilot will do this for you.
       steps:
         - name: Checkout code
-          uses: actions/checkout@v5
+          uses: actions/checkout@v6

         - name: Set up Python 3.11
-          uses: actions/setup-python@v5
+          uses: actions/setup-python@v6
           with:
             python-version: '3.11'

.github/workflows/docker-build-lite.yml (2 additions, 2 deletions)

@@ -18,7 +18,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
         with:
           fetch-depth: 0

@@ -66,7 +66,7 @@ jobs:
             type=raw,value=lite

      - name: Build and push lite Docker image
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
           context: .
           file: ./Dockerfile.lite

.github/workflows/docker-build-manual.yml (2 additions, 2 deletions)

@@ -18,7 +18,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
         with:
           fetch-depth: 0 # Fetch all history for tags

@@ -61,7 +61,7 @@ jobs:
             type=raw,value=${{ steps.get_tag.outputs.tag }}

      - name: Build and push Docker image
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
           context: .
           file: ./Dockerfile

.github/workflows/docker-publish.yml (2 additions, 2 deletions)

@@ -14,7 +14,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
         with:
           fetch-depth: 0 # Fetch all history for tags

@@ -63,7 +63,7 @@ jobs:
             type=raw,value=latest,enable=${{ steps.check_prerelease.outputs.is_prerelease == 'false' }}

      - name: Build and push Docker image
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
           context: .
           file: ./Dockerfile

.github/workflows/linting.yaml (5 changed lines)

@@ -10,14 +10,15 @@ on:

 jobs:
   lint-and-format:
     name: Linting and Formatting
     runs-on: ubuntu-latest

     steps:
       - name: Checkout code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v6

       - name: Set up Python
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v6
         with:
           python-version: '3.x'

.github/workflows/pypi-publish.yml (5 additions, 5 deletions)

@@ -13,13 +13,13 @@ jobs:
     runs-on: ubuntu-latest

     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v6
         with:
           fetch-depth: 0 # Fetch all history for tags

      # Build frontend WebUI
      - name: Setup Bun
-        uses: oven-sh/setup-bun@v1
+        uses: oven-sh/setup-bun@v2
         with:
           bun-version: latest

@@ -40,7 +40,7 @@ jobs:
           echo "Frontend files:"
           ls -lh lightrag/api/webui/ | head -10

-      - uses: actions/setup-python@v5
+      - uses: actions/setup-python@v6
         with:
           python-version: "3.x"

@@ -64,7 +64,7 @@ jobs:
           python -m build

      - name: Upload distributions
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v5
         with:
           name: release-dists
           path: dist/

@@ -81,7 +81,7 @@ jobs:

     steps:
       - name: Retrieve release distributions
-        uses: actions/download-artifact@v4
+        uses: actions/download-artifact@v6
         with:
           name: release-dists
           path: dist/

.github/workflows/stale.yaml (1 addition, 1 deletion)

@@ -13,7 +13,7 @@ jobs:
   stale:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/stale@v9
+      - uses: actions/stale@v10
        with:
          days-before-stale: 90 # 90 days
          days-before-close: 7 # 7 days after marked as stale

.github/workflows/tests.yml (4 additions, 4 deletions)

@@ -13,13 +13,13 @@ jobs:

     strategy:
       matrix:
-        python-version: ['3.10', '3.11', '3.12']
+        python-version: ['3.12', '3.13', '3.14']

     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v6

      - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v5
+        uses: actions/setup-python@v6
        with:
          python-version: ${{ matrix.python-version }}

@@ -45,7 +45,7 @@ jobs:

      - name: Upload test results
        if: always()
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v5
        with:
          name: test-results-py${{ matrix.python-version }}
          path: |

README-zh.md (55 changed lines)

@@ -407,6 +407,11 @@ LightRAG 需要利用LLM和Embedding模型来完成文档索引和知识库查询
 * LightRAG还支持类OpenAI的聊天/嵌入API:

 ```python
 import os
+import numpy as np
+from lightrag.utils import wrap_embedding_func_with_attrs
 from lightrag.llm.openai import openai_complete_if_cache, openai_embed

 async def llm_model_func(
     prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> str:

@@ -420,8 +425,9 @@ async def llm_model_func(
         **kwargs
     )

+@wrap_embedding_func_with_attrs(embedding_dim=4096, max_token_size=8192)
 async def embedding_func(texts: list[str]) -> np.ndarray:
-    return await openai_embed(
+    return await openai_embed.func(
         texts,
         model="solar-embedding-1-large-query",
         api_key=os.getenv("UPSTAGE_API_KEY"),

@@ -432,16 +438,17 @@ async def initialize_rag():
     rag = LightRAG(
         working_dir=WORKING_DIR,
         llm_model_func=llm_model_func,
-        embedding_func=EmbeddingFunc(
-            embedding_dim=4096,
-            func=embedding_func
-        )
+        embedding_func=embedding_func # 直接传入装饰后的函数
     )

     await rag.initialize_storages()
     return rag
 ```

+> **关于嵌入函数封装的重要说明:**
+>
+> `EmbeddingFunc` 不能嵌套封装。已经被 `@wrap_embedding_func_with_attrs` 装饰过的嵌入函数(如 `openai_embed`、`ollama_embed` 等)不能再次使用 `EmbeddingFunc()` 封装。这就是为什么在创建自定义嵌入函数时,我们调用 `xxx_embed.func`(底层未封装的函数)而不是直接调用 `xxx_embed`。

 </details>

 <details>

@@ -478,19 +485,20 @@
 然后您只需要按如下方式设置LightRAG:

 ```python
+import numpy as np
+from lightrag.utils import wrap_embedding_func_with_attrs
 from lightrag.llm.ollama import ollama_model_complete, ollama_embed

+@wrap_embedding_func_with_attrs(embedding_dim=768, max_token_size=8192)
+async def embedding_func(texts: list[str]) -> np.ndarray:
+    return await ollama_embed.func(texts, embed_model="nomic-embed-text")

 # 使用Ollama模型初始化LightRAG
 rag = LightRAG(
     working_dir=WORKING_DIR,
     llm_model_func=ollama_model_complete, # 使用Ollama模型进行文本生成
     llm_model_name='your_model_name', # 您的模型名称
     # 使用Ollama嵌入函数
-    embedding_func=EmbeddingFunc(
-        embedding_dim=768,
-        func=lambda texts: ollama_embed(
-            texts,
-            embed_model="nomic-embed-text"
-        )
-    ),
+    embedding_func=embedding_func, # 直接传入装饰后的函数
 )
 ```

@@ -529,22 +537,27 @@ ollama create -f Modelfile qwen2m
 您可以使用`llm_model_kwargs`参数配置ollama:

 ```python
+import numpy as np
+from lightrag.utils import wrap_embedding_func_with_attrs
 from lightrag.llm.ollama import ollama_model_complete, ollama_embed

+@wrap_embedding_func_with_attrs(embedding_dim=768, max_token_size=8192)
+async def embedding_func(texts: list[str]) -> np.ndarray:
+    return await ollama_embed.func(texts, embed_model="nomic-embed-text")

 rag = LightRAG(
     working_dir=WORKING_DIR,
     llm_model_func=ollama_model_complete, # 使用Ollama模型进行文本生成
     llm_model_name='your_model_name', # 您的模型名称
     llm_model_kwargs={"options": {"num_ctx": 32768}},
     # 使用Ollama嵌入函数
-    embedding_func=EmbeddingFunc(
-        embedding_dim=768,
-        func=lambda texts: ollama_embed(
-            texts,
-            embed_model="nomic-embed-text"
-        )
-    ),
+    embedding_func=embedding_func, # 直接传入装饰后的函数
 )
 ```

+> **关于嵌入函数封装的重要说明:**
+>
+> `EmbeddingFunc` 不能嵌套封装。已经被 `@wrap_embedding_func_with_attrs` 装饰过的嵌入函数(如 `openai_embed`、`ollama_embed` 等)不能再次使用 `EmbeddingFunc()` 封装。这就是为什么在创建自定义嵌入函数时,我们调用 `xxx_embed.func`(底层未封装的函数)而不是直接调用 `xxx_embed`。

 * **低RAM GPU**

   为了在低RAM GPU上运行此实验,您应该选择小型模型并调整上下文窗口(增加上下文会增加内存消耗)。例如,在6Gb RAM的改装挖矿GPU上运行这个ollama示例需要将上下文大小设置为26k,同时使用`gemma2:2b`。它能够在`book.txt`中找到197个实体和19个关系。

README.md (99 changed lines)

@@ -51,24 +51,24 @@

 ---
 ## 🎉 News
-- [2025.11.05]🎯[New Feature]: Integrated **RAGAS for Evaluation** and **Langfuse for Tracing**. Updated the API to return retrieved contexts alongside query results to support context precision metrics.
-- [2025.10.22]🎯[Scalability Enhancement]: Eliminated processing bottlenecks to support **Large-Scale Datasets Efficiently**.
-- [2025.09.15]🎯Significantly enhances KG extraction accuracy for **small LLMs** like Qwen3-30B-A3B.
-- [2025.08.29]🎯**Reranker** is supported now , significantly boosting performance for mixed queries(Set as default query mode now).
-- [2025.08.04]🎯**Document deletion** with KG regeneration to ensure query performance.
-- [2025.06.16]🎯Our team has released [RAG-Anything](https://github.com/HKUDS/RAG-Anything) an All-in-One Multimodal RAG System for seamless text, image, table, and equation processing.
-- [2025.06.05]🎯LightRAG now supports comprehensive multimodal data handling through [RAG-Anything](https://github.com/HKUDS/RAG-Anything) integration, enabling seamless document parsing and RAG capabilities across diverse formats including PDFs, images, Office documents, tables, and formulas. Please refer to the new [multimodal section](https://github.com/HKUDS/LightRAG/?tab=readme-ov-file#multimodal-document-processing-rag-anything-integration) for details.
-- [2025.03.18]🎯LightRAG now supports citation functionality, enabling proper source attribution.
-- [2025.02.12]🎯You can now use MongoDB as all in-one Storage.
-- [2025.02.05]🎯Our team has released [VideoRAG](https://github.com/HKUDS/VideoRAG) understanding extremely long-context videos.
-- [2025.01.13]🎯Our team has released [MiniRAG](https://github.com/HKUDS/MiniRAG) making RAG simpler with small models.
-- [2025.01.06]🎯You can now use PostgreSQL as all in-one Storage.
-- [2024.11.19]🎯A comprehensive guide to LightRAG is now available on [LearnOpenCV](https://learnopencv.com/lightrag). Many thanks to the blog author.
-- [2024.11.09]🎯Introducing the LightRAG Webui, which allows you to insert, query, visualize LightRAG knowledge.
-- [2024.11.04]🎯You can now [use Neo4J for Storage](https://github.com/HKUDS/LightRAG?tab=readme-ov-file#using-neo4j-for-storage).
-- [2024.10.18]🎯We've added a link to a [LightRAG Introduction Video](https://youtu.be/oageL-1I0GE). Thanks to the author!
-- [2024.10.17]🎯We have created a [Discord channel](https://discord.gg/yF2MmDJyGJ)! Welcome to join for sharing and discussions! 🎉🎉
-- [2024.10.16]🎯LightRAG now supports [Ollama models](https://github.com/HKUDS/LightRAG?tab=readme-ov-file#quick-start)!
+- [2025.11]🎯[New Feature]: Integrated **RAGAS for Evaluation** and **Langfuse for Tracing**. Updated the API to return retrieved contexts alongside query results to support context precision metrics.
+- [2025.10]🎯[Scalability Enhancement]: Eliminated processing bottlenecks to support **Large-Scale Datasets Efficiently**.
+- [2025.09]🎯[New Feature] Enhances knowledge graph extraction accuracy for **Open-Sourced LLMs** such as Qwen3-30B-A3B.
+- [2025.08]🎯[New Feature] **Reranker** is now supported, significantly boosting performance for mixed queries (set as default query mode).
+- [2025.08]🎯[New Feature] Added **Document Deletion** with automatic KG regeneration to ensure optimal query performance.
+- [2025.06]🎯[New Release] Our team has released [RAG-Anything](https://github.com/HKUDS/RAG-Anything) — an **All-in-One Multimodal RAG** system for seamless processing of text, images, tables, and equations.
+- [2025.06]🎯[New Feature] LightRAG now supports comprehensive multimodal data handling through [RAG-Anything](https://github.com/HKUDS/RAG-Anything) integration, enabling seamless document parsing and RAG capabilities across diverse formats including PDFs, images, Office documents, tables, and formulas. Please refer to the new [multimodal section](https://github.com/HKUDS/LightRAG/?tab=readme-ov-file#multimodal-document-processing-rag-anything-integration) for details.
+- [2025.03]🎯[New Feature] LightRAG now supports citation functionality, enabling proper source attribution and enhanced document traceability.
+- [2025.02]🎯[New Feature] You can now use MongoDB as an all-in-one storage solution for unified data management.
+- [2025.02]🎯[New Release] Our team has released [VideoRAG](https://github.com/HKUDS/VideoRAG)-a RAG system for understanding extremely long-context videos
+- [2025.01]🎯[New Release] Our team has released [MiniRAG](https://github.com/HKUDS/MiniRAG) making RAG simpler with small models.
+- [2025.01]🎯You can now use PostgreSQL as an all-in-one storage solution for data management.
+- [2024.11]🎯[New Resource] A comprehensive guide to LightRAG is now available on [LearnOpenCV](https://learnopencv.com/lightrag). — explore in-depth tutorials and best practices. Many thanks to the blog author for this excellent contribution!
+- [2024.11]🎯[New Feature] Introducing the LightRAG WebUI — an interface that allows you to insert, query, and visualize LightRAG knowledge through an intuitive web-based dashboard.
+- [2024.11]🎯[New Feature] You can now [use Neo4J for Storage](https://github.com/HKUDS/LightRAG?tab=readme-ov-file#using-neo4j-for-storage)-enabling graph database support.
+- [2024.10]🎯[New Feature] We've added a link to a [LightRAG Introduction Video](https://youtu.be/oageL-1I0GE). — a walkthrough of LightRAG's capabilities. Thanks to the author for this excellent contribution!
+- [2024.10]🎯[New Channel] We have created a [Discord channel](https://discord.gg/yF2MmDJyGJ)!💬 Welcome to join our community for sharing, discussions, and collaboration! 🎉🎉
+- [2024.10]🎯[New Feature] LightRAG now supports [Ollama models](https://github.com/HKUDS/LightRAG?tab=readme-ov-file#quick-start)!

 <details>
 <summary style="font-size: 1.4em; font-weight: bold; cursor: pointer; display: list-item;">

@@ -214,7 +214,7 @@ For a streaming response implementation example, please see `examples/lightrag_o

 **Note 2**: Only `lightrag_openai_demo.py` and `lightrag_openai_compatible_demo.py` are officially supported sample codes. Other sample files are community contributions that haven't undergone full testing and optimization.

-## Programing with LightRAG Core
+## Programming with LightRAG Core

 > ⚠️ **If you would like to integrate LightRAG into your project, we recommend utilizing the REST API provided by the LightRAG Server**. LightRAG Core is typically intended for embedded applications or for researchers who wish to conduct studies and evaluations.

@@ -313,7 +313,7 @@ A full list of LightRAG init parameters:
 | **vector_db_storage_cls_kwargs** | `dict` | Additional parameters for vector database, like setting the threshold for nodes and relations retrieval | cosine_better_than_threshold: 0.2(default value changed by env var COSINE_THRESHOLD) |
 | **enable_llm_cache** | `bool` | If `TRUE`, stores LLM results in cache; repeated prompts return cached responses | `TRUE` |
 | **enable_llm_cache_for_entity_extract** | `bool` | If `TRUE`, stores LLM results in cache for entity extraction; Good for beginners to debug your application | `TRUE` |
-| **addon_params** | `dict` | Additional parameters, e.g., `{"language": "Simplified Chinese", "entity_types": ["organization", "person", "location", "event"]}`: sets example limit, entiy/relation extraction output language | language: English` |
+| **addon_params** | `dict` | Additional parameters, e.g., `{"language": "Simplified Chinese", "entity_types": ["organization", "person", "location", "event"]}`: sets example limit, entity/relation extraction output language | language: English` |
 | **embedding_cache_config** | `dict` | Configuration for question-answer caching. Contains three parameters: `enabled`: Boolean value to enable/disable cache lookup functionality. When enabled, the system will check cached responses before generating new answers. `similarity_threshold`: Float value (0-1), similarity threshold. When a new question's similarity with a cached question exceeds this threshold, the cached answer will be returned directly without calling the LLM. `use_llm_check`: Boolean value to enable/disable LLM similarity verification. When enabled, LLM will be used as a secondary check to verify the similarity between questions before returning cached answers. | Default: `{"enabled": False, "similarity_threshold": 0.95, "use_llm_check": False}` |

 </details>

@@ -364,7 +364,7 @@ class QueryParam:
     max_total_tokens: int = int(os.getenv("MAX_TOTAL_TOKENS", "30000"))
     """Maximum total tokens budget for the entire query context (entities + relations + chunks + system prompt)."""

-    # History mesages is only send to LLM for context, not used for retrieval
+    # History messages are only sent to LLM for context, not used for retrieval
     conversation_history: list[dict[str, str]] = field(default_factory=list)
     """Stores past conversation history to maintain context.
     Format: [{"role": "user/assistant", "content": "message"}].

@@ -403,6 +403,11 @@ LightRAG requires the utilization of LLM and Embedding models to accomplish docu
 * LightRAG also supports Open AI-like chat/embeddings APIs:

 ```python
 import os
+import numpy as np
+from lightrag.utils import wrap_embedding_func_with_attrs
 from lightrag.llm.openai import openai_complete_if_cache, openai_embed

 async def llm_model_func(
     prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
 ) -> str:

@@ -416,8 +421,9 @@ async def llm_model_func(
         **kwargs
     )

+@wrap_embedding_func_with_attrs(embedding_dim=4096, max_token_size=8192)
 async def embedding_func(texts: list[str]) -> np.ndarray:
-    return await openai_embed(
+    return await openai_embed.func(
         texts,
         model="solar-embedding-1-large-query",
         api_key=os.getenv("UPSTAGE_API_KEY"),

@@ -428,16 +434,17 @@ async def initialize_rag():
     rag = LightRAG(
         working_dir=WORKING_DIR,
         llm_model_func=llm_model_func,
-        embedding_func=EmbeddingFunc(
-            embedding_dim=4096,
-            func=embedding_func
-        )
+        embedding_func=embedding_func # Pass the decorated function directly
     )

     await rag.initialize_storages()
     return rag
 ```

+> **Important Note on Embedding Function Wrapping:**
+>
+> `EmbeddingFunc` cannot be nested. Functions that have been decorated with `@wrap_embedding_func_with_attrs` (such as `openai_embed`, `ollama_embed`, etc.) cannot be wrapped again using `EmbeddingFunc()`. This is why we call `xxx_embed.func` (the underlying unwrapped function) instead of `xxx_embed` directly when creating custom embedding functions.

 </details>
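
To make the nesting rule above concrete, here is a minimal sketch (identifiers follow the README snippets; the commented-out line shows the disallowed pattern):

```python
import numpy as np
from lightrag.utils import EmbeddingFunc, wrap_embedding_func_with_attrs
from lightrag.llm.ollama import ollama_embed

# Wrong: ollama_embed is already wrapped by @wrap_embedding_func_with_attrs,
# so wrapping it again would nest one EmbeddingFunc inside another.
# embedding_func = EmbeddingFunc(embedding_dim=768, func=ollama_embed)

# Right: decorate a custom function that calls the underlying, unwrapped
# callable via .func, then pass the decorated function to LightRAG directly.
@wrap_embedding_func_with_attrs(embedding_dim=768, max_token_size=8192)
async def embedding_func(texts: list[str]) -> np.ndarray:
    return await ollama_embed.func(texts, embed_model="nomic-embed-text")
```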

 <details>

@@ -476,19 +483,20 @@ If you want to use Ollama models, you need to pull model you plan to use and emb
 Then you only need to set LightRAG as follows:

 ```python
+import numpy as np
+from lightrag.utils import wrap_embedding_func_with_attrs
 from lightrag.llm.ollama import ollama_model_complete, ollama_embed

+@wrap_embedding_func_with_attrs(embedding_dim=768, max_token_size=8192)
+async def embedding_func(texts: list[str]) -> np.ndarray:
+    return await ollama_embed.func(texts, embed_model="nomic-embed-text")

 # Initialize LightRAG with Ollama model
 rag = LightRAG(
     working_dir=WORKING_DIR,
     llm_model_func=ollama_model_complete, # Use Ollama model for text generation
     llm_model_name='your_model_name', # Your model name
     # Use Ollama embedding function
-    embedding_func=EmbeddingFunc(
-        embedding_dim=768,
-        func=lambda texts: ollama_embed(
-            texts,
-            embed_model="nomic-embed-text"
-        )
-    ),
+    embedding_func=embedding_func, # Pass the decorated function directly
 )
 ```

@@ -527,22 +535,27 @@ ollama create -f Modelfile qwen2m
 You can use the `llm_model_kwargs` param to configure ollama:

 ```python
+import numpy as np
+from lightrag.utils import wrap_embedding_func_with_attrs
 from lightrag.llm.ollama import ollama_model_complete, ollama_embed

+@wrap_embedding_func_with_attrs(embedding_dim=768, max_token_size=8192)
+async def embedding_func(texts: list[str]) -> np.ndarray:
+    return await ollama_embed.func(texts, embed_model="nomic-embed-text")

 rag = LightRAG(
     working_dir=WORKING_DIR,
     llm_model_func=ollama_model_complete, # Use Ollama model for text generation
     llm_model_name='your_model_name', # Your model name
     llm_model_kwargs={"options": {"num_ctx": 32768}},
     # Use Ollama embedding function
-    embedding_func=EmbeddingFunc(
-        embedding_dim=768,
-        func=lambda texts: ollama_embed(
-            texts,
-            embed_model="nomic-embed-text"
-        )
-    ),
+    embedding_func=embedding_func, # Pass the decorated function directly
 )
 ```

+> **Important Note on Embedding Function Wrapping:**
+>
+> `EmbeddingFunc` cannot be nested. Functions that have been decorated with `@wrap_embedding_func_with_attrs` (such as `openai_embed`, `ollama_embed`, etc.) cannot be wrapped again using `EmbeddingFunc()`. This is why we call `xxx_embed.func` (the underlying unwrapped function) instead of `xxx_embed` directly when creating custom embedding functions.

 * **Low RAM GPUs**

   In order to run this experiment on a low-RAM GPU you should select a small model and tune the context window (increasing context increases memory consumption). For example, running this ollama example on a repurposed mining GPU with 6Gb of RAM required setting the context size to 26k while using `gemma2:2b`. It was able to find 197 entities and 19 relations on `book.txt`.

@@ -1555,7 +1568,7 @@ Langfuse provides a drop-in replacement for the OpenAI client that automatically
 pip install lightrag-hku
 pip install lightrag-hku[observability]

-# Or install from souce code with debug mode enabled
+# Or install from source code with debug mode enabled
 pip install -e .
 pip install -e ".[observability]"
 ```

@@ -1 +1 @@
-__api_version__ = "0256"
+__api_version__ = "0259"

@@ -365,8 +365,12 @@ def parse_args() -> argparse.Namespace:

     # Inject model configuration
     args.llm_model = get_env_value("LLM_MODEL", "mistral-nemo:latest")
-    args.embedding_model = get_env_value("EMBEDDING_MODEL", "bge-m3:latest")
-    args.embedding_dim = get_env_value("EMBEDDING_DIM", 1024, int)
+    # EMBEDDING_MODEL defaults to None - each binding will use its own default model
+    # e.g., OpenAI uses "text-embedding-3-small", Jina uses "jina-embeddings-v4"
+    args.embedding_model = get_env_value("EMBEDDING_MODEL", None, special_none=True)
+    # EMBEDDING_DIM defaults to None - each binding will use its own default dimension
+    # Value is inherited from provider defaults via wrap_embedding_func_with_attrs decorator
+    args.embedding_dim = get_env_value("EMBEDDING_DIM", None, int, special_none=True)
     args.embedding_send_dim = get_env_value("EMBEDDING_SEND_DIM", False, bool)

     # Inject chunk configuration
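
The `special_none=True` arguments let an unset environment value fall through as `None`, deferring to each binding's default. A rough sketch of that behavior, inferred from these call sites rather than from `get_env_value`'s actual definition:

```python
import os

# Hypothetical reimplementation for illustration only.
def get_env_value(key, default, cast=str, special_none=False):
    raw = os.getenv(key)
    if raw is None or (special_none and raw.lower() == "none"):
        return default  # None here means "defer to the provider's default"
    return cast(raw)

# With EMBEDDING_MODEL unset, the server keeps model=None and the openai
# binding later falls back to its own default (text-embedding-3-small).
model = get_env_value("EMBEDDING_MODEL", None, special_none=True)
```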

@@ -654,6 +654,17 @@ def create_app(args):
     2. Extracts max_token_size and embedding_dim from provider if it's an EmbeddingFunc
     3. Creates an optimized wrapper that calls the underlying function directly (avoiding double-wrapping)
     4. Returns a properly configured EmbeddingFunc instance

+    Configuration Rules:
+    - When EMBEDDING_MODEL is not set: Uses provider's default model and dimension
+      (e.g., jina-embeddings-v4 with 2048 dims, text-embedding-3-small with 1536 dims)
+    - When EMBEDDING_MODEL is set to a custom model: User MUST also set EMBEDDING_DIM
+      to match the custom model's dimension (e.g., for jina-embeddings-v3, set EMBEDDING_DIM=1024)
+
+    Note: The embedding_dim parameter is automatically injected by EmbeddingFunc wrapper
+    when send_dimensions=True (enabled for Jina and Gemini bindings). This wrapper calls
+    the underlying provider function directly (.func) to avoid double-wrapping, so we must
+    explicitly pass embedding_dim to the provider's underlying function.
     """

     # Step 1: Import provider function and extract default attributes

@@ -713,6 +724,7 @@ def create_app(args):
     )

     # Step 3: Create optimized embedding function (calls underlying function directly)
+    # Note: When model is None, each binding will use its own default model
     async def optimized_embedding_function(texts, embedding_dim=None):
         try:
             if binding == "lollms":

@@ -724,9 +736,9 @@ def create_app(args):
                     if isinstance(lollms_embed, EmbeddingFunc)
                     else lollms_embed
                 )
-                return await actual_func(
-                    texts, embed_model=model, host=host, api_key=api_key
-                )
+                # lollms embed_model is not used (server uses configured vectorizer)
+                # Only pass base_url and api_key
+                return await actual_func(texts, base_url=host, api_key=api_key)
             elif binding == "ollama":
                 from lightrag.llm.ollama import ollama_embed

@@ -745,13 +757,16 @@ def create_app(args):

                 ollama_options = OllamaEmbeddingOptions.options_dict(args)

-                return await actual_func(
-                    texts,
-                    embed_model=model,
-                    host=host,
-                    api_key=api_key,
-                    options=ollama_options,
-                )
+                # Pass embed_model only if provided, let function use its default (bge-m3:latest)
+                kwargs = {
+                    "texts": texts,
+                    "host": host,
+                    "api_key": api_key,
+                    "options": ollama_options,
+                }
+                if model:
+                    kwargs["embed_model"] = model
+                return await actual_func(**kwargs)
             elif binding == "azure_openai":
                 from lightrag.llm.azure_openai import azure_openai_embed

@@ -760,7 +775,11 @@ def create_app(args):
                     if isinstance(azure_openai_embed, EmbeddingFunc)
                     else azure_openai_embed
                 )
-                return await actual_func(texts, model=model, api_key=api_key)
+                # Pass model only if provided, let function use its default otherwise
+                kwargs = {"texts": texts, "api_key": api_key}
+                if model:
+                    kwargs["model"] = model
+                return await actual_func(**kwargs)
             elif binding == "aws_bedrock":
                 from lightrag.llm.bedrock import bedrock_embed

@@ -769,7 +788,11 @@ def create_app(args):
                     if isinstance(bedrock_embed, EmbeddingFunc)
                     else bedrock_embed
                 )
-                return await actual_func(texts, model=model)
+                # Pass model only if provided, let function use its default otherwise
+                kwargs = {"texts": texts}
+                if model:
+                    kwargs["model"] = model
+                return await actual_func(**kwargs)
             elif binding == "jina":
                 from lightrag.llm.jina import jina_embed

@@ -778,12 +801,16 @@ def create_app(args):
                     if isinstance(jina_embed, EmbeddingFunc)
                     else jina_embed
                 )
-                return await actual_func(
-                    texts,
-                    embedding_dim=embedding_dim,
-                    base_url=host,
-                    api_key=api_key,
-                )
+                # Pass model only if provided, let function use its default (jina-embeddings-v4)
+                kwargs = {
+                    "texts": texts,
+                    "embedding_dim": embedding_dim,
+                    "base_url": host,
+                    "api_key": api_key,
+                }
+                if model:
+                    kwargs["model"] = model
+                return await actual_func(**kwargs)
             elif binding == "gemini":
                 from lightrag.llm.gemini import gemini_embed

@@ -801,14 +828,19 @@ def create_app(args):

                 gemini_options = GeminiEmbeddingOptions.options_dict(args)

-                return await actual_func(
-                    texts,
-                    model=model,
-                    base_url=host,
-                    api_key=api_key,
-                    embedding_dim=embedding_dim,
-                    task_type=gemini_options.get("task_type", "RETRIEVAL_DOCUMENT"),
-                )
+                # Pass model only if provided, let function use its default (gemini-embedding-001)
+                kwargs = {
+                    "texts": texts,
+                    "base_url": host,
+                    "api_key": api_key,
+                    "embedding_dim": embedding_dim,
+                    "task_type": gemini_options.get(
+                        "task_type", "RETRIEVAL_DOCUMENT"
+                    ),
+                }
+                if model:
+                    kwargs["model"] = model
+                return await actual_func(**kwargs)
             else:  # openai and compatible
                 from lightrag.llm.openai import openai_embed

@@ -817,13 +849,16 @@ def create_app(args):
                     if isinstance(openai_embed, EmbeddingFunc)
                     else openai_embed
                 )
-                return await actual_func(
-                    texts,
-                    model=model,
-                    base_url=host,
-                    api_key=api_key,
-                    embedding_dim=embedding_dim,
-                )
+                # Pass model only if provided, let function use its default (text-embedding-3-small)
+                kwargs = {
+                    "texts": texts,
+                    "base_url": host,
+                    "api_key": api_key,
+                    "embedding_dim": embedding_dim,
+                }
+                if model:
+                    kwargs["model"] = model
+                return await actual_func(**kwargs)
         except ImportError as e:
             raise Exception(f"Failed to import {binding} embedding: {e}")
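
Each binding branch above repeats the same "add the model key only when configured" step. One way to factor that repetition (a refactoring sketch, not code from this commit; `call_with_optional_model` and its parameter names are hypothetical):

```python
from typing import Any, Awaitable, Callable

async def call_with_optional_model(
    func: Callable[..., Awaitable[Any]],
    texts: list[str],
    model: str | None,
    model_key: str = "model",  # ollama uses "embed_model" instead
    **common: Any,
) -> Any:
    # Build the kwargs shared by every binding...
    kwargs: dict[str, Any] = {"texts": texts, **common}
    # ...and add the model only when the user actually configured one,
    # so the provider function's own default stays in effect otherwise.
    if model:
        kwargs[model_key] = model
    return await func(**kwargs)
```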

@@ -24,7 +24,11 @@ from pydantic import BaseModel, Field, field_validator

 from lightrag import LightRAG
 from lightrag.base import DeletionResult, DocProcessingStatus, DocStatus
-from lightrag.utils import generate_track_id
+from lightrag.utils import (
+    generate_track_id,
+    compute_mdhash_id,
+    sanitize_text_for_encoding,
+)
 from lightrag.api.utils_api import get_combined_auth_dependency
 from ..config import global_args

@@ -159,7 +163,7 @@ class ReprocessResponse(BaseModel):
     Attributes:
         status: Status of the reprocessing operation
         message: Message describing the operation result
-        track_id: Tracking ID for monitoring reprocessing progress
+        track_id: Always empty string. Reprocessed documents retain their original track_id.
     """

     status: Literal["reprocessing_started"] = Field(

@@ -167,7 +171,8 @@ class ReprocessResponse(BaseModel):
     )
     message: str = Field(description="Human-readable message describing the operation")
     track_id: str = Field(
-        description="Tracking ID for monitoring reprocessing progress"
+        default="",
+        description="Always empty string. Reprocessed documents retain their original track_id from initial upload.",
     )

     class Config:

@@ -175,7 +180,7 @@ class ReprocessResponse(BaseModel):
             "example": {
                 "status": "reprocessing_started",
                 "message": "Reprocessing of failed documents has been initiated in background",
-                "track_id": "retry_20250729_170612_def456",
+                "track_id": "",
             }
         }

@@ -2097,12 +2102,14 @@ def create_document_routes(
         # Check if filename already exists in doc_status storage
         existing_doc_data = await rag.doc_status.get_doc_by_file_path(safe_filename)
         if existing_doc_data:
-            # Get document status information for error message
+            # Get document status and track_id from existing document
             status = existing_doc_data.get("status", "unknown")
+            # Use `or ""` to handle both missing key and None value (e.g., legacy rows without track_id)
+            existing_track_id = existing_doc_data.get("track_id") or ""
             return InsertResponse(
                 status="duplicated",
                 message=f"File '{safe_filename}' already exists in document storage (Status: {status}).",
-                track_id="",
+                track_id=existing_track_id,
             )

         file_path = doc_manager.input_dir / safe_filename

@@ -2166,14 +2173,30 @@ def create_document_routes(
             request.file_source
         )
         if existing_doc_data:
-            # Get document status information for error message
+            # Get document status and track_id from existing document
             status = existing_doc_data.get("status", "unknown")
+            # Use `or ""` to handle both missing key and None value (e.g., legacy rows without track_id)
+            existing_track_id = existing_doc_data.get("track_id") or ""
             return InsertResponse(
                 status="duplicated",
                 message=f"File source '{request.file_source}' already exists in document storage (Status: {status}).",
-                track_id="",
+                track_id=existing_track_id,
             )

+        # Check if content already exists by computing content hash (doc_id)
+        sanitized_text = sanitize_text_for_encoding(request.text)
+        content_doc_id = compute_mdhash_id(sanitized_text, prefix="doc-")
+        existing_doc = await rag.doc_status.get_by_id(content_doc_id)
+        if existing_doc:
+            # Content already exists, return duplicated with existing track_id
+            status = existing_doc.get("status", "unknown")
+            existing_track_id = existing_doc.get("track_id") or ""
+            return InsertResponse(
+                status="duplicated",
+                message=f"Identical content already exists in document storage (doc_id: {content_doc_id}, Status: {status}).",
+                track_id=existing_track_id,
+            )

         # Generate track_id for text insertion
         track_id = generate_track_id("insert")

@@ -2232,14 +2255,31 @@ def create_document_routes(
             file_source
         )
         if existing_doc_data:
-            # Get document status information for error message
+            # Get document status and track_id from existing document
             status = existing_doc_data.get("status", "unknown")
+            # Use `or ""` to handle both missing key and None value (e.g., legacy rows without track_id)
+            existing_track_id = existing_doc_data.get("track_id") or ""
             return InsertResponse(
                 status="duplicated",
                 message=f"File source '{file_source}' already exists in document storage (Status: {status}).",
-                track_id="",
+                track_id=existing_track_id,
             )

+        # Check if any content already exists by computing content hash (doc_id)
+        for text in request.texts:
+            sanitized_text = sanitize_text_for_encoding(text)
+            content_doc_id = compute_mdhash_id(sanitized_text, prefix="doc-")
+            existing_doc = await rag.doc_status.get_by_id(content_doc_id)
+            if existing_doc:
+                # Content already exists, return duplicated with existing track_id
+                status = existing_doc.get("status", "unknown")
+                existing_track_id = existing_doc.get("track_id") or ""
+                return InsertResponse(
+                    status="duplicated",
+                    message=f"Identical content already exists in document storage (doc_id: {content_doc_id}, Status: {status}).",
+                    track_id=existing_track_id,
+                )

         # Generate track_id for texts insertion
         track_id = generate_track_id("insert")
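
The checks above rely on content addressing: the same sanitized text always hashes to the same `doc-` id, so a lookup in `doc_status` is enough to detect a re-submission. A minimal sketch using the same helpers the route imports (return shapes are illustrative):

```python
from lightrag.utils import compute_mdhash_id, sanitize_text_for_encoding

text = "LightRAG is a simple and fast RAG system."
doc_id = compute_mdhash_id(sanitize_text_for_encoding(text), prefix="doc-")

# Submitting byte-identical text later reproduces the same doc_id, so the
# route can look it up in doc_status and answer "duplicated" with the
# track_id recorded at first insertion instead of re-indexing the document.
assert doc_id == compute_mdhash_id(sanitize_text_for_encoding(text), prefix="doc-")
```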

@@ -3058,29 +3098,27 @@ def create_document_routes(
         This is useful for recovering from server crashes, network errors, LLM service
         outages, or other temporary failures that caused document processing to fail.

-        The processing happens in the background and can be monitored using the
-        returned track_id or by checking the pipeline status.
+        The processing happens in the background and can be monitored by checking the
+        pipeline status. The reprocessed documents retain their original track_id from
+        initial upload, so use their original track_id to monitor progress.

         Returns:
-            ReprocessResponse: Response with status, message, and track_id
+            ReprocessResponse: Response with status and message.
+                track_id is always empty string because reprocessed documents retain
+                their original track_id from initial upload.

         Raises:
             HTTPException: If an error occurs while initiating reprocessing (500).
         """
         try:
-            # Generate track_id with "retry" prefix for retry operation
-            track_id = generate_track_id("retry")
-
             # Start the reprocessing in the background
+            # Note: Reprocessed documents retain their original track_id from initial upload
             background_tasks.add_task(rag.apipeline_process_enqueue_documents)
-            logger.info(
-                f"Reprocessing of failed documents initiated with track_id: {track_id}"
-            )
+            logger.info("Reprocessing of failed documents initiated")

             return ReprocessResponse(
                 status="reprocessing_started",
-                message="Reprocessing of failed documents has been initiated in background",
-                track_id=track_id,
+                message="Reprocessing of failed documents has been initiated in background. Documents retain their original track_id.",
             )

         except Exception as e:

@@ -100,7 +100,7 @@ def main():
     print("\nHow to fix:")
     print("  Option 1 - Set environment variable before starting (recommended):")
     print("    export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES")
-    print("    lightrag-server")
+    print("    lightrag-gunicorn --workers 2")
     print("\n  Option 2 - Add to your shell profile (~/.zshrc or ~/.bash_profile):")
     print("    echo 'export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES' >> ~/.zshrc")
     print("    source ~/.zshrc")

@@ -383,7 +383,7 @@ class PostgreSQLDB:
     async def configure_age_extension(connection: asyncpg.Connection) -> None:
         """Create AGE extension if it doesn't exist for graph operations."""
         try:
-            await connection.execute("CREATE EXTENSION IF NOT EXISTS age")  # type: ignore
+            await connection.execute("CREATE EXTENSION IF NOT EXISTS AGE CASCADE")  # type: ignore
             logger.info("PostgreSQL, AGE extension enabled")
         except Exception as e:
             logger.warning(f"Could not create AGE extension: {e}")

@@ -69,6 +69,7 @@ async def fetch_data(url, headers, data):
 )
 async def jina_embed(
     texts: list[str],
+    model: str = "jina-embeddings-v4",
     embedding_dim: int = 2048,
     late_chunking: bool = False,
     base_url: str = None,

@@ -78,6 +79,8 @@ async def jina_embed(

     Args:
         texts: List of texts to embed.
+        model: The Jina embedding model to use (default: jina-embeddings-v4).
+            Supported models: jina-embeddings-v3, jina-embeddings-v4, etc.
         embedding_dim: The embedding dimensions (default: 2048 for jina-embeddings-v4).
             **IMPORTANT**: This parameter is automatically injected by the EmbeddingFunc wrapper.
             Do NOT manually pass this parameter when calling the function directly.

@@ -107,7 +110,7 @@ async def jina_embed(
         "Authorization": f"Bearer {os.environ['JINA_API_KEY']}",
     }
     data = {
-        "model": "jina-embeddings-v4",
+        "model": model,
         "task": "text-matching",
         "dimensions": embedding_dim,
         "embedding_type": "base64",
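
To illustrate the injection note in the docstring above: ordinary callers invoke the wrapped `jina_embed` and let the `EmbeddingFunc` wrapper supply the dimension, while code that bypasses the wrapper through `.func` (as the server's optimized path does) must pass `embedding_dim` itself. A sketch under those assumptions (requires a valid `JINA_API_KEY`):

```python
import asyncio
from lightrag.llm.jina import jina_embed

async def demo() -> None:
    # Normal use: the EmbeddingFunc wrapper injects embedding_dim itself,
    # so the caller only supplies the texts (JINA_API_KEY read from the env).
    vectors = await jina_embed(["graph retrieval", "vector retrieval"])

    # Bypassing the wrapper: .func is the undecorated coroutine, so the
    # dimension must now be passed explicitly (2048 for jina-embeddings-v4).
    raw = await jina_embed.func(["graph retrieval"], embedding_dim=2048)
    print(vectors.shape, raw.shape)

# asyncio.run(demo())  # hypothetical usage; needs network access and an API key
```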

@@ -173,7 +173,9 @@ async def ollama_model_complete(


 @wrap_embedding_func_with_attrs(embedding_dim=1024, max_token_size=8192)
-async def ollama_embed(texts: list[str], embed_model, **kwargs) -> np.ndarray:
+async def ollama_embed(
+    texts: list[str], embed_model: str = "bge-m3:latest", **kwargs
+) -> np.ndarray:
     api_key = kwargs.pop("api_key", None)
     if not api_key:
         api_key = os.getenv("OLLAMA_API_KEY")

@@ -867,7 +867,7 @@ async def azure_openai_complete(
     return result


-@wrap_embedding_func_with_attrs(embedding_dim=1536)
+@wrap_embedding_func_with_attrs(embedding_dim=1536, max_token_size=8192)
 async def azure_openai_embed(
     texts: list[str],
     model: str | None = None,

@@ -397,8 +397,8 @@ async def _handle_single_entity_extraction(

     # Validate entity name after all cleaning steps
     if not entity_name or not entity_name.strip():
-        logger.warning(
-            f"Entity extraction error: entity name became empty after cleaning. Original: '{record_attributes[1]}'"
+        logger.info(
+            f"Empty entity name found after sanitization. Original: '{record_attributes[1]}'"
         )
         return None

@@ -474,14 +474,14 @@ async def _handle_single_relationship_extraction(

     # Validate entity names after all cleaning steps
     if not source:
-        logger.warning(
-            f"Relationship extraction error: source entity became empty after cleaning. Original: '{record_attributes[1]}'"
+        logger.info(
+            f"Empty source entity found after sanitization. Original: '{record_attributes[1]}'"
         )
         return None

     if not target:
-        logger.warning(
-            f"Relationship extraction error: target entity became empty after cleaning. Original: '{record_attributes[2]}'"
+        logger.info(
+            f"Empty target entity found after sanitization. Original: '{record_attributes[2]}'"
         )
         return None

File diff suppressed because it is too large

@@ -16,16 +16,16 @@
     "preview-no-bun": "vite preview"
   },
   "dependencies": {
-    "@faker-js/faker": "^9.9.0",
+    "@faker-js/faker": "^10.1.0",
     "@radix-ui/react-alert-dialog": "^1.1.15",
     "@radix-ui/react-checkbox": "^1.3.3",
     "@radix-ui/react-dialog": "^1.1.15",
     "@radix-ui/react-popover": "^1.1.15",
-    "@radix-ui/react-progress": "^1.1.7",
+    "@radix-ui/react-progress": "^1.1.8",
     "@radix-ui/react-scroll-area": "^1.2.10",
     "@radix-ui/react-select": "^2.2.6",
-    "@radix-ui/react-separator": "^1.1.7",
-    "@radix-ui/react-slot": "^1.2.3",
+    "@radix-ui/react-separator": "^1.1.8",
+    "@radix-ui/react-slot": "^1.2.4",
     "@radix-ui/react-tabs": "^1.1.13",
     "@radix-ui/react-tooltip": "^1.2.8",
     "@radix-ui/react-use-controllable-state": "^1.2.2",

@@ -41,7 +41,7 @@
     "@sigma/edge-curve": "^3.1.0",
     "@sigma/node-border": "^3.0.0",
     "@tanstack/react-table": "^8.21.3",
-    "axios": "^1.12.2",
+    "axios": "^1.13.2",
     "class-variance-authority": "^0.7.1",
     "clsx": "^2.1.1",
     "cmdk": "^1.1.1",

@@ -51,21 +51,21 @@
     "graphology-layout-force": "^0.2.4",
     "graphology-layout-forceatlas2": "^0.10.1",
     "graphology-layout-noverlap": "^0.4.2",
-    "i18next": "^24.2.3",
-    "katex": "^0.16.23",
-    "lucide-react": "^0.475.0",
-    "mermaid": "^11.12.0",
+    "i18next": "^25.6.3",
+    "katex": "^0.16.25",
+    "mermaid": "^11.12.1",
+    "lucide-react": "^0.554.0",
     "minisearch": "^7.2.0",
     "react": "^19.2.0",
     "react-dom": "^19.2.0",
     "react-dropzone": "^14.3.8",
-    "react-error-boundary": "^5.0.0",
-    "react-i18next": "^15.7.4",
-    "react-markdown": "^9.1.0",
+    "react-error-boundary": "^6.0.0",
+    "react-i18next": "^16.3.5",
+    "react-markdown": "^10.1.0",
     "react-number-format": "^5.4.4",
-    "react-router-dom": "^7.9.4",
+    "react-router-dom": "^7.9.6",
     "react-select": "^5.10.2",
-    "react-syntax-highlighter": "^15.6.6",
+    "react-syntax-highlighter": "^16.1.0",
     "rehype-katex": "^7.0.1",
     "rehype-raw": "^7.0.0",
     "rehype-react": "^8.0.0",

@@ -73,40 +73,40 @@
     "remark-math": "^6.0.0",
     "seedrandom": "^3.0.5",
     "sigma": "^3.0.2",
-    "sonner": "^1.7.4",
-    "tailwind-merge": "^3.3.1",
+    "sonner": "^2.0.7",
+    "tailwind-merge": "^3.4.0",
     "tailwind-scrollbar": "^4.0.2",
     "typography": "^0.16.24",
     "unist-util-visit": "^5.0.0",
     "zustand": "^5.0.8"
   },
   "devDependencies": {
-    "@eslint/js": "^9.37.0",
-    "@stylistic/eslint-plugin-js": "^3.1.0",
-    "@tailwindcss/vite": "^4.1.14",
-    "@types/bun": "^1.2.23",
+    "@eslint/js": "^9.39.1",
+    "@stylistic/eslint-plugin-js": "^4.4.1",
+    "@types/bun": "^1.3.3",
+    "@tailwindcss/vite": "^4.1.17",
     "@types/katex": "^0.16.7",
-    "@types/node": "^22.18.9",
+    "@types/node": "^24.10.1",
     "@tailwindcss/typography": "^0.5.15",
-    "@types/react": "^19.2.2",
-    "@types/react-dom": "^19.2.1",
+    "@types/react": "^19.2.7",
+    "@types/react-dom": "^19.2.3",
     "@types/react-i18next": "^8.1.0",
     "@types/react-syntax-highlighter": "^15.5.13",
     "@types/seedrandom": "^3.0.8",
-    "@vitejs/plugin-react-swc": "^3.11.0",
-    "eslint": "^9.37.0",
+    "@vitejs/plugin-react-swc": "^4.2.2",
+    "eslint": "^9.39.1",
     "eslint-config-prettier": "^10.1.8",
     "eslint-plugin-react": "^7.37.5",
-    "eslint-plugin-react-hooks": "^5.2.0",
-    "eslint-plugin-react-refresh": "^0.4.23",
-    "globals": "^15.15.0",
+    "eslint-plugin-react-hooks": "^7.0.1",
+    "eslint-plugin-react-refresh": "^0.4.24",
+    "globals": "^16.5.0",
     "graphology-types": "^0.24.8",
     "prettier": "^3.6.2",
-    "prettier-plugin-tailwindcss": "^0.6.14",
-    "tailwindcss": "^4.1.14",
+    "prettier-plugin-tailwindcss": "^0.7.1",
+    "typescript-eslint": "^8.48.0",
+    "tailwindcss": "^4.1.17",
     "tailwindcss-animate": "^1.0.7",
-    "typescript": "~5.7.3",
-    "typescript-eslint": "^8.46.0",
-    "vite": "^6.3.6"
+    "typescript": "~5.9.3",
+    "vite": "^7.2.4"
   }
 }

@@ -76,7 +76,8 @@ export const ChatMessage = ({
     ? message.content
     : (displayContent !== undefined ? displayContent : (message.content || ''))

-  // Load KaTeX dynamically
+  // Load KaTeX rehype plugin dynamically
+  // Note: KaTeX extensions (mhchem, copy-tex) are imported statically in main.tsx
   useEffect(() => {
     const loadKaTeX = async () => {
       try {

@@ -84,7 +85,6 @@ export const ChatMessage = ({
         setKatexPlugin(() => rehypeKatex);
       } catch (error) {
         console.error('Failed to load KaTeX plugin:', error);
-        // Set to null to ensure we don't try to use a failed plugin
         setKatexPlugin(null);
       }
     };

@@ -4,6 +4,9 @@ import './index.css'
 import AppRouter from './AppRouter'
 import './i18n.ts';
 import 'katex/dist/katex.min.css';
+// Import KaTeX extensions at app startup to ensure they are registered before any rendering
+import 'katex/contrib/mhchem'; // Chemistry formulas: \ce{} and \pu{}
+import 'katex/contrib/copy-tex'; // Allow copying rendered formulas as LaTeX source

lightrag_webui/src/types/katex.d.ts (1 addition)

@@ -1 +1,2 @@
 declare module 'katex/contrib/mhchem';
+declare module 'katex/contrib/copy-tex';

@@ -10,7 +10,10 @@ export default defineConfig({
   resolve: {
     alias: {
       '@': path.resolve(__dirname, './src')
-    }
+    },
+    // Force all modules to use the same katex instance
+    // This ensures mhchem extension registered in main.tsx is available to rehype-katex
+    dedupe: ['katex']
   },
   // base: import.meta.env.VITE_BASE_URL || '/webui/',
   base: webuiPrefix,

@@ -23,7 +23,6 @@ classifiers = [
 dependencies = [
     "aiohttp",
     "configparser",
-    "future",
     "google-api-core>=2.0.0,<3.0.0",
     "google-genai>=1.0.0,<2.0.0",
     "json_repair",

@@ -54,7 +53,6 @@ api = [
     # Core dependencies
     "aiohttp",
     "configparser",
-    "future",
     "json_repair",
     "nano-vectordb",
     "networkx",

@@ -78,9 +76,9 @@ api = [
     "distro",
     "fastapi",
     "httpcore",
-    "httpx",
+    "httpx>=0.28.1",
     "jiter",
-    "passlib[bcrypt]",
+    "bcrypt>=4.0.0",
     "psutil",
     "PyJWT>=2.8.0,<3.0.0",
     "python-jose[cryptography]",

@@ -132,16 +130,18 @@ offline = [
     "lightrag-hku[api,offline-storage,offline-llm]",
 ]

-evaluation = [
+# Test framework dependencies
+test = [
     "lightrag-hku[api]",
     "pytest>=8.4.2",
     "pytest-asyncio>=1.2.0",
     "pre-commit",
     "ruff",
-    # RAG evaluation dependencies (RAGAS framework)
+]
+
+evaluation = [
+    "lightrag-hku[api]",
     "ragas>=0.3.7",
     "datasets>=4.3.0",
+    "httpx>=0.28.1",
 ]

 observability = [

uv.lock (generated, 34 changed lines)

@@ -1334,15 +1334,6 @@ http = [
     { name = "aiohttp" },
 ]

-[[package]]
-name = "future"
-version = "1.0.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/a7/b2/4140c69c6a66432916b26158687e821ba631a4c9273c474343badf84d3ba/future-1.0.0.tar.gz", hash = "sha256:bd2968309307861edae1458a4f8a4f3598c03be43b97521076aebf5d94c07b05", size = 1228490, upload-time = "2024-02-21T11:52:38.461Z" }
-wheels = [
-    { url = "https://files.pythonhosted.org/packages/da/71/ae30dadffc90b9006d77af76b393cb9dfbfc9629f339fc1574a1c52e6806/future-1.0.0-py3-none-any.whl", hash = "sha256:929292d34f5872e70396626ef385ec22355a1fae8ad29e1a734c3e43f9fbc216", size = 491326, upload-time = "2024-02-21T11:52:35.956Z" },
-]
-
 [[package]]
 name = "gitdb"
 version = "4.0.12"

@@ -2542,7 +2533,6 @@ source = { editable = "." }
 dependencies = [
     { name = "aiohttp" },
     { name = "configparser" },
-    { name = "future" },
     { name = "google-api-core" },
     { name = "google-genai" },
     { name = "json-repair" },

@@ -2567,10 +2557,10 @@ api = [
     { name = "aiohttp" },
     { name = "ascii-colors" },
     { name = "asyncpg" },
+    { name = "bcrypt" },
     { name = "configparser" },
     { name = "distro" },
     { name = "fastapi" },
-    { name = "future" },
     { name = "google-api-core" },
     { name = "google-genai" },
     { name = "gunicorn" },

@@ -2585,7 +2575,6 @@ api = [
     { name = "openai" },
     { name = "openpyxl" },
     { name = "pandas" },
-    { name = "passlib", extra = ["bcrypt"] },
     { name = "pipmaster" },
     { name = "psutil" },
     { name = "pycryptodome" },

@@ -2627,10 +2616,10 @@ offline = [
     { name = "anthropic" },
     { name = "ascii-colors" },
     { name = "asyncpg" },
+    { name = "bcrypt" },
     { name = "configparser" },
     { name = "distro" },
     { name = "fastapi" },
-    { name = "future" },
     { name = "google-api-core" },
     { name = "google-genai" },
     { name = "gunicorn" },

@@ -2648,7 +2637,6 @@ offline = [
     { name = "openai" },
     { name = "openpyxl" },
     { name = "pandas" },
-    { name = "passlib", extra = ["bcrypt"] },
     { name = "pipmaster" },
     { name = "psutil" },
     { name = "pycryptodome" },

@@ -2714,14 +2702,13 @@ requires-dist = [
     { name = "ascii-colors", marker = "extra == 'api'" },
     { name = "asyncpg", marker = "extra == 'api'" },
     { name = "asyncpg", marker = "extra == 'offline-storage'", specifier = ">=0.29.0,<1.0.0" },
+    { name = "bcrypt", marker = "extra == 'api'", specifier = ">=4.0.0" },
     { name = "configparser" },
     { name = "configparser", marker = "extra == 'api'" },
     { name = "datasets", marker = "extra == 'evaluation'", specifier = ">=4.3.0" },
     { name = "distro", marker = "extra == 'api'" },
     { name = "docling", marker = "sys_platform != 'darwin' and extra == 'docling'", specifier = ">=2.0.0,<3.0.0" },
     { name = "fastapi", marker = "extra == 'api'" },
-    { name = "future" },
-    { name = "future", marker = "extra == 'api'" },
     { name = "google-api-core", specifier = ">=2.0.0,<3.0.0" },
     { name = "google-api-core", marker = "extra == 'api'", specifier = ">=2.0.0,<3.0.0" },
     { name = "google-api-core", marker = "extra == 'offline-llm'", specifier = ">=2.0.0,<3.0.0" },

@@ -2751,7 +2738,6 @@ requires-dist = [
     { name = "openpyxl", marker = "extra == 'api'", specifier = ">=3.0.0,<4.0.0" },
     { name = "pandas", specifier = ">=2.0.0,<2.4.0" },
     { name = "pandas", marker = "extra == 'api'", specifier = ">=2.0.0,<2.4.0" },
-    { name = "passlib", extras = ["bcrypt"], marker = "extra == 'api'" },
     { name = "pipmaster" },
     { name = "pipmaster", marker = "extra == 'api'" },
     { name = "pre-commit", marker = "extra == 'evaluation'" },

@@ -4110,20 +4096,6 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/ab/5f/b38085618b950b79d2d9164a711c52b10aefc0ae6833b96f626b7021b2ed/pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a", size = 13098436, upload-time = "2024-09-20T13:09:48.112Z" },
 ]

-[[package]]
-name = "passlib"
-version = "1.7.4"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/b6/06/9da9ee59a67fae7761aab3ccc84fa4f3f33f125b370f1ccdb915bf967c11/passlib-1.7.4.tar.gz", hash = "sha256:defd50f72b65c5402ab2c573830a6978e5f202ad0d984793c8dde2c4152ebe04", size = 689844, upload-time = "2020-10-08T19:00:52.121Z" }
-wheels = [
-    { url = "https://files.pythonhosted.org/packages/3b/a4/ab6b7589382ca3df236e03faa71deac88cae040af60c071a78d254a62172/passlib-1.7.4-py2.py3-none-any.whl", hash = "sha256:aa6bca462b8d8bda89c70b382f0c298a20b5560af6cbfa2dce410c0a2fb669f1", size = 525554, upload-time = "2020-10-08T19:00:49.856Z" },
-]
-
-[package.optional-dependencies]
-bcrypt = [
-    { name = "bcrypt" },
-]
-
 [[package]]
 name = "pillow"
 version = "11.3.0"