diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 00000000..62ffb6b5
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,167 @@
+# Keep GitHub Actions up to date with GitHub's Dependabot...
+# https://docs.github.com/en/code-security/dependabot/working-with-dependabot/keeping-your-actions-up-to-date-with-dependabot
+# https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#package-ecosystem
+version: 2
+updates:
+ # Enable version updates for GitHub Actions
+ # Workflow files stored in the default location of `.github/workflows`
+ - package-ecosystem: github-actions
+ directory: /
+ groups:
+ github-actions:
+ patterns:
+ - "*" # Group all Actions updates into a single larger pull request
+ schedule:
+ interval: weekly
+ labels:
+ - "dependencies"
+ - "github-actions"
+ open-pull-requests-limit: 5
+
+ # Configuration for pip (Python dependencies)
+ - package-ecosystem: "pip"
+ directory: "/"
+ schedule:
+ interval: "weekly"
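+    # Cooldown holds back each update until the new release has been out for
+    # the given number of days; the semver-*-days values override default-days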
+ cooldown:
+ default-days: 5
+ semver-major-days: 30
+ semver-minor-days: 7
+ semver-patch-days: 3
+ groups:
+ # Core dependencies - LLM providers and embeddings
+ llm-providers:
+ patterns:
+ - "openai"
+ - "anthropic"
+ - "google-*"
+ - "boto3"
+ - "botocore"
+ - "ollama"
+ update-types:
+ - "minor"
+ - "patch"
+ # Storage backends
+ storage:
+ patterns:
+ - "neo4j"
+ - "pymongo"
+ - "redis"
+ - "psycopg*"
+ - "asyncpg"
+ - "milvus*"
+ - "qdrant*"
+ update-types:
+ - "minor"
+ - "patch"
+ # Data processing and ML
+ data-processing:
+ patterns:
+ - "numpy"
+ - "scipy"
+ - "pandas"
+ - "tiktoken"
+ - "transformers"
+ - "torch*"
+ update-types:
+ - "minor"
+ - "patch"
+ # Web framework and API
+ web-framework:
+ patterns:
+ - "fastapi"
+ - "uvicorn"
+ - "gunicorn"
+ - "starlette"
+ - "pydantic*"
+ update-types:
+ - "minor"
+ - "patch"
+ # Development and testing tools
+ dev-tools:
+ patterns:
+ - "pytest*"
+ - "ruff"
+ - "pre-commit"
+ - "black"
+ - "mypy"
+ update-types:
+ - "minor"
+ - "patch"
+ # Minor and patch updates for everything else
+ python-minor-patch:
+ patterns:
+ - "*"
+ update-types:
+ - "minor"
+ - "patch"
+ labels:
+ - "dependencies"
+ - "python"
+ open-pull-requests-limit: 10
+
+ # Configuration for bun (Frontend dependencies)
+ - package-ecosystem: "bun"
+ directory: "/lightrag_webui"
+ schedule:
+ interval: "weekly"
+ cooldown:
+ default-days: 5
+ semver-major-days: 30
+ semver-minor-days: 7
+ semver-patch-days: 3
+ groups:
+ # React ecosystem
+ react:
+ patterns:
+ - "react"
+ - "react-dom"
+ - "react-router*"
+ - "@types/react*"
+ update-types:
+ - "minor"
+ - "patch"
+ # UI components and styling
+ ui-components:
+ patterns:
+ - "@radix-ui/*"
+ - "tailwind*"
+ - "@tailwindcss/*"
+ - "lucide-react"
+ - "class-variance-authority"
+ - "clsx"
+ update-types:
+ - "minor"
+ - "patch"
+ # Graph visualization
+ graph-viz:
+ patterns:
+ - "sigma"
+ - "@sigma/*"
+ - "graphology*"
+ update-types:
+ - "minor"
+ - "patch"
+ # Build tools and dev dependencies
+ build-tools:
+ patterns:
+ - "vite"
+ - "@vitejs/*"
+ - "typescript"
+ - "eslint*"
+ - "@eslint/*"
+ - "prettier"
+ update-types:
+ - "minor"
+ - "patch"
+ # All other minor and patch updates
+ frontend-minor-patch:
+ patterns:
+ - "*"
+ update-types:
+ - "minor"
+ - "patch"
+ labels:
+ - "dependencies"
+ - "frontend"
+ open-pull-requests-limit: 10
diff --git a/.github/workflows/copilot-setup-steps.yml b/.github/workflows/copilot-setup-steps.yml
index 6b946ed1..618b80b4 100644
--- a/.github/workflows/copilot-setup-steps.yml
+++ b/.github/workflows/copilot-setup-steps.yml
@@ -23,10 +23,10 @@ jobs:
# If you do not check out your code, Copilot will do this for you.
steps:
- name: Checkout code
- uses: actions/checkout@v5
+ uses: actions/checkout@v6
- name: Set up Python 3.11
- uses: actions/setup-python@v5
+ uses: actions/setup-python@v6
with:
python-version: '3.11'
diff --git a/.github/workflows/docker-build-lite.yml b/.github/workflows/docker-build-lite.yml
index 9cbe6289..76ae4cce 100644
--- a/.github/workflows/docker-build-lite.yml
+++ b/.github/workflows/docker-build-lite.yml
@@ -18,7 +18,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
with:
fetch-depth: 0
@@ -66,7 +66,7 @@ jobs:
type=raw,value=lite
- name: Build and push lite Docker image
- uses: docker/build-push-action@v5
+ uses: docker/build-push-action@v6
with:
context: .
file: ./Dockerfile.lite
diff --git a/.github/workflows/docker-build-manual.yml b/.github/workflows/docker-build-manual.yml
index de459d5a..780a288d 100644
--- a/.github/workflows/docker-build-manual.yml
+++ b/.github/workflows/docker-build-manual.yml
@@ -18,7 +18,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
with:
fetch-depth: 0 # Fetch all history for tags
@@ -61,7 +61,7 @@ jobs:
type=raw,value=${{ steps.get_tag.outputs.tag }}
- name: Build and push Docker image
- uses: docker/build-push-action@v5
+ uses: docker/build-push-action@v6
with:
context: .
file: ./Dockerfile
diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml
index 6c290d59..d0b1a305 100644
--- a/.github/workflows/docker-publish.yml
+++ b/.github/workflows/docker-publish.yml
@@ -14,7 +14,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
- uses: actions/checkout@v4
+ uses: actions/checkout@v6
with:
fetch-depth: 0 # Fetch all history for tags
@@ -63,7 +63,7 @@ jobs:
type=raw,value=latest,enable=${{ steps.check_prerelease.outputs.is_prerelease == 'false' }}
- name: Build and push Docker image
- uses: docker/build-push-action@v5
+ uses: docker/build-push-action@v6
with:
context: .
file: ./Dockerfile
diff --git a/.github/workflows/linting.yaml b/.github/workflows/linting.yaml
index aa054369..499e4ce5 100644
--- a/.github/workflows/linting.yaml
+++ b/.github/workflows/linting.yaml
@@ -10,14 +10,15 @@ on:
jobs:
lint-and-format:
+ name: Linting and Formatting
runs-on: ubuntu-latest
steps:
- name: Checkout code
- uses: actions/checkout@v2
+ uses: actions/checkout@v6
- name: Set up Python
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v6
with:
python-version: '3.x'
diff --git a/.github/workflows/pypi-publish.yml b/.github/workflows/pypi-publish.yml
index 14c2bcc5..3539d8f6 100644
--- a/.github/workflows/pypi-publish.yml
+++ b/.github/workflows/pypi-publish.yml
@@ -13,13 +13,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v6
with:
fetch-depth: 0 # Fetch all history for tags
# Build frontend WebUI
- name: Setup Bun
- uses: oven-sh/setup-bun@v1
+ uses: oven-sh/setup-bun@v2
with:
bun-version: latest
@@ -40,7 +40,7 @@ jobs:
echo "Frontend files:"
ls -lh lightrag/api/webui/ | head -10
- - uses: actions/setup-python@v5
+ - uses: actions/setup-python@v6
with:
python-version: "3.x"
@@ -64,7 +64,7 @@ jobs:
python -m build
- name: Upload distributions
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v5
with:
name: release-dists
path: dist/
@@ -81,7 +81,7 @@ jobs:
steps:
- name: Retrieve release distributions
- uses: actions/download-artifact@v4
+ uses: actions/download-artifact@v6
with:
name: release-dists
path: dist/
diff --git a/.github/workflows/stale.yaml b/.github/workflows/stale.yaml
index 30643dda..34634c9c 100644
--- a/.github/workflows/stale.yaml
+++ b/.github/workflows/stale.yaml
@@ -13,7 +13,7 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- - uses: actions/stale@v9
+ - uses: actions/stale@v10
with:
days-before-stale: 90 # 90 days
days-before-close: 7 # 7 days after marked as stale
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index e7d00f4a..d09d4757 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -13,13 +13,13 @@ jobs:
strategy:
matrix:
- python-version: ['3.10', '3.11', '3.12']
+ python-version: ['3.10', '3.11', '3.12', '3.13', '3.14']
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v6
- name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v5
+ uses: actions/setup-python@v6
with:
python-version: ${{ matrix.python-version }}
@@ -45,7 +45,7 @@ jobs:
- name: Upload test results
if: always()
- uses: actions/upload-artifact@v4
+ uses: actions/upload-artifact@v5
with:
name: test-results-py${{ matrix.python-version }}
path: |
diff --git a/README-zh.md b/README-zh.md
index 3c72b846..478d67ab 100644
--- a/README-zh.md
+++ b/README-zh.md
@@ -407,6 +407,11 @@ LightRAG 需要利用LLM和Embeding模型来完成文档索引和知识库查询
* LightRAG还支持类OpenAI的聊天/嵌入API:
```python
+import os
+import numpy as np
+from lightrag.utils import wrap_embedding_func_with_attrs
+from lightrag.llm.openai import openai_complete_if_cache, openai_embed
+
async def llm_model_func(
prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
) -> str:
@@ -420,8 +425,9 @@ async def llm_model_func(
**kwargs
)
+@wrap_embedding_func_with_attrs(embedding_dim=4096, max_token_size=8192)
async def embedding_func(texts: list[str]) -> np.ndarray:
- return await openai_embed(
+ return await openai_embed.func(
texts,
model="solar-embedding-1-large-query",
api_key=os.getenv("UPSTAGE_API_KEY"),
@@ -432,16 +438,17 @@ async def initialize_rag():
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=4096,
- func=embedding_func
- )
+ embedding_func=embedding_func # 直接传入装饰后的函数
)
await rag.initialize_storages()
return rag
```
+> **关于嵌入函数封装的重要说明:**
+>
+> `EmbeddingFunc` 不能嵌套封装。已经被 `@wrap_embedding_func_with_attrs` 装饰过的嵌入函数(如 `openai_embed`、`ollama_embed` 等)不能再次使用 `EmbeddingFunc()` 封装。这就是为什么在创建自定义嵌入函数时,我们调用 `xxx_embed.func`(底层未封装的函数)而不是直接调用 `xxx_embed`。
+
@@ -478,19 +485,20 @@ rag = LightRAG(
然后您只需要按如下方式设置LightRAG:
```python
+import numpy as np
+from lightrag.utils import wrap_embedding_func_with_attrs
+from lightrag.llm.ollama import ollama_model_complete, ollama_embed
+
+@wrap_embedding_func_with_attrs(embedding_dim=768, max_token_size=8192)
+async def embedding_func(texts: list[str]) -> np.ndarray:
+ return await ollama_embed.func(texts, embed_model="nomic-embed-text")
+
# 使用Ollama模型初始化LightRAG
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=ollama_model_complete, # 使用Ollama模型进行文本生成
llm_model_name='your_model_name', # 您的模型名称
- # 使用Ollama嵌入函数
- embedding_func=EmbeddingFunc(
- embedding_dim=768,
- func=lambda texts: ollama_embed(
- texts,
- embed_model="nomic-embed-text"
- )
- ),
+ embedding_func=embedding_func, # 直接传入装饰后的函数
)
```
@@ -529,22 +537,27 @@ ollama create -f Modelfile qwen2m
您可以使用`llm_model_kwargs`参数配置ollama:
```python
+import numpy as np
+from lightrag.utils import wrap_embedding_func_with_attrs
+from lightrag.llm.ollama import ollama_model_complete, ollama_embed
+
+@wrap_embedding_func_with_attrs(embedding_dim=768, max_token_size=8192)
+async def embedding_func(texts: list[str]) -> np.ndarray:
+ return await ollama_embed.func(texts, embed_model="nomic-embed-text")
+
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=ollama_model_complete, # 使用Ollama模型进行文本生成
llm_model_name='your_model_name', # 您的模型名称
llm_model_kwargs={"options": {"num_ctx": 32768}},
- # 使用Ollama嵌入函数
- embedding_func=EmbeddingFunc(
- embedding_dim=768,
- func=lambda texts: ollama_embed(
- texts,
- embed_model="nomic-embed-text"
- )
- ),
+ embedding_func=embedding_func, # 直接传入装饰后的函数
)
```
+> **关于嵌入函数封装的重要说明:**
+>
+> `EmbeddingFunc` 不能嵌套封装。已经被 `@wrap_embedding_func_with_attrs` 装饰过的嵌入函数(如 `openai_embed`、`ollama_embed` 等)不能再次使用 `EmbeddingFunc()` 封装。这就是为什么在创建自定义嵌入函数时,我们调用 `xxx_embed.func`(底层未封装的函数)而不是直接调用 `xxx_embed`。
+
* **低RAM GPU**
为了在低RAM GPU上运行此实验,您应该选择小型模型并调整上下文窗口(增加上下文会增加内存消耗)。例如,在6Gb RAM的改装挖矿GPU上运行这个ollama示例需要将上下文大小设置为26k,同时使用`gemma2:2b`。它能够在`book.txt`中找到197个实体和19个关系。
diff --git a/README.md b/README.md
index 47906d2e..3147e23c 100644
--- a/README.md
+++ b/README.md
@@ -51,24 +51,24 @@
---
## 🎉 News
-- [2025.11.05]🎯[New Feature]: Integrated **RAGAS for Evaluation** and **Langfuse for Tracing**. Updated the API to return retrieved contexts alongside query results to support context precision metrics.
-- [2025.10.22]🎯[Scalability Enhancement]: Eliminated processing bottlenecks to support **Large-Scale Datasets Efficiently**.
-- [2025.09.15]🎯Significantly enhances KG extraction accuracy for **small LLMs** like Qwen3-30B-A3B.
-- [2025.08.29]🎯**Reranker** is supported now , significantly boosting performance for mixed queries(Set as default query mode now).
-- [2025.08.04]🎯**Document deletion** with KG regeneration to ensure query performance.
-- [2025.06.16]🎯Our team has released [RAG-Anything](https://github.com/HKUDS/RAG-Anything) an All-in-One Multimodal RAG System for seamless text, image, table, and equation processing.
-- [2025.06.05]🎯LightRAG now supports comprehensive multimodal data handling through [RAG-Anything](https://github.com/HKUDS/RAG-Anything) integration, enabling seamless document parsing and RAG capabilities across diverse formats including PDFs, images, Office documents, tables, and formulas. Please refer to the new [multimodal section](https://github.com/HKUDS/LightRAG/?tab=readme-ov-file#multimodal-document-processing-rag-anything-integration) for details.
-- [2025.03.18]🎯LightRAG now supports citation functionality, enabling proper source attribution.
-- [2025.02.12]🎯You can now use MongoDB as all in-one Storage.
-- [2025.02.05]🎯Our team has released [VideoRAG](https://github.com/HKUDS/VideoRAG) understanding extremely long-context videos.
-- [2025.01.13]🎯Our team has released [MiniRAG](https://github.com/HKUDS/MiniRAG) making RAG simpler with small models.
-- [2025.01.06]🎯You can now use PostgreSQL as all in-one Storage.
-- [2024.11.19]🎯A comprehensive guide to LightRAG is now available on [LearnOpenCV](https://learnopencv.com/lightrag). Many thanks to the blog author.
-- [2024.11.09]🎯Introducing the LightRAG Webui, which allows you to insert, query, visualize LightRAG knowledge.
-- [2024.11.04]🎯You can now [use Neo4J for Storage](https://github.com/HKUDS/LightRAG?tab=readme-ov-file#using-neo4j-for-storage).
-- [2024.10.18]🎯We've added a link to a [LightRAG Introduction Video](https://youtu.be/oageL-1I0GE). Thanks to the author!
-- [2024.10.17]🎯We have created a [Discord channel](https://discord.gg/yF2MmDJyGJ)! Welcome to join for sharing and discussions! 🎉🎉
-- [2024.10.16]🎯LightRAG now supports [Ollama models](https://github.com/HKUDS/LightRAG?tab=readme-ov-file#quick-start)!
+- [2025.11]🎯[New Feature] Integrated **RAGAS for Evaluation** and **Langfuse for Tracing**. Updated the API to return retrieved contexts alongside query results to support context precision metrics.
+- [2025.10]🎯[Scalability Enhancement] Eliminated processing bottlenecks to support **large-scale datasets** efficiently.
+- [2025.09]🎯[New Feature] Enhanced knowledge graph extraction accuracy for **open-source LLMs** such as Qwen3-30B-A3B.
+- [2025.08]🎯[New Feature] **Reranker** is now supported, significantly boosting performance for mixed queries (set as default query mode).
+- [2025.08]🎯[New Feature] Added **Document Deletion** with automatic KG regeneration to ensure optimal query performance.
+- [2025.06]🎯[New Release] Our team has released [RAG-Anything](https://github.com/HKUDS/RAG-Anything) — an **All-in-One Multimodal RAG** system for seamless processing of text, images, tables, and equations.
+- [2025.06]🎯[New Feature] LightRAG now supports comprehensive multimodal data handling through [RAG-Anything](https://github.com/HKUDS/RAG-Anything) integration, enabling seamless document parsing and RAG capabilities across diverse formats including PDFs, images, Office documents, tables, and formulas. Please refer to the new [multimodal section](https://github.com/HKUDS/LightRAG/?tab=readme-ov-file#multimodal-document-processing-rag-anything-integration) for details.
+- [2025.03]🎯[New Feature] LightRAG now supports citation functionality, enabling proper source attribution and enhanced document traceability.
+- [2025.02]🎯[New Feature] You can now use MongoDB as an all-in-one storage solution for unified data management.
+- [2025.02]🎯[New Release] Our team has released [VideoRAG](https://github.com/HKUDS/VideoRAG), a RAG system for understanding extremely long-context videos.
+- [2025.01]🎯[New Release] Our team has released [MiniRAG](https://github.com/HKUDS/MiniRAG), making RAG simpler with small models.
+- [2025.01]🎯[New Feature] You can now use PostgreSQL as an all-in-one storage solution for data management.
+- [2024.11]🎯[New Resource] A comprehensive guide to LightRAG is now available on [LearnOpenCV](https://learnopencv.com/lightrag), with in-depth tutorials and best practices. Many thanks to the blog author for this excellent contribution!
+- [2024.11]🎯[New Feature] Introducing the LightRAG WebUI — an interface that allows you to insert, query, and visualize LightRAG knowledge through an intuitive web-based dashboard.
+- [2024.11]🎯[New Feature] You can now [use Neo4J for Storage](https://github.com/HKUDS/LightRAG?tab=readme-ov-file#using-neo4j-for-storage), enabling graph database support.
+- [2024.10]🎯[New Feature] We've added a link to a [LightRAG Introduction Video](https://youtu.be/oageL-1I0GE), a walkthrough of LightRAG's capabilities. Thanks to the author for this excellent contribution!
+- [2024.10]🎯[New Channel] We have created a [Discord channel](https://discord.gg/yF2MmDJyGJ)!💬 Join our community for sharing, discussions, and collaboration! 🎉🎉
+- [2024.10]🎯[New Feature] LightRAG now supports [Ollama models](https://github.com/HKUDS/LightRAG?tab=readme-ov-file#quick-start)!
@@ -214,7 +214,7 @@ For a streaming response implementation example, please see `examples/lightrag_o
**Note 2**: Only `lightrag_openai_demo.py` and `lightrag_openai_compatible_demo.py` are officially supported sample codes. Other sample files are community contributions that haven't undergone full testing and optimization.
-## Programing with LightRAG Core
+## Programming with LightRAG Core
> ⚠️ **If you would like to integrate LightRAG into your project, we recommend utilizing the REST API provided by the LightRAG Server**. LightRAG Core is typically intended for embedded applications or for researchers who wish to conduct studies and evaluations.
@@ -313,7 +313,7 @@ A full list of LightRAG init parameters:
| **vector_db_storage_cls_kwargs** | `dict` | Additional parameters for vector database, like setting the threshold for nodes and relations retrieval | cosine_better_than_threshold: 0.2(default value changed by env var COSINE_THRESHOLD) |
| **enable_llm_cache** | `bool` | If `TRUE`, stores LLM results in cache; repeated prompts return cached responses | `TRUE` |
| **enable_llm_cache_for_entity_extract** | `bool` | If `TRUE`, stores LLM results in cache for entity extraction; Good for beginners to debug your application | `TRUE` |
-| **addon_params** | `dict` | Additional parameters, e.g., `{"language": "Simplified Chinese", "entity_types": ["organization", "person", "location", "event"]}`: sets example limit, entiy/relation extraction output language | language: English` |
+| **addon_params** | `dict` | Additional parameters, e.g., `{"language": "Simplified Chinese", "entity_types": ["organization", "person", "location", "event"]}`: sets the example limit and the output language for entity/relation extraction | `language: English` |
| **embedding_cache_config** | `dict` | Configuration for question-answer caching. Contains three parameters: `enabled`: Boolean value to enable/disable cache lookup functionality. When enabled, the system will check cached responses before generating new answers. `similarity_threshold`: Float value (0-1), similarity threshold. When a new question's similarity with a cached question exceeds this threshold, the cached answer will be returned directly without calling the LLM. `use_llm_check`: Boolean value to enable/disable LLM similarity verification. When enabled, LLM will be used as a secondary check to verify the similarity between questions before returning cached answers. | Default: `{"enabled": False, "similarity_threshold": 0.95, "use_llm_check": False}` |
@@ -364,7 +364,7 @@ class QueryParam:
max_total_tokens: int = int(os.getenv("MAX_TOTAL_TOKENS", "30000"))
"""Maximum total tokens budget for the entire query context (entities + relations + chunks + system prompt)."""
- # History mesages is only send to LLM for context, not used for retrieval
+ # History messages are only sent to LLM for context, not used for retrieval
conversation_history: list[dict[str, str]] = field(default_factory=list)
"""Stores past conversation history to maintain context.
Format: [{"role": "user/assistant", "content": "message"}].
@@ -403,6 +403,11 @@ LightRAG requires the utilization of LLM and Embedding models to accomplish docu
* LightRAG also supports Open AI-like chat/embeddings APIs:
```python
+import os
+import numpy as np
+from lightrag.utils import wrap_embedding_func_with_attrs
+from lightrag.llm.openai import openai_complete_if_cache, openai_embed
+
async def llm_model_func(
prompt, system_prompt=None, history_messages=[], keyword_extraction=False, **kwargs
) -> str:
@@ -416,8 +421,9 @@ async def llm_model_func(
**kwargs
)
+@wrap_embedding_func_with_attrs(embedding_dim=4096, max_token_size=8192)
async def embedding_func(texts: list[str]) -> np.ndarray:
- return await openai_embed(
+ return await openai_embed.func(
texts,
model="solar-embedding-1-large-query",
api_key=os.getenv("UPSTAGE_API_KEY"),
@@ -428,16 +434,17 @@ async def initialize_rag():
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=llm_model_func,
- embedding_func=EmbeddingFunc(
- embedding_dim=4096,
- func=embedding_func
- )
+ embedding_func=embedding_func # Pass the decorated function directly
)
await rag.initialize_storages()
return rag
```
+> **Important Note on Embedding Function Wrapping:**
+>
+> `EmbeddingFunc` cannot be nested. Functions that have been decorated with `@wrap_embedding_func_with_attrs` (such as `openai_embed`, `ollama_embed`, etc.) cannot be wrapped again using `EmbeddingFunc()`. This is why we call `xxx_embed.func` (the underlying unwrapped function) instead of `xxx_embed` directly when creating custom embedding functions.
+
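+A minimal sketch of the wrong and right patterns (assuming `openai_embed` from `lightrag.llm.openai` and the 1536-dim default model):
+
+```python
+from lightrag.utils import EmbeddingFunc
+from lightrag.llm.openai import openai_embed
+
+# Wrong: openai_embed is already an EmbeddingFunc; nesting it is not supported
+# embedding_func = EmbeddingFunc(embedding_dim=1536, func=openai_embed)
+
+# Right: wrap the underlying coroutine exposed via .func
+embedding_func = EmbeddingFunc(embedding_dim=1536, func=openai_embed.func)
+```
+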
@@ -476,19 +483,20 @@ If you want to use Ollama models, you need to pull model you plan to use and emb
Then you only need to set LightRAG as follows:
```python
+import numpy as np
+from lightrag.utils import wrap_embedding_func_with_attrs
+from lightrag.llm.ollama import ollama_model_complete, ollama_embed
+
+@wrap_embedding_func_with_attrs(embedding_dim=768, max_token_size=8192)
+async def embedding_func(texts: list[str]) -> np.ndarray:
+ return await ollama_embed.func(texts, embed_model="nomic-embed-text")
+
# Initialize LightRAG with Ollama model
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=ollama_model_complete, # Use Ollama model for text generation
llm_model_name='your_model_name', # Your model name
- # Use Ollama embedding function
- embedding_func=EmbeddingFunc(
- embedding_dim=768,
- func=lambda texts: ollama_embed(
- texts,
- embed_model="nomic-embed-text"
- )
- ),
+ embedding_func=embedding_func, # Pass the decorated function directly
)
```
@@ -527,22 +535,27 @@ ollama create -f Modelfile qwen2m
You can use the `llm_model_kwargs` param to configure ollama:
```python
+import numpy as np
+from lightrag.utils import wrap_embedding_func_with_attrs
+from lightrag.llm.ollama import ollama_model_complete, ollama_embed
+
+@wrap_embedding_func_with_attrs(embedding_dim=768, max_token_size=8192)
+async def embedding_func(texts: list[str]) -> np.ndarray:
+ return await ollama_embed.func(texts, embed_model="nomic-embed-text")
+
rag = LightRAG(
working_dir=WORKING_DIR,
llm_model_func=ollama_model_complete, # Use Ollama model for text generation
llm_model_name='your_model_name', # Your model name
llm_model_kwargs={"options": {"num_ctx": 32768}},
- # Use Ollama embedding function
- embedding_func=EmbeddingFunc(
- embedding_dim=768,
- func=lambda texts: ollama_embed(
- texts,
- embed_model="nomic-embed-text"
- )
- ),
+ embedding_func=embedding_func, # Pass the decorated function directly
)
```
+> **Important Note on Embedding Function Wrapping:**
+>
+> `EmbeddingFunc` cannot be nested. Functions that have been decorated with `@wrap_embedding_func_with_attrs` (such as `openai_embed`, `ollama_embed`, etc.) cannot be wrapped again using `EmbeddingFunc()`. This is why we call `xxx_embed.func` (the underlying unwrapped function) instead of `xxx_embed` directly when creating custom embedding functions.
+
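+The same rule applies if you construct `EmbeddingFunc` yourself instead of using the decorator: call `ollama_embed.func` inside your wrapper (a sketch):
+
+```python
+from lightrag.utils import EmbeddingFunc
+from lightrag.llm.ollama import ollama_embed
+
+embedding_func = EmbeddingFunc(
+    embedding_dim=768,
+    func=lambda texts: ollama_embed.func(texts, embed_model="nomic-embed-text"),
+)
+```
+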
* **Low RAM GPUs**
In order to run this experiment on a low-RAM GPU you should select a small model and tune the context window (increasing context increases memory consumption). For example, running this ollama example on a repurposed mining GPU with 6 GB of RAM required setting the context size to 26k while using `gemma2:2b`. It was able to find 197 entities and 19 relations in `book.txt`.
@@ -1555,7 +1568,7 @@ Langfuse provides a drop-in replacement for the OpenAI client that automatically
pip install lightrag-hku
pip install lightrag-hku[observability]
-# Or install from souce code with debug mode enabled
+# Or install from source code with debug mode enabled
pip install -e .
pip install -e ".[observability]"
```
diff --git a/env.example b/env.example
index fea99953..d30a03cb 100644
--- a/env.example
+++ b/env.example
@@ -447,6 +447,17 @@ MEMGRAPH_DATABASE=memgraph
### DB specific workspace should not be set, keep for compatible only
### MEMGRAPH_WORKSPACE=forced_workspace_name
+###########################################################
+### Langfuse Observability Configuration
+### Only works with LLM provided by OpenAI compatible API
+### Install with: pip install lightrag-hku[observability]
+### Sign up at: https://cloud.langfuse.com or self-host
+###########################################################
+# LANGFUSE_SECRET_KEY=""
+# LANGFUSE_PUBLIC_KEY=""
+# LANGFUSE_HOST="https://cloud.langfuse.com" # or your self-hosted instance URL
+# LANGFUSE_ENABLE_TRACE=true
+
############################
### Evaluation Configuration
############################
diff --git a/lightrag/api/__init__.py b/lightrag/api/__init__.py
index 1e689c38..417d3445 100644
--- a/lightrag/api/__init__.py
+++ b/lightrag/api/__init__.py
@@ -1 +1 @@
-__api_version__ = "0256"
+__api_version__ = "0258"
diff --git a/lightrag/api/config.py b/lightrag/api/config.py
index 4f59d3c1..4d8ab1e1 100644
--- a/lightrag/api/config.py
+++ b/lightrag/api/config.py
@@ -365,8 +365,12 @@ def parse_args() -> argparse.Namespace:
# Inject model configuration
args.llm_model = get_env_value("LLM_MODEL", "mistral-nemo:latest")
- args.embedding_model = get_env_value("EMBEDDING_MODEL", "bge-m3:latest")
- args.embedding_dim = get_env_value("EMBEDDING_DIM", 1024, int)
+ # EMBEDDING_MODEL defaults to None - each binding will use its own default model
+ # e.g., OpenAI uses "text-embedding-3-small", Jina uses "jina-embeddings-v4"
+ args.embedding_model = get_env_value("EMBEDDING_MODEL", None, special_none=True)
+ # EMBEDDING_DIM defaults to None - each binding will use its own default dimension
+ # Value is inherited from provider defaults via wrap_embedding_func_with_attrs decorator
+ args.embedding_dim = get_env_value("EMBEDDING_DIM", None, int, special_none=True)
args.embedding_send_dim = get_env_value("EMBEDDING_SEND_DIM", False, bool)
# Inject chunk configuration
diff --git a/lightrag/api/lightrag_server.py b/lightrag/api/lightrag_server.py
index a8a14c66..5f59085a 100644
--- a/lightrag/api/lightrag_server.py
+++ b/lightrag/api/lightrag_server.py
@@ -654,6 +654,17 @@ def create_app(args):
2. Extracts max_token_size and embedding_dim from provider if it's an EmbeddingFunc
3. Creates an optimized wrapper that calls the underlying function directly (avoiding double-wrapping)
4. Returns a properly configured EmbeddingFunc instance
+
+ Configuration Rules:
+ - When EMBEDDING_MODEL is not set: Uses provider's default model and dimension
+ (e.g., jina-embeddings-v4 with 2048 dims, text-embedding-3-small with 1536 dims)
+ - When EMBEDDING_MODEL is set to a custom model: User MUST also set EMBEDDING_DIM
+ to match the custom model's dimension (e.g., for jina-embeddings-v3, set EMBEDDING_DIM=1024)
+
+ Note: The embedding_dim parameter is automatically injected by EmbeddingFunc wrapper
+ when send_dimensions=True (enabled for Jina and Gemini bindings). This wrapper calls
+ the underlying provider function directly (.func) to avoid double-wrapping, so we must
+ explicitly pass embedding_dim to the provider's underlying function.
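+
+    Example (.env sketch; variable names assumed from env.example):
+        EMBEDDING_BINDING=jina
+        EMBEDDING_MODEL=jina-embeddings-v3
+        EMBEDDING_DIM=1024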
"""
# Step 1: Import provider function and extract default attributes
@@ -713,6 +724,7 @@ def create_app(args):
)
# Step 3: Create optimized embedding function (calls underlying function directly)
+ # Note: When model is None, each binding will use its own default model
async def optimized_embedding_function(texts, embedding_dim=None):
try:
if binding == "lollms":
@@ -724,9 +736,9 @@ def create_app(args):
if isinstance(lollms_embed, EmbeddingFunc)
else lollms_embed
)
- return await actual_func(
- texts, embed_model=model, host=host, api_key=api_key
- )
+ # lollms embed_model is not used (server uses configured vectorizer)
+ # Only pass base_url and api_key
+ return await actual_func(texts, base_url=host, api_key=api_key)
elif binding == "ollama":
from lightrag.llm.ollama import ollama_embed
@@ -745,13 +757,16 @@ def create_app(args):
ollama_options = OllamaEmbeddingOptions.options_dict(args)
- return await actual_func(
- texts,
- embed_model=model,
- host=host,
- api_key=api_key,
- options=ollama_options,
- )
+ # Pass embed_model only if provided, let function use its default (bge-m3:latest)
+ kwargs = {
+ "texts": texts,
+ "host": host,
+ "api_key": api_key,
+ "options": ollama_options,
+ }
+ if model:
+ kwargs["embed_model"] = model
+ return await actual_func(**kwargs)
elif binding == "azure_openai":
from lightrag.llm.azure_openai import azure_openai_embed
@@ -760,7 +775,11 @@ def create_app(args):
if isinstance(azure_openai_embed, EmbeddingFunc)
else azure_openai_embed
)
- return await actual_func(texts, model=model, api_key=api_key)
+ # Pass model only if provided, let function use its default otherwise
+ kwargs = {"texts": texts, "api_key": api_key}
+ if model:
+ kwargs["model"] = model
+ return await actual_func(**kwargs)
elif binding == "aws_bedrock":
from lightrag.llm.bedrock import bedrock_embed
@@ -769,7 +788,11 @@ def create_app(args):
if isinstance(bedrock_embed, EmbeddingFunc)
else bedrock_embed
)
- return await actual_func(texts, model=model)
+ # Pass model only if provided, let function use its default otherwise
+ kwargs = {"texts": texts}
+ if model:
+ kwargs["model"] = model
+ return await actual_func(**kwargs)
elif binding == "jina":
from lightrag.llm.jina import jina_embed
@@ -778,12 +801,16 @@ def create_app(args):
if isinstance(jina_embed, EmbeddingFunc)
else jina_embed
)
- return await actual_func(
- texts,
- embedding_dim=embedding_dim,
- base_url=host,
- api_key=api_key,
- )
+ # Pass model only if provided, let function use its default (jina-embeddings-v4)
+ kwargs = {
+ "texts": texts,
+ "embedding_dim": embedding_dim,
+ "base_url": host,
+ "api_key": api_key,
+ }
+ if model:
+ kwargs["model"] = model
+ return await actual_func(**kwargs)
elif binding == "gemini":
from lightrag.llm.gemini import gemini_embed
@@ -801,14 +828,19 @@ def create_app(args):
gemini_options = GeminiEmbeddingOptions.options_dict(args)
- return await actual_func(
- texts,
- model=model,
- base_url=host,
- api_key=api_key,
- embedding_dim=embedding_dim,
- task_type=gemini_options.get("task_type", "RETRIEVAL_DOCUMENT"),
- )
+ # Pass model only if provided, let function use its default (gemini-embedding-001)
+ kwargs = {
+ "texts": texts,
+ "base_url": host,
+ "api_key": api_key,
+ "embedding_dim": embedding_dim,
+ "task_type": gemini_options.get(
+ "task_type", "RETRIEVAL_DOCUMENT"
+ ),
+ }
+ if model:
+ kwargs["model"] = model
+ return await actual_func(**kwargs)
else: # openai and compatible
from lightrag.llm.openai import openai_embed
@@ -817,13 +849,16 @@ def create_app(args):
if isinstance(openai_embed, EmbeddingFunc)
else openai_embed
)
- return await actual_func(
- texts,
- model=model,
- base_url=host,
- api_key=api_key,
- embedding_dim=embedding_dim,
- )
+ # Pass model only if provided, let function use its default (text-embedding-3-small)
+ kwargs = {
+ "texts": texts,
+ "base_url": host,
+ "api_key": api_key,
+ "embedding_dim": embedding_dim,
+ }
+ if model:
+ kwargs["model"] = model
+ return await actual_func(**kwargs)
except ImportError as e:
raise Exception(f"Failed to import {binding} embedding: {e}")
diff --git a/lightrag/api/run_with_gunicorn.py b/lightrag/api/run_with_gunicorn.py
index deabe7cf..e3bc0a8c 100644
--- a/lightrag/api/run_with_gunicorn.py
+++ b/lightrag/api/run_with_gunicorn.py
@@ -100,7 +100,7 @@ def main():
print("\nHow to fix:")
print(" Option 1 - Set environment variable before starting (recommended):")
print(" export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES")
- print(" lightrag-server")
+ print(" lightrag-gunicorn --workers 2")
print("\n Option 2 - Add to your shell profile (~/.zshrc or ~/.bash_profile):")
print(" echo 'export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES' >> ~/.zshrc")
print(" source ~/.zshrc")
diff --git a/lightrag/kg/postgres_impl.py b/lightrag/kg/postgres_impl.py
index 1447a79e..49069ce3 100644
--- a/lightrag/kg/postgres_impl.py
+++ b/lightrag/kg/postgres_impl.py
@@ -383,7 +383,7 @@ class PostgreSQLDB:
async def configure_age_extension(connection: asyncpg.Connection) -> None:
"""Create AGE extension if it doesn't exist for graph operations."""
try:
- await connection.execute("CREATE EXTENSION IF NOT EXISTS age") # type: ignore
+ await connection.execute("CREATE EXTENSION IF NOT EXISTS AGE CASCADE") # type: ignore
logger.info("PostgreSQL, AGE extension enabled")
except Exception as e:
logger.warning(f"Could not create AGE extension: {e}")
diff --git a/lightrag/kg/shared_storage.py b/lightrag/kg/shared_storage.py
index 834cdc8f..ef0f61e2 100644
--- a/lightrag/kg/shared_storage.py
+++ b/lightrag/kg/shared_storage.py
@@ -1683,3 +1683,17 @@ def get_default_workspace() -> str:
"""
global _default_workspace
return _default_workspace
+
+
+def get_pipeline_status_lock(
+    enable_logging: bool = False, workspace: str | None = None
+) -> NamespaceLock:
+ """Return unified storage lock for pipeline status data consistency.
+
+ This function is for compatibility with legacy code only.
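+
+    Usage sketch (legacy call sites; "space1" is an illustrative workspace name):
+        async with get_pipeline_status_lock(workspace="space1"):
+            ...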
+ """
+ global _default_workspace
+ actual_workspace = workspace if workspace else _default_workspace
+ return get_namespace_lock(
+ "pipeline_status", workspace=actual_workspace, enable_logging=enable_logging
+ )
diff --git a/lightrag/llm/jina.py b/lightrag/llm/jina.py
index f61faadd..41251f4a 100644
--- a/lightrag/llm/jina.py
+++ b/lightrag/llm/jina.py
@@ -69,6 +69,7 @@ async def fetch_data(url, headers, data):
)
async def jina_embed(
texts: list[str],
+ model: str = "jina-embeddings-v4",
embedding_dim: int = 2048,
late_chunking: bool = False,
base_url: str = None,
@@ -78,6 +79,8 @@ async def jina_embed(
Args:
texts: List of texts to embed.
+ model: The Jina embedding model to use (default: jina-embeddings-v4).
+ Supported models: jina-embeddings-v3, jina-embeddings-v4, etc.
embedding_dim: The embedding dimensions (default: 2048 for jina-embeddings-v4).
**IMPORTANT**: This parameter is automatically injected by the EmbeddingFunc wrapper.
Do NOT manually pass this parameter when calling the function directly.
@@ -107,7 +110,7 @@ async def jina_embed(
"Authorization": f"Bearer {os.environ['JINA_API_KEY']}",
}
data = {
- "model": "jina-embeddings-v4",
+ "model": model,
"task": "text-matching",
"dimensions": embedding_dim,
"embedding_type": "base64",
diff --git a/lightrag/llm/ollama.py b/lightrag/llm/ollama.py
index e35dc293..cd633e80 100644
--- a/lightrag/llm/ollama.py
+++ b/lightrag/llm/ollama.py
@@ -173,7 +173,9 @@ async def ollama_model_complete(
@wrap_embedding_func_with_attrs(embedding_dim=1024, max_token_size=8192)
-async def ollama_embed(texts: list[str], embed_model, **kwargs) -> np.ndarray:
+async def ollama_embed(
+ texts: list[str], embed_model: str = "bge-m3:latest", **kwargs
+) -> np.ndarray:
api_key = kwargs.pop("api_key", None)
if not api_key:
api_key = os.getenv("OLLAMA_API_KEY")
diff --git a/lightrag/llm/openai.py b/lightrag/llm/openai.py
index 11a5f9d7..9c3d0261 100644
--- a/lightrag/llm/openai.py
+++ b/lightrag/llm/openai.py
@@ -309,6 +309,10 @@ async def openai_complete_if_cache(
response = await openai_async_client.chat.completions.create(
model=api_model, messages=messages, **kwargs
)
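+    # APITimeoutError is a subclass of APIConnectionError in the openai SDK,
+    # so it must be caught first; otherwise the broader connection handler
+    # below would swallow timeout errors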
+ except APITimeoutError as e:
+ logger.error(f"OpenAI API Timeout Error: {e}")
+ await openai_async_client.close() # Ensure client is closed
+ raise
except APIConnectionError as e:
logger.error(f"OpenAI API Connection Error: {e}")
await openai_async_client.close() # Ensure client is closed
@@ -317,10 +321,6 @@ async def openai_complete_if_cache(
logger.error(f"OpenAI API Rate Limit Error: {e}")
await openai_async_client.close() # Ensure client is closed
raise
- except APITimeoutError as e:
- logger.error(f"OpenAI API Timeout Error: {e}")
- await openai_async_client.close() # Ensure client is closed
- raise
except Exception as e:
logger.error(
f"OpenAI API Call Failed,\nModel: {model},\nParams: {kwargs}, Got: {e}"
@@ -867,7 +867,7 @@ async def azure_openai_complete(
return result
-@wrap_embedding_func_with_attrs(embedding_dim=1536)
+@wrap_embedding_func_with_attrs(embedding_dim=1536, max_token_size=8192)
async def azure_openai_embed(
texts: list[str],
model: str | None = None,
diff --git a/lightrag/operate.py b/lightrag/operate.py
index 5f824af0..c6724974 100644
--- a/lightrag/operate.py
+++ b/lightrag/operate.py
@@ -397,8 +397,8 @@ async def _handle_single_entity_extraction(
# Validate entity name after all cleaning steps
if not entity_name or not entity_name.strip():
- logger.warning(
- f"Entity extraction error: entity name became empty after cleaning. Original: '{record_attributes[1]}'"
+ logger.info(
+ f"Empty entity name found after sanitization. Original: '{record_attributes[1]}'"
)
return None
@@ -474,14 +474,14 @@ async def _handle_single_relationship_extraction(
# Validate entity names after all cleaning steps
if not source:
- logger.warning(
- f"Relationship extraction error: source entity became empty after cleaning. Original: '{record_attributes[1]}'"
+ logger.info(
+ f"Empty source entity found after sanitization. Original: '{record_attributes[1]}'"
)
return None
if not target:
- logger.warning(
- f"Relationship extraction error: target entity became empty after cleaning. Original: '{record_attributes[2]}'"
+ logger.info(
+ f"Empty target entity found after sanitization. Original: '{record_attributes[2]}'"
)
return None
diff --git a/lightrag_webui/src/components/retrieval/ChatMessage.tsx b/lightrag_webui/src/components/retrieval/ChatMessage.tsx
index 7570f503..e490941c 100644
--- a/lightrag_webui/src/components/retrieval/ChatMessage.tsx
+++ b/lightrag_webui/src/components/retrieval/ChatMessage.tsx
@@ -76,7 +76,8 @@ export const ChatMessage = ({
? message.content
: (displayContent !== undefined ? displayContent : (message.content || ''))
- // Load KaTeX dynamically
+ // Load KaTeX rehype plugin dynamically
+ // Note: KaTeX extensions (mhchem, copy-tex) are imported statically in main.tsx
useEffect(() => {
const loadKaTeX = async () => {
try {
@@ -84,7 +85,6 @@ export const ChatMessage = ({
setKatexPlugin(() => rehypeKatex);
} catch (error) {
console.error('Failed to load KaTeX plugin:', error);
- // Set to null to ensure we don't try to use a failed plugin
setKatexPlugin(null);
}
};
diff --git a/lightrag_webui/src/main.tsx b/lightrag_webui/src/main.tsx
index fb234a60..95a3e587 100644
--- a/lightrag_webui/src/main.tsx
+++ b/lightrag_webui/src/main.tsx
@@ -4,6 +4,9 @@ import './index.css'
import AppRouter from './AppRouter'
import './i18n.ts';
import 'katex/dist/katex.min.css';
+// Import KaTeX extensions at app startup to ensure they are registered before any rendering
+import 'katex/contrib/mhchem'; // Chemistry formulas: \ce{} and \pu{}
+import 'katex/contrib/copy-tex'; // Allow copying rendered formulas as LaTeX source
diff --git a/lightrag_webui/src/types/katex.d.ts b/lightrag_webui/src/types/katex.d.ts
index de362150..499dbf30 100644
--- a/lightrag_webui/src/types/katex.d.ts
+++ b/lightrag_webui/src/types/katex.d.ts
@@ -1 +1,2 @@
declare module 'katex/contrib/mhchem';
+declare module 'katex/contrib/copy-tex';
diff --git a/lightrag_webui/vite.config.ts b/lightrag_webui/vite.config.ts
index f39969c6..c89c34b4 100644
--- a/lightrag_webui/vite.config.ts
+++ b/lightrag_webui/vite.config.ts
@@ -10,7 +10,10 @@ export default defineConfig({
resolve: {
alias: {
'@': path.resolve(__dirname, './src')
- }
+ },
+ // Force all modules to use the same katex instance
+ // This ensures mhchem extension registered in main.tsx is available to rehype-katex
+ dedupe: ['katex']
},
// base: import.meta.env.VITE_BASE_URL || '/webui/',
base: webuiPrefix,
diff --git a/pyproject.toml b/pyproject.toml
index 31667ab9..761a3309 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -23,7 +23,6 @@ classifiers = [
dependencies = [
"aiohttp",
"configparser",
- "future",
"google-api-core>=2.0.0,<3.0.0",
"google-genai>=1.0.0,<2.0.0",
"json_repair",
@@ -54,7 +53,6 @@ api = [
# Core dependencies
"aiohttp",
"configparser",
- "future",
"json_repair",
"nano-vectordb",
"networkx",
@@ -78,9 +76,9 @@ api = [
"distro",
"fastapi",
"httpcore",
- "httpx",
+ "httpx>=0.28.1",
"jiter",
- "passlib[bcrypt]",
+ "bcrypt>=4.0.0",
"psutil",
"PyJWT>=2.8.0,<3.0.0",
"python-jose[cryptography]",
@@ -132,16 +130,18 @@ offline = [
"lightrag-hku[api,offline-storage,offline-llm]",
]
-evaluation = [
- # Test framework dependencies
+test = [
+ "lightrag-hku[api]",
"pytest>=8.4.2",
"pytest-asyncio>=1.2.0",
"pre-commit",
"ruff",
- # RAG evaluation dependencies (RAGAS framework)
+]
+
+evaluation = [
+ "lightrag-hku[api]",
"ragas>=0.3.7",
"datasets>=4.3.0",
- "httpx>=0.28.1",
]
observability = [
diff --git a/uv.lock b/uv.lock
index fb483760..b07a8ea1 100644
--- a/uv.lock
+++ b/uv.lock
@@ -1334,15 +1334,6 @@ http = [
{ name = "aiohttp" },
]
-[[package]]
-name = "future"
-version = "1.0.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/a7/b2/4140c69c6a66432916b26158687e821ba631a4c9273c474343badf84d3ba/future-1.0.0.tar.gz", hash = "sha256:bd2968309307861edae1458a4f8a4f3598c03be43b97521076aebf5d94c07b05", size = 1228490, upload-time = "2024-02-21T11:52:38.461Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/da/71/ae30dadffc90b9006d77af76b393cb9dfbfc9629f339fc1574a1c52e6806/future-1.0.0-py3-none-any.whl", hash = "sha256:929292d34f5872e70396626ef385ec22355a1fae8ad29e1a734c3e43f9fbc216", size = 491326, upload-time = "2024-02-21T11:52:35.956Z" },
-]
-
[[package]]
name = "gitdb"
version = "4.0.12"
@@ -2542,7 +2533,6 @@ source = { editable = "." }
dependencies = [
{ name = "aiohttp" },
{ name = "configparser" },
- { name = "future" },
{ name = "google-api-core" },
{ name = "google-genai" },
{ name = "json-repair" },
@@ -2567,10 +2557,10 @@ api = [
{ name = "aiohttp" },
{ name = "ascii-colors" },
{ name = "asyncpg" },
+ { name = "bcrypt" },
{ name = "configparser" },
{ name = "distro" },
{ name = "fastapi" },
- { name = "future" },
{ name = "google-api-core" },
{ name = "google-genai" },
{ name = "gunicorn" },
@@ -2585,7 +2575,6 @@ api = [
{ name = "openai" },
{ name = "openpyxl" },
{ name = "pandas" },
- { name = "passlib", extra = ["bcrypt"] },
{ name = "pipmaster" },
{ name = "psutil" },
{ name = "pycryptodome" },
@@ -2627,10 +2616,10 @@ offline = [
{ name = "anthropic" },
{ name = "ascii-colors" },
{ name = "asyncpg" },
+ { name = "bcrypt" },
{ name = "configparser" },
{ name = "distro" },
{ name = "fastapi" },
- { name = "future" },
{ name = "google-api-core" },
{ name = "google-genai" },
{ name = "gunicorn" },
@@ -2648,7 +2637,6 @@ offline = [
{ name = "openai" },
{ name = "openpyxl" },
{ name = "pandas" },
- { name = "passlib", extra = ["bcrypt"] },
{ name = "pipmaster" },
{ name = "psutil" },
{ name = "pycryptodome" },
@@ -2714,14 +2702,13 @@ requires-dist = [
{ name = "ascii-colors", marker = "extra == 'api'" },
{ name = "asyncpg", marker = "extra == 'api'" },
{ name = "asyncpg", marker = "extra == 'offline-storage'", specifier = ">=0.29.0,<1.0.0" },
+ { name = "bcrypt", marker = "extra == 'api'", specifier = ">=4.0.0" },
{ name = "configparser" },
{ name = "configparser", marker = "extra == 'api'" },
{ name = "datasets", marker = "extra == 'evaluation'", specifier = ">=4.3.0" },
{ name = "distro", marker = "extra == 'api'" },
{ name = "docling", marker = "sys_platform != 'darwin' and extra == 'docling'", specifier = ">=2.0.0,<3.0.0" },
{ name = "fastapi", marker = "extra == 'api'" },
- { name = "future" },
- { name = "future", marker = "extra == 'api'" },
{ name = "google-api-core", specifier = ">=2.0.0,<3.0.0" },
{ name = "google-api-core", marker = "extra == 'api'", specifier = ">=2.0.0,<3.0.0" },
{ name = "google-api-core", marker = "extra == 'offline-llm'", specifier = ">=2.0.0,<3.0.0" },
@@ -2751,7 +2738,6 @@ requires-dist = [
{ name = "openpyxl", marker = "extra == 'api'", specifier = ">=3.0.0,<4.0.0" },
{ name = "pandas", specifier = ">=2.0.0,<2.4.0" },
{ name = "pandas", marker = "extra == 'api'", specifier = ">=2.0.0,<2.4.0" },
- { name = "passlib", extras = ["bcrypt"], marker = "extra == 'api'" },
{ name = "pipmaster" },
{ name = "pipmaster", marker = "extra == 'api'" },
{ name = "pre-commit", marker = "extra == 'evaluation'" },
@@ -4110,20 +4096,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/ab/5f/b38085618b950b79d2d9164a711c52b10aefc0ae6833b96f626b7021b2ed/pandas-2.2.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ad5b65698ab28ed8d7f18790a0dc58005c7629f227be9ecc1072aa74c0c1d43a", size = 13098436, upload-time = "2024-09-20T13:09:48.112Z" },
]
-[[package]]
-name = "passlib"
-version = "1.7.4"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/b6/06/9da9ee59a67fae7761aab3ccc84fa4f3f33f125b370f1ccdb915bf967c11/passlib-1.7.4.tar.gz", hash = "sha256:defd50f72b65c5402ab2c573830a6978e5f202ad0d984793c8dde2c4152ebe04", size = 689844, upload-time = "2020-10-08T19:00:52.121Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/3b/a4/ab6b7589382ca3df236e03faa71deac88cae040af60c071a78d254a62172/passlib-1.7.4-py2.py3-none-any.whl", hash = "sha256:aa6bca462b8d8bda89c70b382f0c298a20b5560af6cbfa2dce410c0a2fb669f1", size = 525554, upload-time = "2020-10-08T19:00:49.856Z" },
-]
-
-[package.optional-dependencies]
-bcrypt = [
- { name = "bcrypt" },
-]
-
[[package]]
name = "pillow"
version = "11.3.0"