From 77399e051f44866667f165fddb92d75fc8108b3e Mon Sep 17 00:00:00 2001
From: eddiemaru-101
Date: Fri, 30 May 2025 22:43:52 +0900
Subject: [PATCH 1/4] Fix: Increase Ollama timeout values to prevent
 ReadTimeout errors

---
 lightrag/llm/ollama.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/lightrag/llm/ollama.py b/lightrag/llm/ollama.py
index c86aea1c..724716a9 100644
--- a/lightrag/llm/ollama.py
+++ b/lightrag/llm/ollama.py
@@ -50,7 +50,7 @@ async def _ollama_model_if_cache(
     kwargs.pop("max_tokens", None)
     # kwargs.pop("response_format", None) # allow json
     host = kwargs.pop("host", None)
-    timeout = kwargs.pop("timeout", None) or 300  # Default timeout 300s
+    timeout = kwargs.pop("timeout", None) or 600  # Default timeout 600s (increased to 10 minutes)
     kwargs.pop("hashing_kv", None)
     api_key = kwargs.pop("api_key", None)
     headers = {
@@ -146,7 +146,7 @@ async def ollama_embed(texts: list[str], embed_model, **kwargs) -> np.ndarray:
         headers["Authorization"] = f"Bearer {api_key}"
 
     host = kwargs.pop("host", None)
-    timeout = kwargs.pop("timeout", None) or 90  # Default time out 90s
+    timeout = kwargs.pop("timeout", None) or 300  # Default timeout 300s (increased to 5 minutes)
 
     ollama_client = ollama.AsyncClient(host=host, timeout=timeout, headers=headers)
 
@@ -168,4 +168,4 @@ async def ollama_embed(texts: list[str], embed_model, **kwargs) -> np.ndarray:
             await ollama_client._client.aclose()
             logger.debug("Successfully closed Ollama client after embed")
         except Exception as close_error:
-            logger.warning(f"Failed to close Ollama client after embed: {close_error}")
+            logger.warning(f"Failed to close Ollama client after embed: {close_error}")
\ No newline at end of file

From b8a259840447d7f04886b63ad6e54d010733bcf0 Mon Sep 17 00:00:00 2001
From: Chaoying <32626585+Chaoyingz@users.noreply.github.com>
Date: Wed, 11 Jun 2025 10:59:40 +0800
Subject: [PATCH 2/4] Fix incorrect spacing

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 881f66ea..e156f5e5 100644
--- a/README.md
+++ b/README.md
@@ -191,7 +191,7 @@ async def main():
     rag.insert("Your text")
 
     # Perform hybrid search
-    mode="hybrid"
+    mode = "hybrid"
     print(
         await rag.query(
             "What are the top themes in this story?",

From 96b9bd8cc56c607db50feaf2a8d3f54eeaf1660f Mon Sep 17 00:00:00 2001
From: zrguo <49157727+LarFii@users.noreply.github.com>
Date: Thu, 19 Jun 2025 14:16:24 +0800
Subject: [PATCH 3/4] fix lint

---
 lightrag/llm/ollama.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/lightrag/llm/ollama.py b/lightrag/llm/ollama.py
index 724716a9..5cd8a721 100644
--- a/lightrag/llm/ollama.py
+++ b/lightrag/llm/ollama.py
@@ -50,7 +50,7 @@ async def _ollama_model_if_cache(
     kwargs.pop("max_tokens", None)
     # kwargs.pop("response_format", None) # allow json
     host = kwargs.pop("host", None)
-    timeout = kwargs.pop("timeout", None) or 600  # Default timeout 600s (increased to 10 minutes)
+    timeout = kwargs.pop("timeout", None) or 600  # Default timeout 600s
     kwargs.pop("hashing_kv", None)
     api_key = kwargs.pop("api_key", None)
     headers = {
@@ -146,7 +146,7 @@ async def ollama_embed(texts: list[str], embed_model, **kwargs) -> np.ndarray:
         headers["Authorization"] = f"Bearer {api_key}"
 
     host = kwargs.pop("host", None)
-    timeout = kwargs.pop("timeout", None) or 300  # Default timeout 300s (increased to 5 minutes)
+    timeout = kwargs.pop("timeout", None) or 300  # Default timeout 300s
 
     ollama_client = ollama.AsyncClient(host=host, timeout=timeout, headers=headers)
 
@@ -168,4 +168,4 @@ async def ollama_embed(texts: list[str], embed_model, **kwargs) -> np.ndarray:
             await ollama_client._client.aclose()
             logger.debug("Successfully closed Ollama client after embed")
         except Exception as close_error:
-            logger.warning(f"Failed to close Ollama client after embed: {close_error}")
\ No newline at end of file
+            logger.warning(f"Failed to close Ollama client after embed: {close_error}")

From d1aeb291d66feaf09ec0a59ca82eb877e7f7821d Mon Sep 17 00:00:00 2001
From: zrguo <49157727+LarFii@users.noreply.github.com>
Date: Thu, 19 Jun 2025 17:01:21 +0800
Subject: [PATCH 4/4] Update README.md

---
 README.md | 140 +++++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 107 insertions(+), 33 deletions(-)

diff --git a/README.md b/README.md
index 9fbd97cd..8fdf4439 100644
--- a/README.md
+++ b/README.md
@@ -1254,6 +1254,33 @@ Valid modes are:
 
+## Troubleshooting
+
+### Common Initialization Errors
+
+If you encounter these errors when using LightRAG:
+
+1. **`AttributeError: __aenter__`**
+   - **Cause**: Storage backends not initialized
+   - **Solution**: Call `await rag.initialize_storages()` after creating the LightRAG instance
+
+2. **`KeyError: 'history_messages'`**
+   - **Cause**: Pipeline status not initialized
+   - **Solution**: Call `await initialize_pipeline_status()` after initializing storages
+
+3. **Both errors in sequence**
+   - **Cause**: Neither initialization method was called
+   - **Solution**: Always follow this pattern:
+     ```python
+     rag = LightRAG(...)
+     await rag.initialize_storages()
+     await initialize_pipeline_status()
+     ```
+
+### Model Switching Issues
+
+When switching between different embedding models, you must clear the data directory to avoid errors. The only file you may want to preserve is `kv_store_llm_response_cache.json` if you wish to retain the LLM cache.
+
 ## LightRAG API
 
 The LightRAG Server is designed to provide Web UI and API support. **For more information about LightRAG Server, please refer to [LightRAG Server](./lightrag/api/README.md).**
@@ -1519,7 +1546,47 @@ def extract_queries(file_path):
 
-## Star History
+## 🔗 Related Projects
+
+*Ecosystem & Extensions*
+
+<div align="center">
+<table>
+<tr>
+<td align="center">
+<a href="https://github.com/HKUDS/RAG-Anything">
+📸
+<br>
+<b>RAG-Anything</b>
+</a>
+<br>
+Multimodal RAG
+</td>
+<td align="center">
+<a href="https://github.com/HKUDS/VideoRAG">
+🎥
+<br>
+<b>VideoRAG</b>
+</a>
+<br>
+Extreme Long-Context Video RAG
+</td>
+<td align="center">
+<a href="https://github.com/HKUDS/MiniRAG">
+<b>MiniRAG</b>
+</a>
+<br>
+Extremely Simple RAG
+</td>
+</tr>
+</table>
+</div>
+
+---
+
+## ⭐ Star History
 
 [Star History Chart]
 
@@ -1529,42 +1596,22 @@
 
-## Contribution
+## 🤝 Contribution
 
-Thank you to all our contributors!
+<div align="center">
+  We thank all our contributors for their valuable contributions.
+</div>
 
-<a href="https://github.com/HKUDS/LightRAG/graphs/contributors">
-  <img src="https://contrib.rocks/image?repo=HKUDS/LightRAG" />
-</a>
+<div align="center">
+  <a href="https://github.com/HKUDS/LightRAG/graphs/contributors">
+    <img src="https://contrib.rocks/image?repo=HKUDS/LightRAG" />
+  </a>
+</div>
 
+---
+
-## Troubleshooting
-
-### Common Initialization Errors
-
-If you encounter these errors when using LightRAG:
-
-1. **`AttributeError: __aenter__`**
-   - **Cause**: Storage backends not initialized
-   - **Solution**: Call `await rag.initialize_storages()` after creating the LightRAG instance
-
-2. **`KeyError: 'history_messages'`**
-   - **Cause**: Pipeline status not initialized
-   - **Solution**: Call `await initialize_pipeline_status()` after initializing storages
-
-3. **Both errors in sequence**
-   - **Cause**: Neither initialization method was called
-   - **Solution**: Always follow this pattern:
-     ```python
-     rag = LightRAG(...)
-     await rag.initialize_storages()
-     await initialize_pipeline_status()
-     ```
-
-### Model Switching Issues
-
-When switching between different embedding models, you must clear the data directory to avoid errors. The only file you may want to preserve is `kv_store_llm_response_cache.json` if you wish to retain the LLM cache.
-
-## 🌟Citation
+## 📖 Citation
 
 ```python
 @article{guo2024lightrag,
 title={LightRAG: Simple and Fast Retrieval-Augmented Generation},
 author={Zirui Guo and Lianghao Xia and Yanhua Yu and Tu Ao and Chao Huang},
 year={2024},
 eprint={2410.05779},
 archivePrefix={arXiv},
 primaryClass={cs.IR}
 }
 ```
 
-**Thank you for your interest in our work!**
+---
+
+<div align="center">
+
+  Thank you for visiting LightRAG!
+
+</div>
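
Reviewer note (not part of the patch series): below is a minimal sketch for exercising the timeout-override path touched by PATCH 1/4 and PATCH 3/4. It assumes `lightrag` is installed from this branch and that an Ollama server is reachable locally; the model name and host are illustrative stand-ins, not values taken from the patches.

```python
# Sketch: exercise the patched timeout handling in lightrag/llm/ollama.py.
# Assumptions: lightrag installed from this branch, an Ollama server at
# http://localhost:11434, and an embedding model already pulled
# (the model name below is a placeholder).
import asyncio

from lightrag.llm.ollama import ollama_embed


async def main() -> None:
    # ollama_embed pops `host` and `timeout` from kwargs; omitting `timeout`
    # now falls back to the patched 300s default instead of the old 90s.
    embeddings = await ollama_embed(
        ["LightRAG timeout smoke test"],
        embed_model="nomic-embed-text",  # placeholder model name
        host="http://localhost:11434",
        timeout=120,  # an explicit value still overrides the default
    )
    print(embeddings.shape)  # ollama_embed returns an np.ndarray


asyncio.run(main())
```

The same override mechanism applies to the completion path: `_ollama_model_if_cache` pops `timeout` from its kwargs before constructing the client, so callers that already pass `timeout=...` see no behavior change from these patches.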