diff --git a/README.md b/README.md
index 1234e554..8fdf4439 100644
--- a/README.md
+++ b/README.md
@@ -192,7 +192,7 @@ async def main():
         rag.insert("Your text")
 
         # Perform hybrid search
-        mode="hybrid"
+        mode = "hybrid"
         print(
             await rag.query(
                 "What are the top themes in this story?",
@@ -1254,6 +1254,33 @@ Valid modes are:
+## Troubleshooting
+
+### Common Initialization Errors
+
+If you encounter these errors when using LightRAG:
+
+1. **`AttributeError: __aenter__`**
+   - **Cause**: Storage backends not initialized
+   - **Solution**: Call `await rag.initialize_storages()` after creating the LightRAG instance
+
+2. **`KeyError: 'history_messages'`**
+   - **Cause**: Pipeline status not initialized
+   - **Solution**: Call `await initialize_pipeline_status()` after initializing storages
+
+3. **Both errors in sequence**
+   - **Cause**: Neither initialization method was called
+   - **Solution**: Always follow this pattern:
+   ```python
+   rag = LightRAG(...)
+   await rag.initialize_storages()
+   await initialize_pipeline_status()
+   ```
+
+### Model Switching Issues
+
+When switching between different embedding models, you must clear the data directory first: vector stores written by the previous model are incompatible with the new one (for example, when the embedding dimensions differ). The only file you may want to preserve is `kv_store_llm_response_cache.json`, if you wish to retain the LLM response cache.
+
 ## LightRAG API
 
 The LightRAG Server is designed to provide Web UI and API support. **For more information about LightRAG Server, please refer to [LightRAG Server](./lightrag/api/README.md).**
@@ -1519,7 +1546,47 @@ def extract_queries(file_path):
 
-## Star History
+## πŸ”— Related Projects
+
+*Ecosystem & Extensions*
+
+<table>
+ + + + + + +
+ +
+ πŸ“Έ +
+ RAG-Anything
+ Multimodal RAG +
+
+ +
+ πŸŽ₯ +
+ VideoRAG
+ Extreme Long-Context Video RAG +
+
+ +
+ ✨ +
+ MiniRAG
+ Extremely Simple RAG +
+
+
+</table>
+
+---
+
+## ⭐ Star History
 
@@ -1529,42 +1596,22 @@ def extract_queries(file_path):
 
-## Contribution
+## 🀝 Contribution
 
-Thank you to all our contributors!
+<div align="center">
+ We thank all our contributors for their valuable contributions. +
- - - +
+ + + +
-## Troubleshooting
+---
 
-### Common Initialization Errors
-
-If you encounter these errors when using LightRAG:
-
-1. **`AttributeError: __aenter__`**
-   - **Cause**: Storage backends not initialized
-   - **Solution**: Call `await rag.initialize_storages()` after creating the LightRAG instance
-
-2. **`KeyError: 'history_messages'`**
-   - **Cause**: Pipeline status not initialized
-   - **Solution**: Call `await initialize_pipeline_status()` after initializing storages
-
-3. **Both errors in sequence**
-   - **Cause**: Neither initialization method was called
-   - **Solution**: Always follow this pattern:
-   ```python
-   rag = LightRAG(...)
-   await rag.initialize_storages()
-   await initialize_pipeline_status()
-   ```
-
-### Model Switching Issues
-
-When switching between different embedding models, you must clear the data directory to avoid errors. The only file you may want to preserve is `kv_store_llm_response_cache.json` if you wish to retain the LLM cache.
-
-## 🌟Citation
+## πŸ“– Citation
 
 ```python
 @article{guo2024lightrag,
@@ -1577,4 +1624,31 @@
 primaryClass={cs.IR}
 }
 ```
 
-**Thank you for your interest in our work!**
+---
+
+<div align="center">
+
+ +
+
+ + + + + + + + + +
+
+ +
+
+
+ ⭐ + Thank you for visiting LightRAG! + ⭐ +
+
+
diff --git a/lightrag/llm/ollama.py b/lightrag/llm/ollama.py
index c86aea1c..5cd8a721 100644
--- a/lightrag/llm/ollama.py
+++ b/lightrag/llm/ollama.py
@@ -50,7 +50,7 @@ async def _ollama_model_if_cache(
     kwargs.pop("max_tokens", None)
     # kwargs.pop("response_format", None) # allow json
     host = kwargs.pop("host", None)
-    timeout = kwargs.pop("timeout", None) or 300  # Default timeout 300s
+    timeout = kwargs.pop("timeout", None) or 600  # Default timeout 600s
     kwargs.pop("hashing_kv", None)
     api_key = kwargs.pop("api_key", None)
     headers = {
@@ -146,7 +146,7 @@ async def ollama_embed(texts: list[str], embed_model, **kwargs) -> np.ndarray:
         headers["Authorization"] = f"Bearer {api_key}"
 
     host = kwargs.pop("host", None)
-    timeout = kwargs.pop("timeout", None) or 90  # Default time out 90s
+    timeout = kwargs.pop("timeout", None) or 300  # Default timeout 300s
     ollama_client = ollama.AsyncClient(host=host, timeout=timeout, headers=headers)
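Both `ollama.py` hunks read `timeout` out of `**kwargs` before falling back to the raised defaults (600 s for completions, 300 s for embeddings), so callers can still override the value per call. A minimal sketch of such an override, assuming `ollama_embed` is invoked directly; the model name and host URL below are placeholders, not values taken from this diff:

```python
import asyncio

from lightrag.llm.ollama import ollama_embed


async def main() -> None:
    # timeout is popped from **kwargs inside ollama_embed; passing it here
    # overrides the new 300 s default for this call only.
    embeddings = await ollama_embed(
        ["What are the top themes in this story?"],
        embed_model="nomic-embed-text",  # placeholder model name
        host="http://localhost:11434",   # placeholder Ollama host
        timeout=120,                     # seconds; overrides the default
    )
    print(embeddings.shape)  # ollama_embed returns an np.ndarray


asyncio.run(main())
```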
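Separately, the "Model Switching Issues" section added to the README above tells users to clear the data directory while optionally keeping `kv_store_llm_response_cache.json`. A minimal sketch of that cleanup, assuming LightRAG's default file-based storages under a working directory; the directory name and the helper function are illustrative, not part of LightRAG's API:

```python
import shutil
from pathlib import Path

# Files that should survive the reset; keeps the LLM response cache
# across embedding-model switches, as the README suggests.
KEEP = {"kv_store_llm_response_cache.json"}


def reset_working_dir(working_dir: str) -> None:
    """Remove every file and subdirectory except the entries in KEEP."""
    root = Path(working_dir)
    if not root.is_dir():
        return  # nothing to clear yet
    for entry in root.iterdir():
        if entry.name in KEEP:
            continue  # preserve the LLM cache
        if entry.is_dir():
            shutil.rmtree(entry)
        else:
            entry.unlink()


# Clear stale vector/graph stores before re-creating LightRAG
# with a different embedding model.
reset_working_dir("./rag_storage")  # placeholder working_dir
```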