diff --git a/README.md b/README.md
index 1234e554..8fdf4439 100644
--- a/README.md
+++ b/README.md
@@ -192,7 +192,7 @@ async def main():
rag.insert("Your text")
# Perform hybrid search
- mode="hybrid"
+ mode = "hybrid"
print(
await rag.query(
"What are the top themes in this story?",
@@ -1254,6 +1254,33 @@ Valid modes are:
+## Troubleshooting
+
+### Common Initialization Errors
+
+If you encounter these errors when using LightRAG:
+
+1. **`AttributeError: __aenter__`**
+ - **Cause**: Storage backends not initialized
+ - **Solution**: Call `await rag.initialize_storages()` after creating the LightRAG instance
+
+2. **`KeyError: 'history_messages'`**
+ - **Cause**: Pipeline status not initialized
+ - **Solution**: Call `await initialize_pipeline_status()` after initializing storages
+
+3. **Both errors in sequence**
+ - **Cause**: Neither initialization method was called
+   - **Solution**: Always follow this pattern (a fuller sketch follows this list):
+ ```python
+ rag = LightRAG(...)
+ await rag.initialize_storages()
+ await initialize_pipeline_status()
+ ```
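+
+Putting the pattern together, here is a minimal end-to-end sketch. The import path for `initialize_pipeline_status` and the `asyncio` wrapper are assumptions based on the examples in this README; the `LightRAG(...)` arguments are left elided because they depend on your storage and model configuration:
+
+```python
+import asyncio
+
+from lightrag import LightRAG
+# Assumed import path for initialize_pipeline_status; adjust if your
+# version exposes it elsewhere.
+from lightrag.kg.shared_storage import initialize_pipeline_status
+
+async def main():
+    rag = LightRAG(...)  # placeholder: supply your working_dir, LLM, and embedding config
+    await rag.initialize_storages()     # 1. initialize storage backends
+    await initialize_pipeline_status()  # 2. initialize pipeline status
+    # Only after both calls is it safe to insert and query, e.g.:
+    # await rag.ainsert("Your text")
+
+asyncio.run(main())
+```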
+
+### Model Switching Issues
+
+When switching between different embedding models, you must clear the data directory to avoid errors caused by incompatible stored embeddings (e.g., mismatched vector dimensions). The only file you may want to preserve is `kv_store_llm_response_cache.json`, if you wish to retain the LLM response cache; one way to do this is sketched below.
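+
+A minimal cleanup sketch, assuming the default file-based storage backends that keep their data as flat files directly in the working directory (`./dickens` is a placeholder; substitute your own `working_dir`):
+
+```python
+from pathlib import Path
+
+working_dir = Path("./dickens")  # assumption: your LightRAG working_dir
+keep = {"kv_store_llm_response_cache.json"}  # preserve the LLM cache
+
+for path in working_dir.iterdir():
+    # Remove everything tied to the old embedding model, keep the cache.
+    if path.is_file() and path.name not in keep:
+        path.unlink()
+```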
+
## LightRAG API
The LightRAG Server is designed to provide Web UI and API support. **For more information about LightRAG Server, please refer to [LightRAG Server](./lightrag/api/README.md).**
@@ -1519,7 +1546,47 @@ def extract_queries(file_path):
-## Star History
+## 🔗 Related Projects
+
+*Ecosystem & Extensions*
+
+---
+
+## ⭐ Star History
@@ -1529,42 +1596,22 @@ def extract_queries(file_path):
-## Contribution
+## 🤝 Contribution
-Thank you to all our contributors!
+
+We thank all our contributors for their valuable contributions.
+
-## Troubleshooting
+---
-### Common Initialization Errors
-If you encounter these errors when using LightRAG:
-
-1. **`AttributeError: __aenter__`**
- - **Cause**: Storage backends not initialized
- - **Solution**: Call `await rag.initialize_storages()` after creating the LightRAG instance
-
-2. **`KeyError: 'history_messages'`**
- - **Cause**: Pipeline status not initialized
- - **Solution**: Call `await initialize_pipeline_status()` after initializing storages
-
-3. **Both errors in sequence**
- - **Cause**: Neither initialization method was called
- - **Solution**: Always follow this pattern:
- ```python
- rag = LightRAG(...)
- await rag.initialize_storages()
- await initialize_pipeline_status()
- ```
-
-### Model Switching Issues
-
-When switching between different embedding models, you must clear the data directory to avoid errors. The only file you may want to preserve is `kv_store_llm_response_cache.json` if you wish to retain the LLM cache.
-
-## 📖Citation
+## 📖 Citation
```python
@article{guo2024lightrag,
@@ -1577,4 +1624,31 @@ primaryClass={cs.IR}
}
```
-**Thank you for your interest in our work!**
+---
+
+⭐ Thank you for visiting LightRAG! ⭐
+
diff --git a/lightrag/llm/ollama.py b/lightrag/llm/ollama.py
index c86aea1c..5cd8a721 100644
--- a/lightrag/llm/ollama.py
+++ b/lightrag/llm/ollama.py
@@ -50,7 +50,7 @@ async def _ollama_model_if_cache(
kwargs.pop("max_tokens", None)
# kwargs.pop("response_format", None) # allow json
host = kwargs.pop("host", None)
- timeout = kwargs.pop("timeout", None) or 300 # Default timeout 300s
+ timeout = kwargs.pop("timeout", None) or 600 # Default timeout 600s
kwargs.pop("hashing_kv", None)
api_key = kwargs.pop("api_key", None)
headers = {
@@ -146,7 +146,7 @@ async def ollama_embed(texts: list[str], embed_model, **kwargs) -> np.ndarray:
headers["Authorization"] = f"Bearer {api_key}"
host = kwargs.pop("host", None)
- timeout = kwargs.pop("timeout", None) or 90 # Default time out 90s
+    timeout = kwargs.pop("timeout", None) or 300 # Default timeout 300s
ollama_client = ollama.AsyncClient(host=host, timeout=timeout, headers=headers)