diff --git a/env.example b/env.example
index e4521132..01b87586 100644
--- a/env.example
+++ b/env.example
@@ -91,6 +91,8 @@ ENABLE_LLM_CACHE=true
 ### For rerank model deployed by vLLM use cohere binding
 #########################################################
 RERANK_BINDING=null
+### Enable rerank by default in query params
+# RERANK_BY_DEFAULT=True
 ### rerank score chunk filter(set to 0.0 to keep all chunks, 0.6 or above if LLM is not strong enought)
 # MIN_RERANK_SCORE=0.0
 
diff --git a/lightrag/api/lightrag_server.py b/lightrag/api/lightrag_server.py
index 7d94eec4..328e7953 100644
--- a/lightrag/api/lightrag_server.py
+++ b/lightrag/api/lightrag_server.py
@@ -391,7 +391,7 @@ def create_app(args):
             ),
         )
 
-    # Configure rerank function based on enable_rerank parameter
+    # Configure rerank function based on args.rerank_binding parameter
     rerank_model_func = None
     if args.rerank_binding != "null":
         from lightrag.rerank import cohere_rerank, jina_rerank, ali_rerank
diff --git a/lightrag/base.py b/lightrag/base.py
index cfe48eea..fe92e785 100644
--- a/lightrag/base.py
+++ b/lightrag/base.py
@@ -157,7 +157,7 @@ class QueryParam:
     If proivded, this will be use instead of the default vaulue from prompt template.
     """
 
-    enable_rerank: bool = os.getenv("ENABLE_RERANK", "false").lower() == "true"
+    enable_rerank: bool = os.getenv("RERANK_BY_DEFAULT", "true").lower() == "true"
     """Enable reranking for retrieved text chunks. If True but no rerank model is configured, a warning will be issued.
     Default is True to enable reranking when rerank model is available.
     """
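
Reviewer note: a minimal sketch (not part of this patch) of how the renamed RERANK_BY_DEFAULT variable is meant to drive QueryParam.enable_rerank. The import path follows lightrag/base.py above; since the dataclass default reads os.getenv() once at class-definition time, the variable must be in the environment before lightrag is first imported.

# Sketch only: illustrates the new default behavior introduced by this diff.
import os

# Must be set before lightrag.base is imported; the dataclass default
# os.getenv("RERANK_BY_DEFAULT", "true") is evaluated once at import time.
os.environ.setdefault("RERANK_BY_DEFAULT", "true")

from lightrag.base import QueryParam

param = QueryParam()
assert param.enable_rerank is True  # reranking now opts in by default

# An explicit per-query value still overrides the environment default:
param_no_rerank = QueryParam(enable_rerank=False)
assert param_no_rerank.enable_rerank is False

Note that enable_rerank only requests reranking; per the server change above, an actual rerank model function is wired up only when args.rerank_binding is not "null", and the QueryParam docstring says a warning is issued otherwise.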