Add gleaning configuration display to frontend status

- Backend: Add MAX_GLEANING env var support in config.py
- Backend: Pass entity_extract_max_gleaning to LightRAG initialization
- Backend: Include gleaning config in /health status API response
- Frontend: Add gleaning to LightragStatus TypeScript type
- Frontend: Display gleaning rounds in StatusCard with quality/speed tradeoff info
- i18n: Add English and Chinese translations for gleaning UI
- Config: Document MAX_GLEANING parameter in env.example

This allows users to see their current gleaning configuration (0=disabled for 2x speed, 1=enabled for higher quality) in the frontend status display.
This commit is contained in:
Claude 2025-11-19 12:13:56 +00:00
parent 63e928d75c
commit 49a485b414
No known key found for this signature in database
7 changed files with 22 additions and 0 deletions

View file

@@ -167,6 +167,9 @@ MAX_PARALLEL_INSERT=2
# EMBEDDING_FUNC_MAX_ASYNC=8
### Num of chunks send to Embedding in single request
# EMBEDDING_BATCH_NUM=10
### Entity extraction gleaning rounds (0=disabled for 2x speedup, 1=enabled for higher quality)
### Recommended: 0 for self-hosted models or speed priority, 1 for cloud APIs with quality priority
MAX_GLEANING=1
###########################################################################
### LLM Configuration

View file

@@ -346,6 +346,9 @@ def parse_args() -> argparse.Namespace:
# Get MAX_GRAPH_NODES from environment
args.max_graph_nodes = get_env_value("MAX_GRAPH_NODES", 1000, int)
# Get ENTITY_EXTRACT_MAX_GLEANING from environment
args.entity_extract_max_gleaning = get_env_value("MAX_GLEANING", 1, int)
# Handle openai-ollama special case
if args.llm_binding == "openai-ollama":
args.llm_binding = "openai"

View file

@@ -1021,6 +1021,7 @@ def create_app(args):
rerank_model_func=rerank_model_func,
max_parallel_insert=args.max_parallel_insert,
max_graph_nodes=args.max_graph_nodes,
entity_extract_max_gleaning=args.entity_extract_max_gleaning,
addon_params={
"language": args.summary_language,
"entity_types": args.entity_types,
@@ -1195,6 +1196,7 @@ def create_app(args):
"max_async": args.max_async,
"embedding_func_max_async": args.embedding_func_max_async,
"embedding_batch_num": args.embedding_batch_num,
"entity_extract_max_gleaning": args.entity_extract_max_gleaning,
},
"auth_mode": auth_mode,
"pipeline_busy": pipeline_status.get("busy", False),

View file

@@ -54,6 +54,7 @@ export type LightragStatus = {
cosine_threshold: number
min_rerank_score: number
related_chunk_number: number
entity_extract_max_gleaning: number
}
update_status?: Record<string, any>
core_version?: string

View file

@@ -22,6 +22,13 @@ const StatusCard = ({ status }: { status: LightragStatus | null }) => {
<span>cosine {status.configuration.cosine_threshold} / rerank_score {status.configuration.min_rerank_score} / max_related {status.configuration.related_chunk_number}</span>
<span>{t('graphPanel.statusCard.maxParallelInsert')}:</span>
<span>{status.configuration.max_parallel_insert}</span>
<span>{t('graphPanel.statusCard.entityExtractGleaning')}:</span>
<span>
{status.configuration.entity_extract_max_gleaning === 0
? t('graphPanel.statusCard.gleaningDisabled')
: `${status.configuration.entity_extract_max_gleaning} ${t('graphPanel.statusCard.gleaningRounds')}`
}
</span>
</div>
</div>

View file

@@ -278,6 +278,9 @@
"workingDirectory": "Working Directory",
"inputDirectory": "Input Directory",
"maxParallelInsert": "Concurrent Doc Processing",
"entityExtractGleaning": "Gleaning (Entity Extraction)",
"gleaningDisabled": "Disabled (2x faster)",
"gleaningRounds": "rounds (higher quality)",
"summarySettings": "Summary Settings",
"llmConfig": "LLM Configuration",
"llmBinding": "LLM Binding",

View file

@@ -278,6 +278,9 @@
"workingDirectory": "工作目录",
"inputDirectory": "输入目录",
"maxParallelInsert": "并行处理文档",
"entityExtractGleaning": "Gleaning实体提取拾遗",
"gleaningDisabled": "已禁用速度快2倍",
"gleaningRounds": "轮(质量更高)",
"summarySettings": "摘要设置",
"llmConfig": "LLM配置",
"llmBinding": "LLM绑定",