From 40b2c48957dd06a661d3772ab5474f98c46a7d31 Mon Sep 17 00:00:00 2001
From: Liu An
Date: Wed, 29 Oct 2025 19:38:57 +0800
Subject: [PATCH] Chore(config): remove Youdao and BAAI embedding model providers (#10873)

### What problem does this PR solve?

This commit removes the Youdao and BAAI entries from the LLM factories configuration as they are no longer needed or supported.

### Type of change

- [x] Config update
---
 api/apps/llm_app.py     |  2 +-
 conf/llm_factories.json | 28 ----------------------------
 2 files changed, 1 insertion(+), 29 deletions(-)

diff --git a/api/apps/llm_app.py b/api/apps/llm_app.py
index d14f6fa15..19b25325f 100644
--- a/api/apps/llm_app.py
+++ b/api/apps/llm_app.py
@@ -368,7 +368,7 @@ def my_llms():
 @manager.route('/list', methods=['GET'])  # noqa: F821
 @login_required
 def list_app():
-    self_deployed = ["Youdao", "FastEmbed", "BAAI", "Ollama", "Xinference", "LocalAI", "LM-Studio", "GPUStack"]
+    self_deployed = ["FastEmbed", "Ollama", "Xinference", "LocalAI", "LM-Studio", "GPUStack"]
     weighted = []
     model_type = request.args.get("model_type")
     try:
diff --git a/conf/llm_factories.json b/conf/llm_factories.json
index 0d8c11035..1c069e8dc 100644
--- a/conf/llm_factories.json
+++ b/conf/llm_factories.json
@@ -974,20 +974,6 @@
             "status": "1",
             "llm": []
         },
-        {
-            "name": "Youdao",
-            "logo": "",
-            "tags": "TEXT EMBEDDING",
-            "status": "1",
-            "llm": [
-                {
-                    "llm_name": "maidalun1020/bce-embedding-base_v1",
-                    "tags": "TEXT EMBEDDING,",
-                    "max_tokens": 512,
-                    "model_type": "embedding"
-                }
-            ]
-        },
         {
             "name": "DeepSeek",
             "logo": "",
@@ -1140,20 +1126,6 @@
             }
         ]
     },
-        {
-            "name": "BAAI",
-            "logo": "",
-            "tags": "TEXT EMBEDDING",
-            "status": "1",
-            "llm": [
-                {
-                    "llm_name": "BAAI/bge-large-zh-v1.5",
-                    "tags": "TEXT EMBEDDING,",
-                    "max_tokens": 1024,
-                    "model_type": "embedding"
-                }
-            ]
-        },
         {
             "name": "Builtin",
             "logo": "",
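
Reviewer note (not part of the patch): the sketch below is a minimal post-apply sanity check, assuming it is run from the repository root. It only verifies that no provider entry named `Youdao` or `BAAI` remains in `conf/llm_factories.json`; walking the parsed JSON for `name` keys avoids hard-coding any assumption about the file's top-level structure.

```python
# Post-apply sanity check (a sketch, not part of the patch): confirm that no
# provider entry named "Youdao" or "BAAI" remains in conf/llm_factories.json.
import json

REMOVED = {"Youdao", "BAAI"}


def provider_names(node):
    """Recursively yield every string stored under a "name" key, so the check
    does not depend on the file's exact top-level structure."""
    if isinstance(node, dict):
        if isinstance(node.get("name"), str):
            yield node["name"]
        for value in node.values():
            yield from provider_names(value)
    elif isinstance(node, list):
        for item in node:
            yield from provider_names(item)


with open("conf/llm_factories.json", encoding="utf-8") as fh:
    config = json.load(fh)

leftover = REMOVED & set(provider_names(config))
assert not leftover, f"provider entries still present: {leftover}"
print("OK: removed providers are gone from llm_factories.json")
```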