From a9472e365242c1e2600f4ff15c4f44258cbe9d6f Mon Sep 17 00:00:00 2001
From: Billy Bao
Date: Wed, 24 Sep 2025 16:52:12 +0800
Subject: [PATCH] add Qwen models (#10263)

### What problem does this PR solve?

add Qwen models

### Type of change

- [x] New Feature (non-breaking change which adds functionality)
---
 conf/llm_factories.json | 42 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 42 insertions(+)

diff --git a/conf/llm_factories.json b/conf/llm_factories.json
index b9f8d3869..fe825aab0 100644
--- a/conf/llm_factories.json
+++ b/conf/llm_factories.json
@@ -408,6 +408,48 @@
       "model_type": "chat",
       "is_tools": true
     },
+    {
+      "llm_name": "Qwen/Qwen3-Max",
+      "tags": "LLM,CHAT,256k",
+      "max_tokens": 256000,
+      "model_type": "chat",
+      "is_tools": true
+    },
+    {
+      "llm_name": "Qwen/Qwen3-VL-Plus",
+      "tags": "LLM,IMAGE2TEXT",
+      "max_tokens": 256000,
+      "model_type": "chat",
+      "is_tools": true
+    },
+    {
+      "llm_name": "Qwen/Qwen3-VL-23B-A22B-Thinking",
+      "tags": "LLM,IMAGE2TEXT",
+      "max_tokens": 124000,
+      "model_type": "chat",
+      "is_tools": true
+    },
+    {
+      "llm_name": "Qwen/Qwen3-Omni-Flash-Realtime",
+      "tags": "LLM,IMAGE2TEXT",
+      "max_tokens": 64000,
+      "model_type": "chat",
+      "is_tools": true
+    },
+    {
+      "llm_name": "Qwen/Qwen3-Omni-Flash",
+      "tags": "LLM,IMAGE2TEXT",
+      "max_tokens": 64000,
+      "model_type": "chat",
+      "is_tools": true
+    },
+    {
+      "llm_name": "Qwen/Qwen-Image-Plus",
+      "tags": "LLM,IMAGE,IMAGE2TEXT",
+      "max_tokens": 0,
+      "model_type": "image",
+      "is_tools": true
+    },
     {
       "llm_name": "qwen3-coder-480b-a35b-instruct",
       "tags": "LLM,CHAT,256k",