From b3465107eaed6258b04ea076f2901dc13d0160cc Mon Sep 17 00:00:00 2001
From: kqlio67 <>
Date: Tue, 3 Dec 2024 16:15:57 +0200
Subject: [PATCH] Update g4f/models.py g4f/Provider/needs_auth/HuggingChat.py

---
 g4f/Provider/needs_auth/HuggingChat.py | 2 ++
 g4f/models.py                          | 7 +++++++
 2 files changed, 9 insertions(+)

diff --git a/g4f/Provider/needs_auth/HuggingChat.py b/g4f/Provider/needs_auth/HuggingChat.py
index 70f37d7db9..dfdd957902 100644
--- a/g4f/Provider/needs_auth/HuggingChat.py
+++ b/g4f/Provider/needs_auth/HuggingChat.py
@@ -25,6 +25,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
         'Qwen/Qwen2.5-72B-Instruct',
         'meta-llama/Meta-Llama-3.1-70B-Instruct',
         'CohereForAI/c4ai-command-r-plus-08-2024',
+        'Qwen/QwQ-32B-Preview',
         'nvidia/Llama-3.1-Nemotron-70B-Instruct-HF',
         'Qwen/Qwen2.5-Coder-32B-Instruct',
         'meta-llama/Llama-3.2-11B-Vision-Instruct',
@@ -37,6 +38,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
         "qwen-2.5-72b": "Qwen/Qwen2.5-72B-Instruct",
         "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct",
         "command-r-plus": "CohereForAI/c4ai-command-r-plus-08-2024",
+        "qwq-32b": "Qwen/QwQ-32B-Preview",
         "nemotron-70b": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
         "qwen-2.5-coder-32b": "Qwen/Qwen2.5-Coder-32B-Instruct",
         "llama-3.2-11b": "meta-llama/Llama-3.2-11B-Vision-Instruct",
diff --git a/g4f/models.py b/g4f/models.py
index e0ab9b8955..70e933ed48 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -381,6 +381,12 @@ def __all__() -> list[str]:
     best_provider = IterListProvider([DeepInfraChat, HuggingChat, HuggingFace])
 )
 
+qwq_32b = Model(
+    name = 'qwq-32b',
+    base_provider = 'Qwen',
+    best_provider = IterListProvider([HuggingChat, HuggingFace])
+)
+
 ### Upstage ###
 solar_mini = Model(
     name = 'solar-mini',
@@ -762,6 +768,7 @@ class ModelUtils:
         # qwen 2.5
         'qwen-2.5-72b': qwen_2_5_72b,
         'qwen-2.5-coder-32b': qwen_2_5_coder_32b,
+        'qwq-32b': qwq_32b,
 
         ### Upstage ###
         'solar-mini': solar_mini,
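
A minimal usage sketch for the alias added by this patch, assuming g4f's OpenAI-compatible client (g4f.client.Client) and an environment where the HuggingChat/HuggingFace providers are reachable and authenticated; the prompt text is illustrative only and not part of the patch:

    from g4f.client import Client

    # Default client; for "qwq-32b" the IterListProvider registered above
    # tries HuggingChat first, then HuggingFace, until one responds.
    client = Client()

    # Request a completion from the newly registered "qwq-32b" alias,
    # which maps to Qwen/QwQ-32B-Preview on HuggingChat.
    response = client.chat.completions.create(
        model="qwq-32b",
        messages=[{"role": "user", "content": "Briefly explain what a mutex is."}],
    )

    print(response.choices[0].message.content)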