From d272f6b4715c5c9677c2e3bef4de5d7b14245379 Mon Sep 17 00:00:00 2001 From: Heyang Sun <60865256+Uxito-Ada@users.noreply.github.com> Date: Thu, 28 Nov 2024 13:26:46 +0800 Subject: [PATCH] remove nf4 unsupported comment in cpu finetuning (#12460) Co-authored-by: Ariadne --- .../alpaca-qlora/alpaca_qlora_finetuning_cpu.py | 2 +- python/llm/example/CPU/QLoRA-FineTuning/qlora_finetuning_cpu.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/python/llm/example/CPU/QLoRA-FineTuning/alpaca-qlora/alpaca_qlora_finetuning_cpu.py b/python/llm/example/CPU/QLoRA-FineTuning/alpaca-qlora/alpaca_qlora_finetuning_cpu.py index e7cd6eb296a..b090751d368 100644 --- a/python/llm/example/CPU/QLoRA-FineTuning/alpaca-qlora/alpaca_qlora_finetuning_cpu.py +++ b/python/llm/example/CPU/QLoRA-FineTuning/alpaca-qlora/alpaca_qlora_finetuning_cpu.py @@ -181,7 +181,7 @@ def train( bnb_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_use_double_quant=False, - bnb_4bit_quant_type="int4", # nf4 not supported on cpu yet + bnb_4bit_quant_type="int4", bnb_4bit_compute_dtype=torch.bfloat16 ) model = AutoModelForCausalLM.from_pretrained(base_model, diff --git a/python/llm/example/CPU/QLoRA-FineTuning/qlora_finetuning_cpu.py b/python/llm/example/CPU/QLoRA-FineTuning/qlora_finetuning_cpu.py index 6b1770567d9..c2e7356d546 100644 --- a/python/llm/example/CPU/QLoRA-FineTuning/qlora_finetuning_cpu.py +++ b/python/llm/example/CPU/QLoRA-FineTuning/qlora_finetuning_cpu.py @@ -62,7 +62,7 @@ bnb_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_use_double_quant=False, - bnb_4bit_quant_type="int4", # nf4 not supported on cpu yet + bnb_4bit_quant_type="int4", bnb_4bit_compute_dtype=torch.bfloat16 ) model = AutoModelForCausalLM.from_pretrained(model_path,