From 8948a430ca50a310561d0e601a1095e6ddd8fa69 Mon Sep 17 00:00:00 2001
From: rnwang04
Date: Thu, 5 Dec 2024 17:14:11 +0800
Subject: [PATCH] add down proj back for q4_0

---
 python/llm/src/ipex_llm/transformers/npu_models/convert.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/python/llm/src/ipex_llm/transformers/npu_models/convert.py b/python/llm/src/ipex_llm/transformers/npu_models/convert.py
index 4aa45b0054a..2842799b160 100644
--- a/python/llm/src/ipex_llm/transformers/npu_models/convert.py
+++ b/python/llm/src/ipex_llm/transformers/npu_models/convert.py
@@ -93,6 +93,10 @@ def replace_with_QuantizedLinear(layer, qtype, device, modules_to_not_convert,
             if (layer.in_features == 3584 and layer.out_features == 152064):
                 qtype = "sym_int8_rtn"
                 iqtype = ggml_tensor_qtype[qtype]
+        if qtype == "sym_int4_rtn":
+            if (layer.in_features == 18944 and layer.out_features == 3584):
+                qtype = "sym_int8_rtn"
+                iqtype = ggml_tensor_qtype[qtype]
         enable_scale_search = os.environ.get("IPEX_LLM_NPU_QUANTIZATION_OPT", "0") != "0"
         qweights, scale = ggml_convert_qtype(layer.weight.data.to(torch.float32),
                                              iqtype, device=device,
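
Note (not part of the patch): below is a minimal, standalone sketch of the qtype override this hunk adds to replace_with_QuantizedLinear(). The helper name select_npu_qtype is hypothetical, the reading of the (18944, 3584) shape as a down_proj follows from the subject line "add down proj back for q4_0", and the pre-existing (3584, 152064) override is shown unguarded for simplicity even though its surrounding condition is not visible in the hunk.

def select_npu_qtype(in_features: int, out_features: int, qtype: str) -> str:
    # Pre-existing workaround: a 3584 -> 152064 layer (lm_head-sized) is
    # promoted to sym_int8_rtn before quantization.
    if in_features == 3584 and out_features == 152064:
        return "sym_int8_rtn"
    # Added by this patch: under sym_int4_rtn (q4_0), an 18944 -> 3584 layer
    # (down_proj-sized) is also promoted to sym_int8_rtn.
    if qtype == "sym_int4_rtn" and in_features == 18944 and out_features == 3584:
        return "sym_int8_rtn"
    return qtype

if __name__ == "__main__":
    print(select_npu_qtype(18944, 3584, "sym_int4_rtn"))  # sym_int8_rtn: down_proj falls back to int8
    print(select_npu_qtype(3584, 18944, "sym_int4_rtn"))  # sym_int4_rtn: other layers keep q4_0

Net effect: with q4_0 (sym_int4_rtn) on the NPU, a linear layer of this down-proj shape is quantized via the sym_int8_rtn path while the rest of the model stays at the requested 4-bit qtype.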