Skip to content

Commit e27f3dc

Browse files
[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
1 parent bf12522 commit e27f3dc

File tree

6 files changed

+14
-9
lines changed

6 files changed

+14
-9
lines changed

neural_compressor/common/base_config.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -224,7 +224,7 @@ def _post_init(self):
224224
self._is_initialized = True
225225

226226
def __setattr__(self, name, value):
227-
"""Override the setattr function to propagate updates"""
227+
"""Override the setattr function to propagate updates."""
228228
super().__setattr__(name, value)
229229
if self._is_initialized and name in self.params_list:
230230
self._is_initialized = False

neural_compressor/torch/algorithms/layer_wise/utils.py

+2-1
Original file line numberDiff line numberDiff line change
@@ -259,7 +259,7 @@ def load_first_layer_only(user_model, model_name):
259259
model_name (str): model name or path
260260
"""
261261
for name, m in user_model.named_modules():
262-
if ('layers' not in name or 'layers.0' in name) and len(name) > 0 and len(list(m.named_children())) == 0:
262+
if ("layers" not in name or "layers.0" in name) and len(name) > 0 and len(list(m.named_children())) == 0:
263263
load_module(user_model, name, get_path(model_name), device="hpu" if is_hpu_available() else "cpu")
264264

265265

@@ -357,6 +357,7 @@ def clean_module_weight(module):
357357
kwargs = submodule._parameters[n].__dict__
358358
if is_hpu_available:
359359
from habana_frameworks.torch.core import weight_sharing
360+
360361
if param_cls == weight_sharing.HabanaParameterWrapper:
361362
try:
362363
kwargs.pop("change_device_placement")

neural_compressor/torch/algorithms/weight_only/save_load.py

+4-3
Original file line numberDiff line numberDiff line change
@@ -93,14 +93,15 @@ def save(model, output_dir="./saved_results", format=SaveLoadFormat.DEFAULT, **k
9393
qconfig_file_path = os.path.join(output_folder, QCONFIG_NAME)
9494
# saving process
9595
save_config_mapping(model.qconfig, qconfig_file_path)
96-
if 'blockwise' in kwargs:
97-
from neural_compressor.torch.algorithms.layer_wise import save_layers_in_shards_iteratively, LWQ_WORKSPACE
96+
if "blockwise" in kwargs:
97+
from neural_compressor.torch.algorithms.layer_wise import LWQ_WORKSPACE, save_layers_in_shards_iteratively
98+
9899
checkpoints_folder = kwargs.get("blockwise_load_folder", None)
99100
if not checkpoints_folder:
100101
checkpoints_folder = LWQ_WORKSPACE
101102
save_layers_in_shards_iteratively(checkpoints_folder, output_folder, layers_per_shard=8)
102103
else:
103-
model_state_dict = model.state_dict() # if 'model_state_dict' not in kwargs else kwargs['model_state_dict']
104+
model_state_dict = model.state_dict() # if 'model_state_dict' not in kwargs else kwargs['model_state_dict']
104105
torch.save(model_state_dict, qmodel_weight_file_path)
105106
logger.info("Save quantized model weight to {}.".format(qmodel_weight_file_path))
106107
logger.info("Save configuration of quantized model to {}.".format(qconfig_file_path))

neural_compressor/torch/quantization/quantize.py

+5-2
Original file line numberDiff line numberDiff line change
@@ -157,8 +157,11 @@ def prepare(
157157
else:
158158
model_info = quant_config.get_model_info(model=prepared_model)
159159

160-
if (hasattr(quant_config, "model_path") and quant_config.model_path == ""
161-
and hasattr(prepared_model, "name_or_path")):
160+
if (
161+
hasattr(quant_config, "model_path")
162+
and quant_config.model_path == ""
163+
and hasattr(prepared_model, "name_or_path")
164+
):
162165
quant_config.model_path = prepared_model.name_or_path
163166
configs_mapping = quant_config.to_config_mapping(model_info=model_info)
164167
logger.debug(configs_mapping)

neural_compressor/transformers/quantization/utils.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -629,8 +629,8 @@ def set_nontext_module_config(model, to_quant_block_names, config):
629629
def convert_to_GPTQ_checkpoints(model, quantization_config):
630630
from intel_extension_for_pytorch.nn.modules import WeightOnlyQuantizedLinear as ipex_cpu_linear
631631

632-
from neural_compressor.torch.utils import set_module
633632
from neural_compressor.torch.algorithms.weight_only.modules import INCWeightOnlyLinear
633+
from neural_compressor.torch.utils import set_module
634634

635635
dtype = "int4" if quantization_config.bits == 4 else "int8"
636636
bits = quantization_config.bits

test/3x/torch/requirements.txt

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
auto_round
2+
datasets
23
deepspeed @ git+https://github.com/HabanaAI/[email protected]
34
expecttest
45
intel_extension_for_pytorch
@@ -9,4 +10,3 @@ psutil
910
pytest
1011
torchvision
1112
transformers
12-
datasets

0 commit comments

Comments (0)