
[Fix] explicitly disable weights_only in torch.load to fix tests #1650

Status: Open · wants to merge 1 commit into main
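
PyTorch 2.6 flipped the default of `torch.load`'s `weights_only` argument from `False` to `True`. The safe loader admits only tensors and a small allowlist of types, so checkpoints that pickle arbitrary Python objects, as mmengine checkpoints do (e.g. the `HistoryBuffer` objects inside `message_hub`), now fail to load and break the test suite. This PR keeps the old behavior by passing `weights_only=False` explicitly at every affected `torch.load` call site. A minimal sketch of the failure mode and the fix, assuming PyTorch >= 2.6; the file names and the `Meta` class below are illustrative, not mmengine code:

```python
# Sketch of the behavior change this PR works around (assumes PyTorch >= 2.6,
# where torch.load defaults to weights_only=True).
import torch

# A checkpoint holding only tensors and primitives loads fine either way.
torch.save({'state_dict': {'w': torch.zeros(2)}, 'meta': {'epoch': 3}}, 'demo.pth')
ckpt = torch.load('demo.pth', map_location='cpu')  # ok under the new default


# A checkpoint holding an arbitrary Python object trips the restricted
# unpickler used by weights_only=True.
class Meta:  # stand-in for classes mmengine stores, e.g. HistoryBuffer
    pass


torch.save({'meta': Meta()}, 'demo_obj.pth')
try:
    torch.load('demo_obj.pth', map_location='cpu')
except Exception as err:  # a pickle.UnpicklingError about disallowed globals
    print(err)

# The fix in this PR: opt back into the old, trusting unpickler. Only do
# this for checkpoints from sources you trust.
ckpt = torch.load('demo_obj.pth', map_location='cpu', weights_only=False)
```
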
8 changes: 4 additions & 4 deletions mmengine/runner/checkpoint.py
@@ -344,7 +344,7 @@ def load_from_local(filename, map_location):
filename = osp.expanduser(filename)
if not osp.isfile(filename):
raise FileNotFoundError(f'{filename} can not be found.')
-checkpoint = torch.load(filename, map_location=map_location)
+checkpoint = torch.load(filename, map_location=map_location, weights_only=False)
return checkpoint


@@ -412,7 +412,7 @@ def load_from_pavi(filename, map_location=None):
with TemporaryDirectory() as tmp_dir:
downloaded_file = osp.join(tmp_dir, model.name)
model.download(downloaded_file)
-checkpoint = torch.load(downloaded_file, map_location=map_location)
+checkpoint = torch.load(downloaded_file, map_location=map_location, weights_only=False)
return checkpoint


@@ -435,7 +435,7 @@ def load_from_ceph(filename, map_location=None, backend='petrel'):
file_backend = get_file_backend(
filename, backend_args={'backend': backend})
with io.BytesIO(file_backend.get(filename)) as buffer:
-checkpoint = torch.load(buffer, map_location=map_location)
+checkpoint = torch.load(buffer, map_location=map_location, weights_only=False)
return checkpoint


@@ -504,7 +504,7 @@ def load_from_openmmlab(filename, map_location=None):
filename = osp.join(_get_mmengine_home(), model_url)
if not osp.isfile(filename):
raise FileNotFoundError(f'{filename} can not be found.')
-checkpoint = torch.load(filename, map_location=map_location)
+checkpoint = torch.load(filename, map_location=map_location, weights_only=False)
return checkpoint


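The hunks above opt every loader in `checkpoint.py` out of the safe path. For comparison, recent PyTorch also offers an allowlist route that keeps `weights_only=True` enabled; a hedged sketch, not what this PR does, assuming `torch.serialization.add_safe_globals` is available and that `HistoryBuffer` is the relevant class to admit:

```python
# Hypothetical alternative (not used by this PR): allowlist the custom classes
# a trusted checkpoint is known to contain, so the restricted unpickler can
# stay enabled. Assumes torch.serialization.add_safe_globals (PyTorch >= 2.4).
import torch
from mmengine.logging import HistoryBuffer  # assumed to be the class to allow

torch.serialization.add_safe_globals([HistoryBuffer])
checkpoint = torch.load('epoch_2.pth', map_location='cpu')  # safe default stays on
```

Passing `weights_only=False` everywhere is the smaller, behavior-preserving change, at the cost of trusting the checkpoint source; the allowlist route would need to enumerate every custom type a checkpoint can carry.
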
18 changes: 9 additions & 9 deletions tests/test_hooks/test_checkpoint_hook.py
@@ -458,13 +458,13 @@ def test_with_runner(self, training_type):
cfg = copy.deepcopy(common_cfg)
runner = self.build_runner(cfg)
runner.train()
-ckpt = torch.load(osp.join(cfg.work_dir, f'{training_type}_11.pth'))
+ckpt = torch.load(osp.join(cfg.work_dir, f'{training_type}_11.pth'), weights_only=False)
self.assertIn('optimizer', ckpt)

cfg.default_hooks.checkpoint.save_optimizer = False
runner = self.build_runner(cfg)
runner.train()
-ckpt = torch.load(osp.join(cfg.work_dir, f'{training_type}_11.pth'))
+ckpt = torch.load(osp.join(cfg.work_dir, f'{training_type}_11.pth'), weights_only=False)
self.assertNotIn('optimizer', ckpt)

# Test save_param_scheduler=False
@@ -479,13 +479,13 @@ def test_with_runner(self, training_type):
]
runner = self.build_runner(cfg)
runner.train()
-ckpt = torch.load(osp.join(cfg.work_dir, f'{training_type}_11.pth'))
+ckpt = torch.load(osp.join(cfg.work_dir, f'{training_type}_11.pth'), weights_only=False)
self.assertIn('param_schedulers', ckpt)

cfg.default_hooks.checkpoint.save_param_scheduler = False
runner = self.build_runner(cfg)
runner.train()
-ckpt = torch.load(osp.join(cfg.work_dir, f'{training_type}_11.pth'))
+ckpt = torch.load(osp.join(cfg.work_dir, f'{training_type}_11.pth'), weights_only=False)
self.assertNotIn('param_schedulers', ckpt)

self.clear_work_dir()
@@ -533,7 +533,7 @@ def test_with_runner(self, training_type):
self.assertFalse(
osp.isfile(osp.join(cfg.work_dir, f'{training_type}_{i}.pth')))

-ckpt = torch.load(osp.join(cfg.work_dir, f'{training_type}_11.pth'))
+ckpt = torch.load(osp.join(cfg.work_dir, f'{training_type}_11.pth'), weights_only=False)
self.assertEqual(ckpt['message_hub']['runtime_info']['keep_ckpt_ids'],
[9, 10, 11])

@@ -574,9 +574,9 @@ def test_with_runner(self, training_type):
runner.train()
best_ckpt_path = osp.join(cfg.work_dir,
f'best_test_acc_{training_type}_5.pth')
-best_ckpt = torch.load(best_ckpt_path)
+best_ckpt = torch.load(best_ckpt_path, weights_only=False)

-ckpt = torch.load(osp.join(cfg.work_dir, f'{training_type}_5.pth'))
+ckpt = torch.load(osp.join(cfg.work_dir, f'{training_type}_5.pth'), weights_only=False)
self.assertEqual(best_ckpt_path,
ckpt['message_hub']['runtime_info']['best_ckpt'])

@@ -603,11 +603,11 @@ def test_with_runner(self, training_type):
runner.train()
best_ckpt_path = osp.join(cfg.work_dir,
f'best_test_acc_{training_type}_5.pth')
-best_ckpt = torch.load(best_ckpt_path)
+best_ckpt = torch.load(best_ckpt_path, weights_only=False)

# if the current ckpt is the best, the interval will be ignored and
# the ckpt will also be saved
-ckpt = torch.load(osp.join(cfg.work_dir, f'{training_type}_5.pth'))
+ckpt = torch.load(osp.join(cfg.work_dir, f'{training_type}_5.pth'), weights_only=False)
self.assertEqual(best_ckpt_path,
ckpt['message_hub']['runtime_info']['best_ckpt'])

10 changes: 5 additions & 5 deletions tests/test_hooks/test_ema_hook.py
@@ -230,7 +230,7 @@ def test_with_runner(self):
self.assertTrue(
isinstance(ema_hook.ema_model, ExponentialMovingAverage))

-checkpoint = torch.load(osp.join(self.temp_dir.name, 'epoch_2.pth'))
+checkpoint = torch.load(osp.join(self.temp_dir.name, 'epoch_2.pth'), weights_only=False)
self.assertTrue('ema_state_dict' in checkpoint)
self.assertTrue(checkpoint['ema_state_dict']['steps'] == 8)

@@ -245,7 +245,7 @@ def test_with_runner(self):
runner.test()

# Test load checkpoint without ema_state_dict
-checkpoint = torch.load(osp.join(self.temp_dir.name, 'epoch_2.pth'))
+checkpoint = torch.load(osp.join(self.temp_dir.name, 'epoch_2.pth'), weights_only=False)
checkpoint.pop('ema_state_dict')
torch.save(checkpoint,
osp.join(self.temp_dir.name, 'without_ema_state_dict.pth'))
@@ -274,7 +274,7 @@ def test_with_runner(self):
runner = self.build_runner(cfg)
runner.train()
state_dict = torch.load(
-    osp.join(self.temp_dir.name, 'epoch_4.pth'), map_location='cpu')
+    osp.join(self.temp_dir.name, 'epoch_4.pth'), map_location='cpu', weights_only=False)
self.assertIn('ema_state_dict', state_dict)
for k, v in state_dict['state_dict'].items():
assert_allclose(v, state_dict['ema_state_dict']['module.' + k])
@@ -287,12 +287,12 @@ def test_with_runner(self):
runner = self.build_runner(cfg)
runner.train()
state_dict = torch.load(
-    osp.join(self.temp_dir.name, 'iter_4.pth'), map_location='cpu')
+    osp.join(self.temp_dir.name, 'iter_4.pth'), map_location='cpu', weights_only=False)
self.assertIn('ema_state_dict', state_dict)
for k, v in state_dict['state_dict'].items():
assert_allclose(v, state_dict['ema_state_dict']['module.' + k])
state_dict = torch.load(
-    osp.join(self.temp_dir.name, 'iter_5.pth'), map_location='cpu')
+    osp.join(self.temp_dir.name, 'iter_5.pth'), map_location='cpu', weights_only=False)
self.assertIn('ema_state_dict', state_dict)

def _test_swap_parameters(self, func_name, *args, **kwargs):
6 changes: 3 additions & 3 deletions tests/test_runner/test_runner.py
@@ -2272,7 +2272,7 @@ def test_checkpoint(self):
self.assertTrue(osp.exists(path))
self.assertFalse(osp.exists(osp.join(self.temp_dir, 'epoch_4.pth')))

-ckpt = torch.load(path)
+ckpt = torch.load(path, weights_only=False)
self.assertEqual(ckpt['meta']['epoch'], 3)
self.assertEqual(ckpt['meta']['iter'], 12)
self.assertEqual(ckpt['meta']['experiment_name'],
@@ -2444,7 +2444,7 @@ def test_checkpoint(self):
self.assertTrue(osp.exists(path))
self.assertFalse(osp.exists(osp.join(self.temp_dir, 'epoch_13.pth')))

-ckpt = torch.load(path)
+ckpt = torch.load(path, weights_only=False)
self.assertEqual(ckpt['meta']['epoch'], 0)
self.assertEqual(ckpt['meta']['iter'], 12)
assert isinstance(ckpt['optimizer'], dict)
@@ -2455,7 +2455,7 @@ def test_checkpoint(self):
self.assertEqual(message_hub.get_info('iter'), 11)
# 2.1.2 check class attribute _statistics_methods can be saved
HistoryBuffer._statistics_methods.clear()
-ckpt = torch.load(path)
+ckpt = torch.load(path, weights_only=False)
self.assertIn('min', HistoryBuffer._statistics_methods)

# 2.2 test `load_checkpoint`